repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
delora | delora-main/bin/visualize_pointcloud_normals.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import numpy as np
import torch
import yaml
import ros_utils.publish_point_cloud_and_normals
def config():
    """Load and post-process the configuration for point-cloud visualization.

    Merges the dataset config with deployment options, resolves the torch
    device, selects per-dataset data identifiers based on the run mode, and
    converts field-of-view angles from degrees to radians.

    Returns:
        dict: the merged, post-processed configuration.

    Raises:
        Exception: if config["mode"] is neither "training" nor "testing".
    """
    # Context managers close the YAML files deterministically
    # (the original left both handles open).
    with open('config/config_datasets.yaml') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    with open('config/deployment_options.yaml') as f:
        deployment_options = yaml.load(f, Loader=yaml.FullLoader)
    config.update(deployment_options)
    # Device to be used
    if config["device"] == "cuda":
        config["device"] = torch.device("cuda")
    else:
        config["device"] = torch.device("cpu")
    # Data identifiers: pick the identifier list matching the requested mode
    for dataset in config["datasets"]:
        if config["mode"] == "training":
            config[dataset]["data_identifiers"] = config[dataset]["training_identifiers"]
        elif config["mode"] == "testing":
            config[dataset]["data_identifiers"] = config[dataset]["testing_identifiers"]
        else:
            raise Exception('Only modes "training" and "testing" are valid.')
    # Convert angles to radians
    for dataset in config["datasets"]:
        config[dataset]["vertical_field_of_view"][0] *= (np.pi / 180.0)
        config[dataset]["vertical_field_of_view"][1] *= (np.pi / 180.0)
    config["horizontal_field_of_view"][0] *= (np.pi / 180.0)
    config["horizontal_field_of_view"][1] *= (np.pi / 180.0)
    print("----------------------------------")
    print("Configuration for this run: ")
    print(config)
    print("----------------------------------")
    return config
if __name__ == "__main__":
config = config()
publisher = ros_utils.publish_point_cloud_and_normals.ROSPublisher(config=config)
publisher.publish_dataset() | 1,863 | 35.54902 | 89 | py |
delora | delora-main/bin/preprocess_data.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import os
import numpy as np
import torch
import yaml
import preprocessing.preprocesser
def yes_or_no(question):
    """Ask a yes/no question on stdin and return the answer as a bool.

    Returns True when the (non-empty) reply starts with 'y' (case-insensitive),
    False for any other non-empty reply. An empty reply re-asks the question;
    the original crashed with IndexError on `reply[0]` for empty input.
    """
    while True:
        reply = str(input(question + ' (y/n): ')).lower().strip()
        if not reply:
            continue  # empty input: prompt again instead of crashing
        return reply[0] == 'y'
def config():
    """Build the preprocessing configuration and validate dataset paths.

    Loads dataset and deployment YAMLs, merges them, resolves the torch
    device, converts field-of-view angles to radians, verifies every dataset
    path exists, and asks the user to confirm before returning.

    Returns:
        dict: the merged configuration.

    Raises:
        Exception: if a configured dataset path does not exist.
    """
    # Load parameters (with-blocks close the handles; the original leaked them)
    with open('config/config_datasets.yaml') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    with open('config/deployment_options.yaml') as f:
        deployment_options = yaml.load(f, Loader=yaml.FullLoader)
    config.update(deployment_options)
    # Device to be used
    config["device"] = torch.device(config["device"])
    for dataset in config["datasets"]:
        config[dataset]["horizontal_cells"] = config[dataset]["horizontal_cells_preprocessing"]
        # preprocessing covers both training and testing sequences
        config[dataset]["data_identifiers"] = config[dataset]["training_identifiers"] + config[dataset][
            "testing_identifiers"]
    # Convert angles to radians
    for dataset in config["datasets"]:
        config[dataset]["vertical_field_of_view"][0] *= (np.pi / 180.0)
        config[dataset]["vertical_field_of_view"][1] *= (np.pi / 180.0)
    config["horizontal_field_of_view"][0] *= (np.pi / 180.0)
    config["horizontal_field_of_view"][1] *= (np.pi / 180.0)
    # Check whether rosbag exists
    for dataset in config["datasets"]:
        print("Checking whether path to " + config[dataset]["data_path"] + " exists.")
        if not os.path.exists(config[dataset]["data_path"]):
            raise Exception("Path " + config[dataset]["data_path"] + " does not exist. Exiting.")
    # User check for correctness of paths -------------
    print("----------------------------------")
    print("Run for the datasets: " + str(config["datasets"]))
    print("which are located at")
    for dataset in config["datasets"]:
        print(config[dataset]["data_path"])
    print("and will be stored at")
    for dataset in config["datasets"]:
        print(config[dataset]["preprocessed_path"])
    print("----------")
    if not yes_or_no("Continue?"):
        print("Okay, then program will be stopped.")
        exit()
    # -------------------------------------------------
    print("----------------------------------")
    print("Configuration for this run: ")
    print(config)
    print("----------------------------------")
    return config
# Script entry point: build the config (with interactive confirmation) and
# run the preprocessing pipeline.
if __name__ == "__main__":
    config = config()  # rebinds `config` from the function to the dict it returns
    preprocesser = preprocessing.preprocesser.Preprocesser(config=config)
    preprocesser.preprocess_data()
| 2,781 | 33.345679 | 104 | py |
delora | delora-main/bin/run_testing.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import click
import numpy as np
import torch
import yaml
import deploy.tester
@click.command()
@click.option('--testing_run_name', prompt='MLFlow name of the run',
              help='The name under which the run can be found afterwards.')
@click.option('--experiment_name', help='High-level testing sequence name for clustering in MLFlow.',
              default="testing")
@click.option('--checkpoint', prompt='Path to the saved checkpoint of the model you want to test')
def config(testing_run_name, experiment_name, checkpoint):
    """Assemble the testing configuration from YAML files, CLI options and,
    when present, the parameters stored inside the checkpoint.

    Returns:
        dict: fully resolved configuration for a testing run.
    """
    with open('config/config_datasets.yaml') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    with open('config/deployment_options.yaml') as f:
        deployment_options = yaml.load(f, Loader=yaml.FullLoader)
    config.update(deployment_options)
    with open('config/hyperparameters.yaml') as f:
        network_hyperparameters = yaml.load(f, Loader=yaml.FullLoader)
    config.update(network_hyperparameters)
    # Load the checkpoint once; the original called torch.load(checkpoint)
    # twice, deserializing a potentially large file a second time.
    loaded_checkpoint = torch.load(checkpoint)
    # Parameters from previous run?
    if 'parameters' in loaded_checkpoint:
        print("\033[92m" +
              "Found parameters in checkpoint! Setting part of parameters to those ones."
              + "\033[0;0m")
        parameters_exist = True
    else:
        print("Checkpoint does not contain any parameters. Using those ones specified in the YAML files.")
        parameters_exist = False
    # Parameters that are set depending on whether provided in checkpoint
    if parameters_exist:
        loaded_config = loaded_checkpoint['parameters']
        ## Device to be used
        loaded_config["device"] = torch.device(config["device"])
        ## Dataset selection: testing identifiers always come from the YAMLs
        loaded_config["datasets"] = config["datasets"]
        for dataset in loaded_config["datasets"]:
            loaded_config[dataset]["testing_identifiers"] = config[dataset]["testing_identifiers"]
            loaded_config[dataset]["data_identifiers"] = loaded_config[dataset]["testing_identifiers"]
        ## Inference only
        loaded_config["inference_only"] = config["inference_only"]
        loaded_config["store_dataset_in_RAM"] = config["store_dataset_in_RAM"]
        config = loaded_config
    # Some parameters are only initialized when not taken from checkpoint
    else:
        ## Device to be used
        config["device"] = torch.device(config["device"])
        for dataset in config["datasets"]:
            config[dataset]["data_identifiers"] = config[dataset]["testing_identifiers"]
        ## Convert angles to radians (checkpointed configs are already in radians)
        for dataset in config["datasets"]:
            config[dataset]["vertical_field_of_view"][0] *= (np.pi / 180.0)
            config[dataset]["vertical_field_of_view"][1] *= (np.pi / 180.0)
        config["horizontal_field_of_view"][0] *= (np.pi / 180.0)
        config["horizontal_field_of_view"][1] *= (np.pi / 180.0)
    # Parameters that are always set
    ## No dropout during testing
    if config["use_dropout"]:
        config["use_dropout"] = False
        print("Deactivating dropout for this mode.")
    ## CLI Input
    ### Testing run name
    config["run_name"] = str(testing_run_name)
    ### Checkpoint
    config["checkpoint"] = str(checkpoint)
    ### Experiment name, default specified in deployment_options.yaml
    if experiment_name:
        config["experiment"] = experiment_name
    ## Mode
    config["mode"] = "testing"
    ## Unsupervised
    config["unsupervised_at_start"] = True
    print("----------------------------------")
    print("Configuration for this run: ")
    print(config)
    print("----------------------------------")
    return config
# Script entry point: click builds the config from CLI options, then runs testing.
if __name__ == "__main__":
    config = config(standalone_mode=False)  # standalone_mode=False makes click return the value
    tester = deploy.tester.Tester(config=config)
    tester.test()
| 3,958 | 39.397959 | 106 | py |
delora | delora-main/scripts/convert_kitti_to_rosbag.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import numpy as np
import torch
import yaml
import ros_utils.convert_to_rosbag
def config():
    """Build the configuration for converting KITTI data to a rosbag.

    Merges deployment options with the dataset config, resolves the torch
    device, combines training and testing identifiers, and converts
    field-of-view angles from degrees to radians.

    Returns:
        dict: the merged configuration.
    """
    # with-blocks close the YAML files (the original leaked both handles)
    with open('config/deployment_options.yaml') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    with open('config/config_datasets.yaml') as f:
        dataset_config = yaml.load(f, Loader=yaml.FullLoader)
    config.update(dataset_config)
    # Device to be used
    if config["device"] == "cuda":
        config["device"] = torch.device("cuda")
    else:
        config["device"] = torch.device("cpu")
    # Conversion covers both training and testing sequences
    for dataset in config["datasets"]:
        config[dataset]["data_identifiers"] = config[dataset]["training_identifiers"] + config[dataset][
            "testing_identifiers"]
    # Convert angles to radians
    for dataset in config["datasets"]:
        config[dataset]["vertical_field_of_view"][0] *= (np.pi / 180.0)
        config[dataset]["vertical_field_of_view"][1] *= (np.pi / 180.0)
    config["horizontal_field_of_view"][0] *= (np.pi / 180.0)
    config["horizontal_field_of_view"][1] *= (np.pi / 180.0)
    print("----------------------------------")
    print("Configuration for this run: ")
    print(config)
    print("----------------------------------")
    return config
# Script entry point: build the config, then convert the KITTI data to a rosbag.
if __name__ == "__main__":
    config = config()  # rebinds `config` from the function to the dict it returns
    converter = ros_utils.convert_to_rosbag.RosbagConverter(config=config)
    converter.convert()
| 1,588 | 31.428571 | 104 | py |
delora | delora-main/scripts/time_network.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import click
import numpy as np
import torch
import yaml
import models.model
import time
from torch.utils import mkldnn as mkldnn_utils
@click.command()
@click.option('--checkpoint', help='Path to the saved model you want to continue training from.',
              default="")
def config(checkpoint):
    """Load the timing-benchmark configuration.

    Merges deployment options, dataset config, and hyperparameters, records
    the optional checkpoint path, resolves the torch device, and converts
    field-of-view angles from degrees to radians.

    Returns:
        dict: the merged configuration.
    """
    # with-blocks close the YAML files (the original leaked all three handles)
    with open('config/deployment_options.yaml') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    with open('config/config_datasets.yaml') as f:
        dataset_config = yaml.load(f, Loader=yaml.FullLoader)
    config.update(dataset_config)
    with open('config/hyperparameters.yaml') as f:
        hyperparameters_config = yaml.load(f, Loader=yaml.FullLoader)
    config.update(hyperparameters_config)
    # CLI Input: empty string means "no checkpoint"
    config["checkpoint"] = str(checkpoint) if checkpoint else None
    # Device to be used
    config["device"] = torch.device(config["device"])
    # Convert angles to radians
    for dataset in config["datasets"]:
        config[dataset]["vertical_field_of_view"][0] *= (np.pi / 180.0)
        config[dataset]["vertical_field_of_view"][1] *= (np.pi / 180.0)
    config["horizontal_field_of_view"][0] *= (np.pi / 180.0)
    config["horizontal_field_of_view"][1] *= (np.pi / 180.0)
    print("Configuration for this run: ")
    print(config)
    return config
# Benchmark script: measure forward-pass latency of the odometry model,
# first as a plain module, then as a TorchScript trace.
if __name__ == "__main__":
    config = config(standalone_mode=False)  # rebinds `config` to the dict
    iterations = 1000
    torch.set_num_threads(4)
    # CUDA synchronisation
    # NOTE(review): torch.cuda.synchronize() assumes a CUDA-capable build/device;
    # confirm this script is only run where CUDA is available.
    torch.cuda.synchronize()
    # Velodyne VLP-16: one scan of 4 channels, 16 rings x 720 azimuth bins
    print("Velodyne VLP-16 --------------")
    sample_input = torch.rand(1, 4, 16, 720).to(config["device"])
    print("Used device is: " + str(config["device"]))
    ## Standard Model
    model = models.model.OdometryModel(config=config).to(config["device"]).eval()
    # warm-up pass, excluded from the timed loop
    _, _ = model(sample_input, sample_input)
    torch.cuda.synchronize()
    t_accum = 0.0
    for iteration in range(iterations):
        t = time.time()
        _, _ = model(sample_input, sample_input)
        torch.cuda.synchronize()  # wait for the device before reading the clock
        t_delta = time.time() - t
        t_accum += t_delta
        print(str(t_delta * 1000) + "ms")
    print("Average execution time of model is: " + str(t_accum / iterations * 1000) + " milliseconds.")
    del model
    ## TorchScript-traced model
    model_jit = torch.jit.trace(
        models.model.OdometryModel(config=config).to(config["device"]),
        example_inputs=(sample_input, sample_input)).eval()
    t_accum = 0.0
    for iteration in range(iterations + 1):
        torch.cuda.synchronize()
        t = time.time()
        _, _ = model_jit(sample_input, sample_input)
        torch.cuda.synchronize()
        t_delta = time.time() - t
        if iteration != 0:  # iteration 0 warms up the JIT; excluded from the average
            t_accum += t_delta
        print(t_delta)
    print(
        "Average execution time of jit model is: " + str(t_accum / iterations * 1000) + " milliseconds.")
| 3,103 | 32.021277 | 105 | py |
delora | delora-main/scripts/convert_pytorch_models.py | #!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import click
import torch
@click.command()
@click.option('--checkpoint',
              prompt='Path to the saved model (without .pth) you want to convert to older PyTorch compatibility.')
def convert_pytorch_model(checkpoint):
    """Re-save a checkpoint with the legacy (non-zipfile) serialization so it
    can be loaded by older PyTorch / Python 2.7 environments."""
    source_path = checkpoint + ".pth"
    target_path = checkpoint + "_py27.pth"
    state_dict = torch.load(source_path, map_location=torch.device("cpu"))
    print(state_dict)
    torch.save(state_dict, target_path, _use_new_zipfile_serialization=False)
# Script entry point: click prompts for --checkpoint when it is not supplied.
if __name__ == "__main__":
    convert_pytorch_model()
| 732 | 33.904762 | 114 | py |
rankpredictor | rankpredictor-master/src/indycar/sl-lstm.py | from pandas import DataFrame
from pandas import Series
from pandas import concat
from pandas import read_csv
from pandas import datetime
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from math import sqrt
import matplotlib
import numpy
from numpy import concatenate
# date-time parsing function for loading the dataset
def parser(x):
    """Parse a shampoo-dataset date like '1-01' as year 190x, month."""
    full_date = '190' + x
    return datetime.strptime(full_date, '%Y-%m')
# frame a sequence as a supervised learning problem
def timeseries_to_supervised(data, lag=1):
    """Frame a sequence as supervised learning: `lag` shifted columns + current value.

    Drops the first `lag` rows, which contain NaNs introduced by shifting.
    (The original dropped only row 0, leaving NaN rows whenever lag > 1.)
    For lag == 1 the behavior is unchanged.
    """
    df = DataFrame(data)
    columns = [df.shift(i) for i in range(1, lag + 1)]
    columns.append(df)
    df = concat(columns, axis=1)
    df = df.drop(list(range(lag)))
    return df
# create a differenced series
def difference(dataset, interval=1):
    """Return the interval-differenced series of `dataset` as a pandas Series."""
    deltas = [dataset[i] - dataset[i - interval]
              for i in range(interval, len(dataset))]
    return Series(deltas)
# invert differenced value
def inverse_difference(history, yhat, interval=1):
    """Invert differencing: add back the observation `interval` steps ago."""
    base = history[-interval]
    return base + yhat
# scale train and test data to [-1, 1]
def scale(train, test):
    """Fit a [-1, 1] MinMaxScaler on `train` and transform both splits.

    Returns (scaler, train_scaled, test_scaled).
    """
    scaler = MinMaxScaler(feature_range=(-1, 1)).fit(train)
    # keep the explicit 2D reshape of the original
    train_2d = train.reshape(train.shape[0], train.shape[1])
    test_2d = test.reshape(test.shape[0], test.shape[1])
    return scaler, scaler.transform(train_2d), scaler.transform(test_2d)
# inverse scaling for a forecasted value
def invert_scale(scaler, X, yhat):
    """Invert scaling for a forecast `yhat` appended to its input row `X`."""
    row = numpy.array(list(X) + [yhat]).reshape(1, -1)
    inverted = scaler.inverse_transform(row)
    return inverted[0, -1]
# fit an LSTM network to training data
def fit_lstm(train, batch_size, nb_epoch, neurons):
    """Fit a stateful LSTM on supervised rows whose last column is the target.

    train: 2D array [X..., y]; nb_epoch: number of manual epochs; neurons:
    LSTM hidden size. Trains one epoch at a time and resets the LSTM state
    between epochs, as required for stateful Keras LSTMs.
    """
    X, y = train[:, 0:-1], train[:, -1]
    # LSTM expects (samples, timesteps, features); here timesteps == 1
    X = X.reshape(X.shape[0], 1, X.shape[1])
    model = Sequential()
    model.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    for i in range(nb_epoch):
        # shuffle=False preserves sequence order for the stateful LSTM
        model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
        model.reset_states()
    return model
# make a one-step forecast
def forecast_lstm(model, batch_size, X):
    """Make a one-step forecast for a single input row X; returns a scalar."""
    # reshape to (1 sample, 1 timestep, len(X) features)
    X = X.reshape(1, 1, len(X))
    yhat = model.predict(X, batch_size=batch_size)
    return yhat[0, 0]
# run a repeated experiment
def experiment(repeats, series):
    """Train and evaluate the LSTM `repeats` times on `series`; return RMSEs.

    Pipeline: difference -> supervised framing -> scale to [-1, 1] -> fit a
    stateful LSTM -> walk-forward forecast of the last 12 points -> invert
    scaling and differencing -> RMSE against the raw observations.
    """
    # transform data to be stationary
    raw_values = series.values
    diff_values = difference(raw_values, 1)
    # transform data to be supervised learning
    supervised = timeseries_to_supervised(diff_values, 1)
    supervised_values = supervised.values
    # split data into train and test-sets (last 12 observations held out)
    train, test = supervised_values[0:-12], supervised_values[-12:]
    # transform the scale of the data
    scaler, train_scaled, test_scaled = scale(train, test)
    # run experiment
    error_scores = list()
    for r in range(repeats):
        # fit the base model (batch size 1, 500 epochs, 1 neuron)
        lstm_model = fit_lstm(train_scaled, 1, 500, 1)
        # forecast test dataset one step at a time
        predictions = list()
        for i in range(len(test_scaled)):
            # predict
            X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
            yhat = forecast_lstm(lstm_model, 1, X)
            # invert scaling
            yhat = invert_scale(scaler, X, yhat)
            # invert differencing (offset selects the matching raw value)
            yhat = inverse_difference(raw_values, yhat, len(test_scaled) + 1 - i)
            # store forecast
            predictions.append(yhat)
        # report performance on the raw scale
        rmse = sqrt(mean_squared_error(raw_values[-12:], predictions))
        print('%d) Test RMSE: %.3f' % (r + 1, rmse))
        error_scores.append(rmse)
    return error_scores
# execute the experiment
def run():
    """Run the repeated shampoo-sales experiment and save RMSE results."""
    # load dataset
    # NOTE(review): `squeeze=True` and `date_parser=` were deprecated/removed
    # in recent pandas; this call requires an older pandas version — confirm.
    series = read_csv('shampoo-sales.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
    # experiment
    repeats = 10
    results = DataFrame()
    # run experiment
    results['results'] = experiment(repeats, series)
    # summarize results
    print(results.describe())
    # save results
    results.to_csv('experiment_fixed.csv', index=False)
# entry point
run()
| 4,071 | 29.848485 | 113 | py |
rankpredictor | rankpredictor-master/src/indycar/stagedataset.py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from keras import models, layers
import time
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn import metrics
import os
def plot_examples(X, y, ypreds=None, nm_ypreds=None):
    """Plot each sample's input series X[..., 0] and target y, plus optional predictions.

    Args:
        X: array of shape (n_samples, T, n_features); only feature 0 is plotted.
        y: array of shape (n_samples, T, 1).
        ypreds: optional iterable of prediction arrays shaped like y.
        nm_ypreds: labels matching ypreds.
    """
    fig = plt.figure(figsize=(16, 10))
    fig.subplots_adjust(hspace=0.32, wspace=0.15)
    count = 1
    n_ts = X.shape[0]
    num_per_row = 2 if n_ts > 2 else 1
    # Ceiling division keeps the row count an int; the original used true
    # division, which passes a float to add_subplot and fails on modern
    # matplotlib.
    n_rows = (n_ts + num_per_row - 1) // num_per_row
    for irow in range(n_ts):
        ax = fig.add_subplot(n_rows, num_per_row, count)
        ax.plot(X[irow, :, 0], "--", label="x1")
        ax.plot(y[irow, :, :], marker='.', label="y", linewidth=3, alpha=0.5)
        ax.set_title("{:}th time series sample".format(irow))
        if ypreds is not None:
            for ypred, nm in zip(ypreds, nm_ypreds):
                ax.plot(ypred[irow, :, :], marker='.', label=nm)
        count += 1
    plt.legend()
    plt.show()
def plot_vectors(y, ypred, y_idx, nm_ypreds='pred'):
    """Plot concatenated per-series segments of y vs. ypred and print the MAE.

    Args:
        y: flat array of true values, per-series segments concatenated.
        ypred: flat array of predictions with the same layout as y.
        y_idx: array of per-series segment lengths into y/ypred.
        nm_ypreds: unused; kept for interface compatibility.
    """
    fig = plt.figure(figsize=(16, 10))
    fig.subplots_adjust(hspace=0.32, wspace=0.15)
    count = 1
    n_ts = y_idx.shape[0]
    num_per_row = 2 if n_ts > 2 else 1
    # Ceiling division: the original's true division passed a float to
    # add_subplot, which fails on modern matplotlib.
    n_rows = (n_ts + num_per_row - 1) // num_per_row
    start, end = 0, 0
    for irow in range(n_ts):
        end += y_idx[irow]
        ax = fig.add_subplot(n_rows, num_per_row, count)
        ax.plot(y[start:end], marker='.', label="y", linewidth=3, alpha=0.5)
        ax.set_title("{:}th time series sample".format(irow))
        ax.plot(ypred[start:end], marker='.', label='pred')
        count += 1
        start = end
    plt.legend()
    plt.show()
    loss = np.mean(np.abs(ypred.flatten() - y.flatten()))
    print("The final validation loss MAE is {:7.6f}".format(loss))
# load multiple datasets
def load_datalist(datalist):
    """Load several race CSVs and merge them into one min-max-scaled DataFrame.

    Each file gets a 1-based dbid; car numbers are offset by dbid * 1000 so
    they stay unique across events. rank_diff/time_diff are scaled in place
    with raw copies kept in rank_diff_raw/time_diff_raw.

    Returns:
        (scaler, alldata, lens): the fitted MinMaxScaler, the merged
        DataFrame, and the per-file car counts.
    """
    db = []
    lens = []
    for id, f in enumerate(datalist):
        data = pd.read_csv(f)
        data['dbid'] = id + 1
        db.append(data)
        carNumber = len(set(data.car_number))
        lens.append(carNumber)
        print('load %s, len=%d' % (f, data.shape[0]))
    # update car_number with the dbid so cars stay unique across events
    for d in db:
        d['car_number'] += d['dbid'] * 1000
    # pd.concat replaces DataFrame.append, which was removed in pandas 2.0;
    # like repeated append, this keeps the original per-file indices.
    alldata = pd.concat(db) if db else None
    # scaler
    scaler = MinMaxScaler()
    alldata[['rank_diff_raw', 'time_diff_raw']] = alldata[['rank_diff', 'time_diff']]
    alldata[['rank_diff', 'time_diff']] = scaler.fit_transform(alldata[['rank_diff', 'time_diff']])
    return scaler, alldata, lens
def generate_data(dataset, D=1, target='rank', shuffle=False):
    """Turn a multi-event lap DataFrame into padded (X, Y, W) arrays.

    dataset: DataFrame with car_number (event-encoded), time_diff, rank_diff.
    D: prediction horizon in laps (the label at t+D is paired with x at t).
    target: 'rank' -> rank_diff is the label; 'time' -> time_diff.
    shuffle: permute the car (sample) axis.

    Returns:
        X: <cars, T-D, 1> time_diff input sequences, zero-padded to max length.
        Y: <cars, T-D, 1> horizon-shifted labels, zero-padded.
        W: <cars, T-D> mask, 1 where a sequence position is valid.
    """
    # dataset with multiple events, car_number is encoded with event id
    # T is the max sequence length over all cars
    carNumber = len(set(dataset.car_number))
    T = 0
    for car, group in dataset.groupby('car_number'):
        T = max(T, group.shape[0])
    print('carNumber = %d, max T =%d' % (carNumber, T))
    # variable-length time series, collected per car
    x_train, y_train = [], []
    for car, group in dataset.groupby('car_number'):
        x = list(group.time_diff)
        if target == 'rank':
            y = list(group.rank_diff)
        elif target == 'time':
            y = list(group.time_diff)
        else:
            print('error in target setting as', target)
            return None
        # get train/label pairs shifted by the horizon D
        retlen = len(x) - D
        if retlen <= 0:
            print('error with record, too short, car = %d, len=%d' % (car, len(x)))
            continue
        # output
        x_train.append(x[:retlen])
        y_train.append(y[D:])
    # NOTE(review): if any car was skipped as too short, this bails out and
    # returns the raw lists (and the last car's x) instead of arrays —
    # callers must handle both return shapes; confirm this is intended.
    if len(x_train) != carNumber:
        print('error in carNumber')
        return x_train, y_train, x
    # convert to np arrays, zero-padded up to T-D
    X = np.zeros((carNumber, T - D, 1))
    Y = np.zeros((carNumber, T - D, 1))
    W = np.zeros((carNumber, T - D))
    for car in range(carNumber):
        reclen = len(x_train[car])
        X[car, :reclen, 0] = np.array(x_train[car])
        Y[car, :reclen, 0] = np.array(y_train[car])
        W[car, :reclen] = 1
    if shuffle:
        idx = np.random.permutation(carNumber)
        X = X[idx]
        Y = Y[idx]
        W = W[idx]
    return X, Y, W
def read_list(listfile):
    """Read `listfile` and return one whitespace-stripped entry per line."""
    with open(listfile, 'r') as inf:
        return [line.strip() for line in inf]
#from sklearn.utils import check_arrays
def mean_absolute_percentage_error(y_true, y_pred):
    """MAPE in percent, computed only over entries where y_true != 0."""
    nonzero = y_true != 0
    rel_err = np.abs((y_true[nonzero] - y_pred[nonzero]) / y_true[nonzero])
    return np.mean(rel_err) * 100
def generate_feature_vectors(X, Y, W, vect_len=10):
    '''
    Slide a window of length `vect_len` over each valid series to build samples.

    X shape: <samples, series_len, 1>
    Y shape: <samples, series_len, 1>
    W shape: <samples, series_len> validity mask
    vect_len: output feature vector length
    return:
        vect_x, vect_y: stacked windows and their last-step labels
        ts_idx: number of windows extracted per (sufficiently long) series
    '''
    vect_x, vect_y, ts_idx = [], [], []
    for rid in range(X.shape[0]):
        series_x = X[rid, :, 0]
        series_y = Y[rid, :, 0]
        valid_len = int(np.sum(W[rid]))
        if valid_len < vect_len:
            # series is not long enough, skip it
            print('len[%d]=%d is too short, skip' % (rid, valid_len))
            continue
        n_windows = valid_len - vect_len + 1
        ts_idx.append(n_windows)
        for start in range(n_windows):
            vect_x.append(series_x[start:start + vect_len])
            vect_y.append(series_y[start + vect_len - 1])
    return np.array(vect_x), np.array(vect_y), np.array(ts_idx)
def predict(model_name, model, x_test, y_test_in, scaler=None, target='time'):
    """Run a fitted model on x_test and report MAE (scaled and raw scale).

    Args:
        model_name: 'lstm' reshapes input to (n, m, 1); anything else predicts as-is.
        model: fitted estimator exposing .predict().
        x_test: 2D feature matrix.
        y_test_in: ground-truth targets (flattened internally).
        scaler: optional two-column scaler used to undo scaling; column 0 is
            rank_diff, column 1 is time_diff.
        target: 'time' or 'rank' — selects which scaler column holds y.

    Returns:
        (y_pred, Y_pred, mae, tmae, tmape): scaled predictions, raw-scale
        prediction matrix (None when no scaler is given), scaled MAE,
        raw MAE, raw MAPE.
    """
    # prediction
    if model_name == 'lstm':
        n, m = x_test.shape
        y_pred = model.predict(x_test.reshape((n, m, 1)))
    else:
        y_pred = model.predict(x_test)
    # flatten y
    y_test = y_test_in.copy().flatten()
    y_pred = y_pred.flatten()
    # mae in the scaled space
    mae = metrics.mean_absolute_error(y_test, y_pred)
    # Defaults keep the scaler-less path usable; the original raised
    # NameError on Y_pred/tmape when scaler was None.
    tmae, tmape = 0., 0.
    Y_pred = None
    # inverse scale to get original values
    if scaler is not None:
        n = y_test.shape[0]
        col = 1 if target == 'time' else 0
        Y_true = np.zeros((n, 2))
        Y_true[:, col] = y_test
        Y_true = scaler.inverse_transform(Y_true)
        Y_pred = np.zeros((n, 2))
        Y_pred[:, col] = y_pred.reshape((n))
        Y_pred = scaler.inverse_transform(Y_pred)
        tmae = metrics.mean_absolute_error(Y_true, Y_pred)
        tmape = mean_absolute_percentage_error(Y_true, Y_pred)
    # print
    print('%s model mae=%f, raw mae=%f, raw mape=%f' % (model_name, mae, tmae, tmape))
    return y_pred, Y_pred, mae, tmae, tmape
#===================================================================================
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-lap dataset: car_number, completed_laps, rank,
    elapsed_time, rank_diff, time_diff.

    rank_diff/time_diff are per-car lap-to-lap deltas, zeroed on each car's
    first row.
    """
    # pick up data with valid rank: first record per (car, lap) in time order
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap", "current_status", "track_status", "lap_status",
                          "laps_behind_leade", "laps_behind_prec", "overall_rank", "pit_stop_count",
                          "last_pitted_lap", "start_position", "laps_led"], axis=1)
    carnumber = set(uni_ds['car_number'])
    print('cars:', carnumber)
    print('#cars=', len(carnumber))
    # per-lap deltas; a car boundary (first row of each car) is zeroed out.
    # uni_ds is already sorted by car_number and lap, so .diff() is per-car
    # except at boundaries.
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    # .loc avoids the chained assignment (SettingWithCopy) of the original,
    # which is unreliable and rejected by pandas 3's copy-on-write.
    uni_ds.loc[mask, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[mask, 'time_diff'] = 0
    df = uni_ds[['car_number', 'completed_laps', 'rank', 'elapsed_time', 'rank_diff', 'time_diff']]
    return df
def make_lapstatus_data(dataset):
    """Return the (completed_laps, track_status) history of one finishing car."""
    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1
    # cars with a record at the final lap finished the race
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)
    print('count of completed cars:', completed_car_count)
    print('completed cars:', completed_car_numbers)
    # use the first finisher as the flag-status reference
    ref_car = dataset[dataset['car_number'] == completed_car_numbers[0]]
    ref_car = ref_car.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    return ref_car[['completed_laps', 'track_status']]
def load_data(event, year):
    """Load '../data/final/C_<event>-<year>-final.csv' and derive race views.

    Returns:
        alldata: the full raw DataFrame.
        rankdata: first record per (car, lap), ordered by elapsed time.
        acldata: per-lap diff dataset over all cars (make_cl_data).
        flagdata: lap -> track_status from one finishing car.
    """
    inputfile = '../data/final/C_' + event + '-' + year + '-final.csv'
    outputprefix = year + '-' + event + '-'  # currently unused
    dataset = pd.read_csv(inputfile)
    #dataset.info(verbose=True)
    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1
    # get records for the cars that finish the race
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)
    print('count of completed cars:', completed_car_count)
    print('completed cars:', completed_car_numbers)
    # make a copy; restrict `dataset` to the finishers only
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    cldata = make_cl_data(dataset)  # NOTE(review): computed but never returned — confirm intent
    flagdata = make_lapstatus_data(dataset)
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata, flagdata
| 11,022 | 32.812883 | 106 | py |
rankpredictor | rankpredictor-master/src/indycar/online-lstm.py | from pandas import DataFrame
from pandas import Series
from pandas import concat
from pandas import read_csv
from pandas import datetime
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from math import sqrt
import matplotlib
import numpy
from numpy import concatenate
# date-time parsing function for loading the dataset
def parser(x):
    """Parse a timestamp like '2018-05-27 10:00:00.500000'."""
    fmt = '%Y-%m-%d %H:%M:%S.%f'
    return datetime.strptime(x, fmt)
# frame a sequence as a supervised learning problem
def timeseries_to_supervised(data, lag=1):
    """Frame a series as supervised learning: `lag` shifted columns + current value.

    Drops the first `lag` rows, which hold NaNs introduced by shifting; the
    original dropped only row 0, leaving NaN rows whenever lag > 1 (and the
    caller here does pass lag through). Behavior is unchanged for lag == 1.
    """
    df = DataFrame(data)
    cols = [df.shift(i) for i in range(1, lag + 1)]
    cols.append(df)
    framed = concat(cols, axis=1)
    framed = framed.drop(list(range(lag)))
    return framed
# create a differenced series
def difference(dataset, interval=1):
    """Return a Series of dataset[i] - dataset[i - interval]."""
    diffs = []
    idx = interval
    while idx < len(dataset):
        diffs.append(dataset[idx] - dataset[idx - interval])
        idx += 1
    return Series(diffs)
# invert differenced value
def inverse_difference(history, yhat, interval=1):
    """Undo differencing by adding the value from `interval` steps back."""
    return history[-interval] + yhat
# scale train and test data to [-1, 1]
def scale(train, test):
    """Fit a MinMaxScaler([-1, 1]) on train; return (scaler, train_s, test_s)."""
    scaler = MinMaxScaler(feature_range=(-1, 1))
    scaler.fit(train)
    # explicit 2D reshape, kept from the original
    reshaped_train = train.reshape(train.shape[0], train.shape[1])
    reshaped_test = test.reshape(test.shape[0], test.shape[1])
    return scaler, scaler.transform(reshaped_train), scaler.transform(reshaped_test)
# inverse scaling for a forecasted value
def invert_scale(scaler, X, yhat):
    """Append yhat to X and map the row back to the original scale."""
    row = numpy.array([v for v in X] + [yhat])
    restored = scaler.inverse_transform(row.reshape(1, len(row)))
    return restored[0, -1]
# fit an LSTM network to training data
def fit_lstm(train, batch_size, nb_epoch, neurons):
    """Fit a stateful LSTM on supervised rows whose last column is the target.

    Trains one epoch at a time and resets the LSTM state between epochs,
    as required for stateful Keras LSTMs.
    """
    X, y = train[:, 0:-1], train[:, -1]
    # LSTM expects (samples, timesteps, features); here timesteps == 1
    X = X.reshape(X.shape[0], 1, X.shape[1])
    model = Sequential()
    model.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    for i in range(nb_epoch):
        # shuffle=False preserves sequence order for the stateful LSTM
        model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
        model.reset_states()
    return model
# make a one-step forecast
def forecast_lstm(model, batch_size, X):
    """Make a one-step forecast for a single input row X; returns a scalar."""
    # reshape to (1 sample, 1 timestep, len(X) features)
    X = X.reshape(1, 1, len(X))
    yhat = model.predict(X, batch_size=batch_size)
    return yhat[0, 0]
# Update LSTM model
def update_model(model, train, batch_size, updates):
    """Fine-tune a stateful LSTM on `train` for `updates` single epochs.

    Resets the LSTM state after each epoch, as required for stateful training.
    """
    X, y = train[:, 0:-1], train[:, -1]
    X = X.reshape(X.shape[0], 1, X.shape[1])
    for i in range(updates):
        # `epochs=` replaces the `nb_epoch=` keyword that Keras 2 removed
        # (the original raised TypeError on current Keras); this also matches
        # fit_lstm above.
        model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
        model.reset_states()
# run a repeated experiment
def experiment(repeats, series, updates, lag=1):
    """Walk-forward LSTM evaluation repeated `repeats` times; return RMSEs.

    `updates` is the number of update epochs per step; the per-step model
    update is currently commented out, so `updates` is effectively unused.
    """
    # transform data to be stationary
    raw_values = series.values
    diff_values = difference(raw_values, 1)
    # transform data to be supervised learning
    supervised = timeseries_to_supervised(diff_values, lag)
    supervised_values = supervised.values
    # split data into train and test-sets
    trainSize = 1500
    train, test = supervised_values[0:trainSize], supervised_values[trainSize:]
    # transform the scale of the data
    scaler, train_scaled, test_scaled = scale(train, test)
    # run experiment
    error_scores = list()
    for r in range(repeats):
        # fit the base model (batch size 1, 50 epochs, 1 neuron)
        lstm_model = fit_lstm(train_scaled, 1, 50, 1)
        print('Start testing...')
        # forecast test dataset
        train_copy = numpy.copy(train_scaled)
        predictions = list()
        for i in range(len(test_scaled)):
            # per-step model update, currently disabled
            #if i > 0:
            #    update_model(lstm_model, train_copy, 1, updates)
            # predict
            X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
            yhat = forecast_lstm(lstm_model, 1, X)
            # invert scaling
            yhat = invert_scale(scaler, X, yhat)
            # invert differencing
            yhat = inverse_difference(raw_values, yhat, len(test_scaled) + 1 - i)
            # store forecast
            predictions.append(yhat)
            # grow the running training window (only used by the disabled update)
            train_copy = concatenate((train_copy, test_scaled[i, :].reshape(1, -1)))
        # report performance on the raw scale
        rmse = sqrt(mean_squared_error(raw_values[-len(test_scaled):], predictions))
        print('%d) Test RMSE: %.3f' % (r + 1, rmse))
        error_scores.append(rmse)
    return error_scores
# execute the experiment
def run(lag=1):
    """Run the online-update experiment on the Indy vehicle-speed series."""
    # load dataset
    # NOTE(review): `squeeze=True` and `date_parser=` were deprecated/removed
    # in recent pandas; this call requires an older pandas version — confirm.
    series = read_csv('indy2018-1-vspeed.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
    # experiment
    repeats = 10
    results = DataFrame()
    # run experiment
    updates = 2
    results['results'] = experiment(repeats, series, updates, lag)
    # summarize results
    print(results.describe())
    # save results
    results.to_csv('experiment_update_2.csv', index=False)
# entry point (lag defaults to 1)
run()
| 5,148 | 33.326667 | 120 | py |
rankpredictor | rankpredictor-master/src/indycar/notebook.py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from keras import models, layers
import time
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn import metrics
import os
def plot_examples(X, y, ypreds=None, nm_ypreds=None):
    """Plot each sample's input series X[..., 0] and target y, plus optional predictions.

    Args:
        X: array of shape (n_samples, T, n_features); only feature 0 is plotted.
        y: array of shape (n_samples, T, 1).
        ypreds: optional iterable of prediction arrays shaped like y.
        nm_ypreds: labels matching ypreds.
    """
    fig = plt.figure(figsize=(16, 10))
    fig.subplots_adjust(hspace=0.32, wspace=0.15)
    count = 1
    n_ts = X.shape[0]
    num_per_row = 2 if n_ts > 2 else 1
    # Ceiling division keeps the row count an int; the original's true
    # division passed a float to add_subplot, which fails on modern matplotlib.
    n_rows = (n_ts + num_per_row - 1) // num_per_row
    for irow in range(n_ts):
        ax = fig.add_subplot(n_rows, num_per_row, count)
        ax.plot(X[irow, :, 0], "--", label="x1")
        ax.plot(y[irow, :, :], marker='.', label="y", linewidth=3, alpha=0.5)
        ax.set_title("{:}th time series sample".format(irow))
        if ypreds is not None:
            for ypred, nm in zip(ypreds, nm_ypreds):
                ax.plot(ypred[irow, :, :], marker='.', label=nm)
        count += 1
    plt.legend()
    plt.show()
def plot_vectors(y, ypred, y_idx, nm_ypreds='pred'):
    """Plot per-series segments of a flat target vector against predictions.

    y, ypred: flat arrays holding all series concatenated back to back.
    y_idx: array with the number of points belonging to each series.
    nm_ypreds: unused; kept for interface compatibility with callers.

    Also prints the overall MAE between ypred and y.
    """
    fig = plt.figure(figsize=(16, 10))
    fig.subplots_adjust(hspace=0.32, wspace=0.15)
    count = 1
    n_ts = y_idx.shape[0]
    num_per_row = 2 if n_ts > 2 else 1
    # bug fix: integer (floor) division; a float row count breaks add_subplot
    n_rows = (n_ts + num_per_row - 1) // num_per_row
    start, end = 0, 0
    for irow in range(n_ts):
        end += y_idx[irow]
        ax = fig.add_subplot(n_rows, num_per_row, count)
        ax.plot(y[start:end], marker='.', label="y", linewidth=3, alpha=0.5)
        ax.set_title("{:}th time series sample".format(irow))
        ax.plot(ypred[start:end], marker='.', label='pred')
        count += 1
        start = end
    plt.legend()
    plt.show()
    loss = np.mean(np.abs(ypred.flatten() - y.flatten()))
    print("The final validation loss MAE is {:7.6f}".format(loss))
# load multiple datasets
def load_data(datalist):
    """Load several event csv files and merge them into one scaled frame.

    datalist: list of csv paths, one per event.

    Each frame gets a dbid (1-based event id) and its car_number is offset
    by dbid*1000 so car ids stay unique across events.  rank_diff/time_diff
    are min-max scaled in place; the raw values are preserved in
    rank_diff_raw/time_diff_raw.

    Returns (scaler, alldata, lens) where lens holds the car count per event;
    alldata is None when datalist is empty.
    """
    frames = []
    lens = []
    for id, f in enumerate(datalist):
        data = pd.read_csv(f)
        data['dbid'] = id + 1
        # make car_number globally unique across events
        data['car_number'] += data['dbid'] * 1000
        lens.append(len(set(data.car_number)))
        frames.append(data)
        print('load %s, len=%d'%(f, data.shape[0]))

    # DataFrame.append was removed in pandas 2.0 and was O(n^2) in a loop;
    # concatenate once instead.  Keep the old None result for empty input.
    alldata = pd.concat(frames) if frames else None
    if alldata is None:
        return None, alldata, lens

    #scaler
    scaler = MinMaxScaler()
    alldata[['rank_diff_raw', 'time_diff_raw']] = alldata[['rank_diff', 'time_diff']]
    alldata[['rank_diff', 'time_diff']] = scaler.fit_transform(alldata[['rank_diff', 'time_diff']])
    return scaler, alldata, lens
def generate_data(dataset, D=1, target='rank', shuffle=False):
    """Build (X, Y, W) training tensors from a multi-event lap dataset.

    dataset: frame whose car_number column encodes the event id; one time
        series per car.
    D: forecast horizon; Y is the chosen target shifted D laps ahead of X.
    target: 'rank' -> predict rank_diff, 'time' -> predict time_diff.
    shuffle: randomly permute the series order.

    Returns X, Y of shape (cars, T-D, 1) and mask W of shape (cars, T-D),
    where T is the longest per-car series; short/invalid input falls back
    to the original diagnostic returns.
    """
    carNumber = len(set(dataset.car_number))

    # the longest per-car series determines the padded length T
    T = 0
    for _, group in dataset.groupby('car_number'):
        T = max(T, group.shape[0])
    print('carNumber = %d, max T =%d'%(carNumber, T))

    # collect variable-length input/label pairs, one per car
    x_train, y_train = [], []
    for car, group in dataset.groupby('car_number'):
        x = list(group.time_diff)
        if target == 'rank':
            y = list(group.rank_diff)
        elif target == 'time':
            y = list(group.time_diff)
        else:
            print('error in target setting as', target)
            return None

        retlen = len(x) - D
        if retlen <= 0:
            print('error with record, too short, car = %d, len=%d'%(car, len(x)))
            continue
        x_train.append(x[:retlen])
        y_train.append(y[D:])

    if len(x_train) != carNumber:
        print('error in carNumber')
        return x_train, y_train, x

    # zero-pad into fixed-size tensors; W marks the valid prefix of each row
    X = np.zeros((carNumber, T - D, 1))
    Y = np.zeros((carNumber, T - D, 1))
    W = np.zeros((carNumber, T - D))
    for row in range(carNumber):
        reclen = len(x_train[row])
        X[row, :reclen, 0] = np.array(x_train[row])
        Y[row, :reclen, 0] = np.array(y_train[row])
        W[row, :reclen] = 1

    if shuffle:
        order = np.random.permutation(carNumber)
        X, Y, W = X[order], Y[order], W[order]

    return X, Y, W
def read_list(listfile):
    """Read a text file and return its lines with surrounding whitespace stripped."""
    with open(listfile, 'r') as inf:
        return [line.strip() for line in inf]
#from sklearn.utils import check_arrays
def mean_absolute_percentage_error(y_true, y_pred):
    """MAPE in percent, skipping entries where the ground truth is zero
    (those would divide by zero)."""
    nonzero = y_true != 0
    rel_err = np.abs((y_true[nonzero] - y_pred[nonzero]) / y_true[nonzero])
    return np.mean(rel_err) * 100
def generate_feature_vectors(X, Y, W, vect_len=10):
    """Slice padded series into overlapping fixed-length feature windows.

    X: (samples, series_len, 1) inputs.
    Y: (samples, series_len, 1) targets.
    W: (samples, series_len) validity mask (1 = real data).
    vect_len: window length of each feature vector.

    Returns (vect_x, vect_y, ts_idx): the stacked windows, the target at
    each window's last position, and the window count per surviving series.
    Series shorter than vect_len are skipped with a message.
    """
    vect_x, vect_y, ts_idx = [], [], []
    for rid in range(X.shape[0]):
        series_x = X[rid, :, 0]
        series_y = Y[rid, :, 0]
        valid_len = int(np.sum(W[rid]))
        if valid_len < vect_len:
            # series too short to yield even one window
            print('len[%d]=%d is too short, skip'%(rid, valid_len))
            continue
        n_windows = valid_len - vect_len + 1
        ts_idx.append(n_windows)
        for begin in range(n_windows):
            vect_x.append(series_x[begin:begin + vect_len])
            vect_y.append(series_y[begin + vect_len - 1])
    return np.array(vect_x), np.array(vect_y), np.array(ts_idx)
def predict(model_name, model, x_test, y_test_in, scaler=None, target='time'):
    """Run a fitted model on x_test and report scaled and raw MAE.

    model_name: 'lstm' triggers the 3-D (samples, timesteps, 1) reshape;
        any other name calls model.predict on x_test as-is.
    y_test_in: ground truth; copied and flattened before comparison.
    scaler: optional 2-column MinMaxScaler (rank_diff, time_diff); when
        given, predictions are inverse-transformed to raw units.
    target: 'time' -> column 1 of the scaler, otherwise column 0.

    Returns (y_pred, Y_pred, mae, tmae, tmape); Y_pred is None and
    tmae/tmape are 0 when no scaler is supplied.
    """
    # prediction; LSTM expects (samples, timesteps, features)
    if model_name == 'lstm':
        n, m = x_test.shape
        y_pred = model.predict(x_test.reshape((n, m, 1)))
    else:
        y_pred = model.predict(x_test)

    # flatten y
    y_test = y_test_in.copy().flatten()
    y_pred = y_pred.flatten()
    # mae in the scaled space
    mae = metrics.mean_absolute_error(y_test, y_pred)

    # bug fix: tmape and Y_pred were referenced before assignment (NameError)
    # whenever scaler was None; give all raw-space results safe defaults
    tmae = 0.
    tmape = 0.
    Y_pred = None

    # inverse scale to get original values
    if scaler is not None:
        n = y_test.shape[0]
        Y_true = np.zeros((n, 2))
        if target == 'time':
            Y_true[:, 1] = y_test
        else:
            Y_true[:, 0] = y_test
        Y_true = scaler.inverse_transform(Y_true)

        Y_pred = np.zeros((n, 2))
        if target == 'time':
            Y_pred[:, 1] = y_pred.reshape((n))
        else:
            Y_pred[:, 0] = y_pred.reshape((n))
        Y_pred = scaler.inverse_transform(Y_pred)

        tmae = metrics.mean_absolute_error(Y_true, Y_pred)
        tmape = mean_absolute_percentage_error(Y_true, Y_pred)

    print('%s model mae=%f, raw mae=%f, raw mape=%f'%(model_name, mae, tmae, tmape))
    return y_pred, Y_pred, mae, tmae, tmape
| 7,791 | 30.546559 | 99 | py |
rankpredictor | rankpredictor-master/src/indycar/model/evaluate-fulltest-fastrun.py | #!/usr/bin/env python
# coding: utf-8
# ## evaluate-fulltest-fastrun
#
# based on: evaluate-fulltest
#
# + support different models and test set
#
# + rank prediction directly
# + rank prediction by laptime2rank
# + laptime prediction
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
logger = logging.getLogger(__name__)
# In[2]:
import os
random.seed()
os.getcwd()
#GPUID = 1
# ### global constants
# In[3]:
#
# Column indices into the per-car data matrix rec[feature, lap].
# NaNs may appear only at the tail of a series, never in the middle,
# and are trimmed before use.
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6

# oracle mode (bit flags, combined with test_flag())
MODE_ORACLE = 1024 # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2

# oracle mode for training
# NOTE(review): these share values with the ORACLE_* flags above
# (NOLAP==TRACKONLY==1, NOTRACK==LAPONLY==2) — presumably intentional
# aliases, which is why both are tested together downstream; confirm.
MODE_NOLAP = 1
MODE_NOTRACK = 2

#MODE_NOPITAGE = 512

# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32

# disturbance analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256

# flag value -> human-readable name, used by get_modestr()
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
             MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
             MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
             MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
             MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
             MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
# In[ ]:
# In[4]:
def load_data(event, year=0):
    """Load one event's lap-by-lap records from ../data/final/.

    event: event name used in the csv filename.
    year: optional year suffix; 0 (the default) means no year in the name.

    Returns (alldata, rankdata, acldata):
        alldata  - the raw frame as read;
        rankdata - first record per (car_number, completed_laps), ordered
                   by elapsed_time (row order breaks ties);
        acldata  - per-(car, lap) summary of alldata via make_cl_data().
    """
    # bug fix: year is an int; the old code concatenated it to a str
    # directly, raising TypeError for any year > 0
    if year > 0:
        inputfile = '../data/final/C_' + event + '-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'
    dataset = pd.read_csv(inputfile)

    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1

    # cars that completed the final lap
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)

    # keep a full copy; restrict the working set to finishers
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]

    # earliest record per (car, lap); ties broken by original row order
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
# pick up data with valid rank
rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
# resort by car_number, lap
uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
#uni_ds = uni_ds.drop(["unique_id", "best_lap", "current_status", "track_status", "lap_status",
# "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
# "last_pitted_lap","start_position","laps_led"], axis=1)
uni_ds = uni_ds.drop(["unique_id", "best_lap",
"laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
"last_pitted_lap","start_position","laps_led"], axis=1)
carnumber = set(uni_ds['car_number'])
#print('cars:', carnumber)
#print('#cars=', len(carnumber))
# faster solution , uni_ds already sorted by car_number and lap
uni_ds['rank_diff'] = uni_ds['rank'].diff()
mask = uni_ds.car_number != uni_ds.car_number.shift(1)
uni_ds['rank_diff'][mask] = 0
uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
mask = uni_ds.car_number != uni_ds.car_number.shift(1)
uni_ds['time_diff'][mask] = 0
#df = uni_ds[['car_number','completed_laps','rank','elapsed_time','rank_diff','time_diff']]
df = uni_ds[['car_number','completed_laps','rank','elapsed_time',
'rank_diff','time_diff',"current_status", "track_status", "lap_status"]]
return df
# In[6]:
def nan_helper(y):
    """Locate NaNs in a 1-d array.

    Returns a boolean mask of NaN positions and a converter that maps any
    logical mask to integer indices, convenient for np.interp:

        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)

    def to_indices(logical):
        # positions where the logical mask is True
        return logical.nonzero()[0]

    return mask, to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
def get_modestr(a):
    """Human-readable listing of all mode flags set in bitmask a.

    Each matching flag name from _mode_map is appended with a trailing comma.
    """
    names = [_mode_map[flag] for flag in _mode_map if test_flag(a, flag)]
    return ''.join('%s,' % name for name in names)
# Per-split-point caches of the synthetic track-status model:
# endpos -> vector of prediction_length
_track_pred = {}  # sampled track-status forecast per split position
_track_true = {}  # matching ground truth, kept for later comparison

def init_track_model():
    # Reset both caches before evaluating a new dataset.
    global _track_pred,_track_true
    _track_pred = {}
    _track_true = {}
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """
    Return a predicted track-status vector for the next prediction_length laps.

    track_rec: track-status series (1 = yellow flag) up to endpos.
    endpos: split position; one sample is drawn and memoized per endpos
        in _track_pred, with the ground-truth window saved in _track_true.
    context_len: how far back to look for an in-progress yellow-flag run.

    Randomness: the total yellow length is sampled from an empirical
    distribution, so different endpos values yield different forecasts.
    """
    global _track_pred,_track_true

    # this is the perfect track model for Indy500 2018
    # (observed lengths of the full-course-yellow periods)
    track_model = [6,4,4,5,6,6,4]

    if endpos in _track_pred:
        # memoized: one sample per split position
        return _track_pred[endpos]
    else:
        #get yflag lap count from the start pred point
        # (count consecutive yellow laps immediately before the forecast window)
        yflaplen = 0
        for i in range(1, context_len):
            if track_rec[- prediction_length - i] == 1:
                yflaplen += 1
            else:
                break

        #laps remain, fill into the future
        trackpred = np.array([0 for x in range(prediction_length)])

        yflap_pred = random.choice(track_model)
        # only extend a yellow that is already in progress
        if yflaplen > 0 and yflap_pred > yflaplen:
            trackpred[:(yflap_pred - yflaplen)] = 1

        _track_pred[endpos] = trackpred
        _track_true[endpos] = track_rec[- prediction_length:].copy()

        return trackpred
# endpos -> adjusted track-status vector of length prediction_length
_track_adjust = {}

def init_adjust_track_model():
    # Reset the per-split-point cache before a new disturbance run.
    global _track_adjust
    _track_adjust = {}
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """
    Randomly perturb the end of the caution (yellow-flag) period by one lap.

    input:
        track_rec ; oracle track-status series up to endpos
        endpos ; split position, the result is cached per endpos
        tailpos ; <0 end pos of 1 (negative index of the last flagged lap
            in the forecast window)
    return the adjusted track status for the last prediction_length laps
    """
    global _track_adjust

    # shift applied to the caution end: one lap earlier / unchanged / later
    track_model = [-1,0,1]

    if endpos in _track_adjust:
        return _track_adjust[endpos]
    else:
        yflap_adjust = random.choice(track_model)

        #laps remain, fill into the future
        trackadjust = track_rec[-prediction_length:].copy()
        if yflap_adjust == -1:
            # end the caution one lap earlier
            trackadjust[tailpos] = 0
        elif yflap_adjust == 1:
            # NOTE(review): this clears the old tail before flagging the next
            # lap, i.e. it shifts the final caution lap rather than extending
            # the caution — confirm that is the intended disturbance
            trackadjust[tailpos] = 0
            if (tailpos + 1) <= -1:
                trackadjust[tailpos+1] = 1

        _track_adjust[endpos] = trackadjust

        return trackadjust
# carno -> adjusted lap_status vector (each car is perturbed exactly once)
_lap_adjust = {}
# applied shift -> occurrence count (empirical disturbance statistics)
_empirical_model = {}

def init_adjust_pitmodel():
    """Reset the pit-lap disturbance caches before a new run.

    Bug fix: _empirical_model was missing from the global statement, so the
    old code rebound a function-local name and never cleared the module-level
    statistics dict between runs.
    """
    global _lap_adjust, _empirical_model
    _lap_adjust = {}
    _empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """
    Build (once per car) a randomly perturbed copy of the oracle pit laps.

    Every lap flagged 1 is shifted by an offset drawn from _adjust_model;
    the result is cached in _lap_adjust so every evaluation window of the
    same car sees the same disturbance.  Applied offsets are tallied in
    _empirical_model.

    input:
        carno ; car number, used as the cache key
        lapstatus ; the truth (ground-truth pit-lap vector)
        force ; True keeps resampling until the shifted position lands
            inside the vector; False gives up after a single draw
    """
    if carno not in _lap_adjust:
        #adjust it
        lapadjust = lapstatus.copy()
        for pos in range(0, len(lapstatus)):
            if lapadjust[pos] == 1:
                success = False
                while(not success):
                    # adjust this pit lap position
                    pos_adjust = get_random_choice(_adjust_model)
                    new_pos = pos + pos_adjust
                    if new_pos >= 0 and new_pos < len(lapstatus):
                        #valid
                        lapadjust[pos] = 0
                        lapadjust[new_pos] = 1
                        success = True
                        #add statistics
                        if pos_adjust not in _empirical_model:
                            _empirical_model[pos_adjust] = 1
                        else:
                            _empirical_model[pos_adjust] += 1
                    if force==False:
                        break
        _lap_adjust[carno] = lapadjust
    return _lap_adjust[carno]
def build_random_model(modeldict):
    """
    Convert a {value: probability} mapping into a sampling table.

    input:
        modeldict ; {value: probability or weight}
    return:
        model ; array of [value, cdf] rows, values ascending,
            cdf normalized so the last row is 1.0
    """
    model = np.zeros((len(modeldict), 2))
    running = 0.0
    for row, value in enumerate(sorted(modeldict)):
        running += modeldict[value]
        model[row, 0] = value
        model[row, 1] = running
    # normalize so weights need not sum to one
    model[:, 1] /= running
    return model
def print_model(model, iscdf=True):
    """
    Pretty-print a [value, cdf-or-weight] model as 'value:probability' pairs.

    input:
        model ; [value, cdf] rows (or raw weights when iscdf is False)
        iscdf ; True when column 1 already holds a CDF
    """
    ordered = model[np.argsort(model[:, 0])]
    total = 1. if iscdf else np.sum(ordered[:, 1])
    pairs = []
    prev = 0
    for value, mass in ordered:
        # CDF differences recover the per-value probability
        pairs.append((value, (mass - prev) / total))
        if iscdf:
            prev = mass
    #output
    print(['%d:%.3f'%(p[0], p[1]) for p in pairs])
def get_random_choice(model):
    """
    Draw one value from a [value, cdf] model according to its probability.

    input:
        model ; rows of [value, cdf], cdf ascending to 1.0
    return:
        the sampled value as an int
    """
    u = np.random.rand()
    # number of cdf entries strictly below u == index of the chosen bucket
    bucket = int(np.count_nonzero(model[:, 1] < u))
    return int(model[bucket, 0])
# Pit-lap shift distribution: offset -> weight (normalized inside
# build_random_model).  The zero-offset mass is deliberately small so most
# pit laps actually move.
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
_adjust_model = build_random_model(_modeldict)  # [offset, cdf] sampling table
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """
    Randomly shift every pit lap inside the forecast window.

    input:
        lap_rec ; lap-status series, only the trailing prediction_length
            entries are copied and perturbed
        force ; True keeps resampling until the shifted position stays
            inside the window; False gives up after a single draw
    return the perturbed pit-lap vector for the last prediction_length laps
    """
    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            success = False
            while(not success):
                # adjust this pit lap position
                pos_adjust = get_random_choice(_adjust_model)
                new_pos = pos + pos_adjust
                if new_pos >= 0 and new_pos < prediction_length:
                    #valid
                    lapadjust[pos] = 0
                    lapadjust[new_pos] = 1
                    success = True
                if force==False:
                    break
    return lapadjust
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
    """Randomly shift each predicted pit lap by -1/0/+1 within the window.

    lap_rec: lap-status vector; only the trailing prediction_length entries
        are copied and perturbed.
    endpos: unused; kept for signature compatibility with existing callers.
    prediction_length: size of the forecast window.

    Returns the perturbed copy of the last prediction_length entries.
    Shifts that would leave the window are dropped (the flag stays put).
    """
    adjust_model = [-1, 0, 1]
    # bug fix: removed a dead `random.choice(adjust_model)` draw whose result
    # was assigned to an unused variable (it only consumed RNG state)
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            # adjust this pit lap position
            pos_adjust = random.choice(adjust_model)
            if pos_adjust == -1 and pos - 1 >= 0:
                lapadjust[pos] = 0
                lapadjust[pos - 1] = 1
            elif pos_adjust == 1 and pos + 1 < prediction_length:
                lapadjust[pos] = 0
                lapadjust[pos + 1] = 1
    return lapadjust
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
"""
return the predicted pit status
"""
# this is the perfect empirical pit model for Indy500 2018
pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
[45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
[46, 45, 43, 48, 46, 45, 45, 43]]
pit_model = pit_model_all
if cuation_laps_instint>10:
#use low model
pred_pit_laps = random.choice(pit_model[0])
else:
pred_pit_laps = random.choice(pit_model[1])
#laps remain, fill into the future
pitpred = np.array([0 for x in range(prediction_length)])
if (pred_pit_laps > laps_instint) and (pred_pit_laps <= laps_instint + prediction_length):
pitpred[pred_pit_laps - laps_instint - 1] = 1
return pitpred
#dynamical/static feature configure (selects feat_dynamic_real content)
#FEATURE_CARID = 1
FEATURE_STATUS = 2  # dynamic covariates: track status + lap(pit) status
FEATURE_PITAGE = 4  # adds laps-in-stint (pit age) as a third covariate
def make_dataset_byevent(runs, prediction_length, freq,
                         useeid = False,
                         run_ts= COL_LAPTIME,
                         test_event = 'Indy500-2018',
                         test_cars = [],
                         use_global_dict = True,
                         oracle_mode = MODE_ORACLE,
                         feature_mode = FEATURE_STATUS,
                         half_moving_win = 0,
                         train_ratio=0.8,
                         context_ratio = 0.,
                         verbose = False
                ):
    """
    Split the per-car time series into train and test GluonTS datasets.

    input:
        runs ; index into the module-level laptime_data, or <0 for all events
        run_ts ; which feature row is the forecast target (laptime/rank/...)
        test_event ; only this event produces test windows; all other events
            go entirely to the train set
        test_cars ; NOTE(review): currently unused in this function — confirm
        oracle_mode ; bit flags controlling how the track/lap covariates of
            the forecast window are filled (oracle truth, zeros, current
            value, status models, or disturbance adjustments)
        feature_mode ; FEATURE_STATUS or FEATURE_PITAGE covariate layout
        half_moving_win ; rolling-window step: 0 -> -1, 1 -> -plen/2, 2 -> -plen
        train_ratio, context_ratio ; ts-length fractions for the split points
    return:
        train_ds, test_ds (ListDataset) plus the underlying record lists
    """
    #force
    #run_ts = _run_ts
    #test_event = _test_event
    # NOTE(review): module-level settings override the keyword arguments here
    feature_mode = _feature_mode
    context_ratio = _context_ratio

    init_track_model()
    init_adjust_track_model()

    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []

    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()

    #add statistics for adjust test
    # accumulated absolute deviation between the covariates actually fed to
    # the model and the oracle truth: [trackstatus, lapstatus]
    mae = [0,0]

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []

        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)

        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            # adjust for disturbance analysis: replace the oracle pit laps
            # with a per-car randomized copy (cached per car)
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)

            # selection of features: zero out covariates excluded by the mode
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            # NOTE(review): the statement below is an exact duplicate of the
            # previous one — harmless but redundant
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': rec[run_ts,:].astype(np.float32),
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                                     rec[COL_LAPSTATUS,:]]
                               }
                              )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1

                #bug fix, fixed the split point for all cars/ts
                for endpos in range(max_len, context_len+prediction_length,
                                    step):

                    #check if enough for this ts
                    if endpos > totallen:
                        continue

                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    # stint state at the lap just before the forecast window
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0

                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                                                     laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                # NOTE(review): pit age stays at start_pitage+1
                                # for every no-pit lap instead of incrementing
                                # per lap (unlike the TESTCURTRACK branch) —
                                # confirm whether that is intended
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0

                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec

                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec

                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))

                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': rec[run_ts,:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec]
                                      }
                                     )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': rec[run_ts,:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                      }
                                     )
                    test_rec_cnt += 1

            #add one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

        train_set.extend(_train)
        test_set.extend(_test)

    print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)

    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile, freq, prediction_length, cardinality, train_ds, test_ds):
    """Serialize the dataset bundle to datafile via pickle.

    The bundle layout is [freq, prediction_length, cardinality,
    train_ds, test_ds].
    """
    bundle = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as f:
        # highest protocol keeps large datasets compact
        pickle.dump(bundle, f, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[7]:
def predict(test_ds, predictor):
    """Run rolling evaluation of predictor over test_ds.

    Returns (tss, forecasts): the truth series and the forecast objects,
    both materialized as lists.
    """
    pair = make_evaluation_predictions(
        dataset=test_ds,
        predictor=predictor,
        num_samples=100,  # sample paths per forecast
    )
    # materialize the forecast iterator first, then the truth iterator
    forecasts = list(pair[0])
    tss = list(pair[1])
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name, trainid):
    """Load the requested predictor and run it over test_ds.

    model_name: a serialized deepAR variant ('curtrack', 'zerotrack',
        'oracle', 'oracle-laponly', 'oracle-trackonly', 'deepAR') or a
        baseline ('naive', 'zero', 'arima').
    trainid: suffix selecting the trained-model directory under
        ../models/remote/.

    Returns [tss, forecasts], or [] when the model name is unknown.
    """
    with mx.Context(mx.gpu(7)):
        pred_ret = []
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'

        # all serialized deepAR variants share a single load-and-predict
        # flow; only the model directory name differs, so keep them in a
        # table instead of six copy-pasted branches
        serialized = {
            'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
            'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
            'oracle': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
            'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
            'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
            'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
        }

        if model_name in serialized:
            modeldir = rootdir + serialized[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]

        # naive baseline: repeat the last observed value
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq=freq, prediction_length=prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]

        # zero baseline: zero change, i.e. the rank stays unchanged
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq=freq, prediction_length=prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]

        # arima baseline via the R forecast package
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq=freq,
                                           prediction_length=prediction_length, trunc_length=60)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]

        else:
            print(f'error: model {model_name} not support yet!')

        return pred_ret
# In[8]:
#calc rank
def eval_rank_bytimediff(test_ds,tss,forecasts,prediction_length):
    """
    Rank evaluation for timediff models; works for one event only
    (the test records carry only the car id as a static feature).

    input:
        test_ds ; test dataset whose records align 1:1 with tss/forecasts
        tss, forecasts ; output of make_evaluation_predictions
        prediction_length ; expected forecast horizon (sanity-checked)
    return:
        rank_ret ; list of [lap, time_diff, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [truth, forecast]}}
    """
    carlist = []

    # carno-lap# -> elapsed_time[] array
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        #global carid
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        #print('car no:', carno)
        if carno not in carlist:
            carlist.append(carno)

        # calc elapsed time
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # NOTE(review): missing f prefix — this prints the braces
            # literally instead of the values; confirm and fix separately
            print('error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []

        #forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        # median over the sample paths is used as the point forecast
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))

        timediff_array = tss[idx].values.copy()

        #save the prediction, keyed by the lap count where the forecast starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        #print('car no:', carno, 'completed_laps:', completed_laps)
        #key = '%s-%s'%(carno, completed_laps)
        #forecasts_et[key] = elapsed_time[-prediction_len:].copy()
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                                               forecast_laptime_mean.copy()]

    # calc rank
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        #print('carlist:', carlist)
        caridmap={key:idx for idx, key in enumerate(carlist)}

        #fill in data; row 0 holds the truth, row 1 the forecast
        time_diff = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        #calculate rank: a double argsort converts values into rank positions
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)

        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, time_diff, true_rank, pred_rank])

    return rank_ret,forecasts_et
#calc rank
def eval_rank_bylaptime(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    input:
        test_ds        ; must be test set for a single event, because test_ds itself does not
                         contain features to identify the eventid
        start_offset[] ; elapsed time for lap0, for one specific event
        tss,forecasts  ; forecast result
        prediction_length ;
    return:
        rank_ret     ; [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
    """
    carlist = []

    # completed_laps -> {carno: [elapsed_time, elapsed_time_pred]}
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # decode_carids is a module-level mapping carid -> carno
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        # elapsed time of lap 0 for this car
        offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: the message was missing its f-string prefix
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bugfix: keep the return arity consistent with the success path
            return [], {}

        # point forecast: mean over the sample paths
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))

        # true elapsed time: cumulative laptime plus the lap-0 offset
        laptime_array = tss[idx].values.copy()
        elapsed_time = np.cumsum(laptime_array) + offset

        # predicted elapsed time: splice the forecast into the laptime series
        laptime_array = tss[idx].values.copy()
        laptime_array[-prediction_len:] = forecast_laptime_mean
        elapsed_time_hat = np.cumsum(laptime_array) + offset

        # save the prediction, keyed by the lap where the forecast window starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]

    # calc rank: smaller elapsed time -> better rank
    rank_ret = []
    for lap in forecasts_et.keys():
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}

        # row 0: ground truth, row 1: prediction
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0]
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1]

        # double argsort converts elapsed time into rank positions
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])

    return rank_ret, forecasts_et
def eval_laptime(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    evaluate laptime forecasting directly (no rank conversion)

    input:
        test_ds        ; must be test set for a single event, because test_ds itself does not
                         contain features to identify the eventid
        start_offset[] ; kept for signature compatibility with the other eval_*
                         functions; not used here
        tss,forecasts  ; forecast result
        prediction_length ;
    return:
        rank_ret     ; [lap, lap_time, true_laptime, pred_laptime]
        forecasts_et ; {[completed_laps][carno]} ->(laptime, laptime_pred)
    """
    carlist = []

    # completed_laps -> {carno: [true_laptime, pred_laptime]}
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # decode_carids is a module-level mapping carid -> carno
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: the message was missing its f-string prefix
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bugfix: keep the return arity consistent with the success path
            return [], {}

        # point forecast: mean over the sample paths
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))

        laptime_array = tss[idx].values.copy()
        laptime_array_hat = tss[idx].values.copy()
        laptime_array_hat[-prediction_len:] = forecast_laptime_mean

        # save the prediction, keyed by the lap where the forecast window starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
                                               laptime_array_hat[-prediction_len:].copy()]

    # assemble per-lap matrices; no ranking is done for the laptime task
    rank_ret = []
    for lap in forecasts_et.keys():
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}

        # row 0: ground truth, row 1: prediction
        lap_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        true_laptime = lap_time[0]
        pred_laptime = lap_time[1]

        rank_ret.append([lap, lap_time, true_laptime, pred_laptime])

    return rank_ret, forecasts_et
#calc rank
def eval_rank(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    evaluate rank, either directly (the series is already rank/elapsed time)
    or converted from laptime forecasts.

    input:
        test_ds       ; must be test set for a single event, because test_ds itself does not
                        contain features to identify the eventid
        start_offset  ; a DataFrame of per-car lap-0 elapsed time selects
                        laptime2rank mode; any other value (e.g. 0) selects
                        rank-direct mode
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret     ; [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
    """
    carlist = []

    # completed_laps -> {carno: [elapsed_time, elapsed_time_pred]}
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # decode_carids is a module-level mapping carid -> carno
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        # laptime2rank mode: look up this car's lap-0 elapsed time
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: the message was missing its f-string prefix
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bugfix: keep the return arity consistent with the success path
            return [], {}

        # point forecast: mean over the sample paths
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))

        if isinstance(start_offset, pd.core.frame.DataFrame):
            # laptime2rank: convert laptime to elapsed time via cumsum + offset
            laptime_array = tss[idx].values.copy()
            elapsed_time = np.cumsum(laptime_array) + offset

            laptime_array = tss[idx].values.copy()
            laptime_array[-prediction_len:] = forecast_laptime_mean
            elapsed_time_hat = np.cumsum(laptime_array) + offset
        else:
            # rank-direct: the series already orders the cars
            elapsed_time = tss[idx].values.copy()
            elapsed_time_hat = tss[idx].values.copy()
            elapsed_time_hat[-prediction_len:] = forecast_laptime_mean

        # save the prediction, keyed by the lap where the forecast window starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]

    # calc rank: smaller value -> better rank
    rank_ret = []
    for lap in forecasts_et.keys():
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}

        # row 0: ground truth, row 1: prediction
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        # double argsort converts the values into rank positions
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])

    return rank_ret, forecasts_et
def get_acc(rank_ret,prediction_length, verbose = False):
    """
    Aggregate accuracy metrics over a list of rank forecast records.

    input:
        rank_ret: list of [lap, elapsed_time, true_rank, pred_rank];
                  only the rank columns ([2], [3]) are used
    return:
        ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
         (per-metric normalization counts))
        the counts allow micro/macro averaging across multiple calls
    """
    hit_top1 = 0
    hit_top1_far = 0
    hit_top5 = 0
    hit_top5_far = 0
    tau_total = 0
    err_total = 0.

    for rec in rank_ret:
        true_rank, pred_rank = rec[2], rec[3]

        # top-1: leader (rank 0) predicted correctly; over all laps and
        # over the farmost forecast lap only
        hit_top1 += np.sum((true_rank == 0) & (pred_rank == 0))
        hit_top1_far += np.sum((true_rank[:, -1] == 0) & (pred_rank[:, -1] == 0))

        # top-5 membership
        hit_top5 += np.sum((true_rank < 5) & (pred_rank < 5))
        hit_top5_far += np.sum((true_rank[:, -1] < 5) & (pred_rank[:, -1] < 5))

        # Kendall's tau rank correlation
        tao, _ = stats.kendalltau(true_rank, pred_rank)
        tau_total += tao

        # squared-error accumulation over the rank matrices
        err_total += mean_squared_error(pred_rank, true_rank)

    recnt = len(rank_ret)
    if recnt > 0:
        hit_top1 = hit_top1 * 1.0 / (recnt * prediction_length)
        hit_top1_far = hit_top1_far * 1.0 / recnt
        hit_top5 = hit_top5 * 1.0 / (5 * recnt * prediction_length)
        hit_top5_far = hit_top5_far * 1.0 / (5 * recnt)
        tau_total = tau_total / recnt
        err_total = err_total / recnt

    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', hit_top1,
              'top1acc_farmost=', hit_top1_far,
              'top5acc=', hit_top5,
              'top5acc_farmost=', hit_top5_far,
             )
        print('tau = ', tau_total,
              'rmse = ', err_total)

    return ((hit_top1, hit_top1_far, hit_top5, hit_top5_far, tau_total, err_total),
            (recnt * prediction_length, recnt, 5 * recnt * prediction_length, 5 * recnt, recnt, recnt))
# In[9]:
def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
        test_cars = [],
        datamode = MODE_ORACLE,models = ['oracle']):
    """
    Run one experiment configuration on the configured test event.

    dependency: test_event, test on one event only
    """
    metric_rows = []
    predictions = {}
    datasets = {}
    rank_results = {}

    ### create the test dataset once; it is shared by every model
    train_ds, test_ds, _, _ = make_dataset_byevent(events_id[_test_event], prediction_length, freq,
                                                   oracle_mode=datamode,
                                                   run_ts=_run_ts,
                                                   test_event=_test_event,
                                                   test_cars=test_cars,
                                                   half_moving_win=half_moving_win,
                                                   train_ratio=train_ratio)

    for model in models:
        print('exp:', inspect.stack()[0][3], 'model:', model,
              'datamode:', get_modestr(datamode), 'eval:', _exp_id)

        tss, forecasts = run_prediction_ex(test_ds, prediction_length, model,
                                           trainid=trainid)
        predictions[model] = [tss, forecasts]
        datasets[model] = test_ds

        # dispatch on the evaluation task configured for this run
        if _exp_id == 'rank':
            # rank prediction directly
            rank_ret, forecast_ret = eval_rank(test_ds, tss, forecasts, prediction_length,
                                               0)
        elif _exp_id == 'laptime2rank':
            rank_ret, forecast_ret = eval_rank(test_ds, tss, forecasts, prediction_length,
                                               global_start_offset[_test_event])
        elif _exp_id == 'timediff2rank':
            rank_ret, forecast_ret = eval_rank_bytimediff(test_ds, tss, forecasts, prediction_length)
        elif _exp_id == 'laptime':
            # evaluate laptime instead of rank
            rank_ret, forecast_ret = eval_laptime(test_ds, tss, forecasts, prediction_length,
                                                  global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break

        metrics = get_acc(rank_ret, prediction_length)

        row = [model, prediction_length, half_moving_win, get_modestr(datamode), trainid]
        row.extend(metrics[0])
        metric_rows.append(row)

        rank_results[model] = (rank_ret, forecast_ret)

    return predictions, datasets, rank_results, metric_rows
# In[10]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc, datamode='', models=[]):
    """
    Run a grid of experiments (halfmode x plen x trainid) `runs` times and
    average the metrics across runs.

    input:
        plens=[2,5,10]
        half=[False]
        #trainids = ["indy500-r0.2","indy500-r0.4","indy500"]
        trainids = ["r0.5"]
        #half=[True,False]
        #plens=[2]
        runs = 5
        train_ratio=0.5
        exp_id='mean-splitbystage-predpit'
        testfunc ; run_exp_predpit, run_exp_predtrack, run_exp ... or a string
                   (then run_exp is used with datamode/models)
    return:
        dfret ; average result of multiple runs
            dataframe['model' , 'prediction_length', 'halfmode','datamode','trainid',
            'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse',
            'top1acc_std','top1acc_farmost_std','top5acc_std','top5acc_farmost_std','tau_std','rmse_std']
        alldata_ret ; for debug
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
            pred_ret[model] -> [tss, forecasts]
            test_ds[model] -> test_ds
            rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
                forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    if plens == [] or half == [] or trainids == []:
        print("error with empty settings")
        # NOTE(review): returns None here while callers unpack two values
        return

    #testfunc or (datamode & models)
    if isinstance(testfunc,str) and (datamode == '' or models == []):
        print("error with testfunc")
        return

    allret = []
    alldata_ret = []
    for runid in range(runs):
        exp_data = []
        exp_result = []
        # full grid over halfmode x prediction length x trained-model id
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('='*10)
                    # a callable testfunc runs directly; a string selects run_exp
                    if not isinstance(testfunc,str):
                        pred_ret, test_ds, rank_ret, metric_ret = testfunc(plen, halfmode,
                                                train_ratio=train_ratio,
                                                trainid=trainid)
                    else:
                        pred_ret, test_ds, rank_ret, metric_ret = run_exp(plen, halfmode,
                                                train_ratio=train_ratio,
                                                trainid=trainid,
                                                datamode=datamode,
                                                models=models)
                    #save
                    exp_data.append((pred_ret, test_ds, rank_ret))
                    exp_result.extend(metric_ret)

        #save result
        result = pd.DataFrame(exp_result, columns = ['model' , 'prediction_length', 'halfmode',
                                            'datamode','trainid',
                                            'top1acc','top1acc_farmost','top5acc',
                                            'top5acc_farmost','tau','rmse'])
        #result['runid'] = [runid for x in range(len(result))]
        allret.append(result)
        alldata_ret.append(exp_data)

    #final: stack the six metric columns of every run into one tensor
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :,:] = ret[['top1acc','top1acc_farmost','top5acc',
                                            'top5acc_farmost','tau','rmse']].values

    #average across runs (axis 0)
    averagemat = np.mean(metrics[:,:,:], axis=0)
    stdmat = np.std(metrics[:,:,:], axis=0)

    # identifying columns are identical in every run; take them from run 0
    dfhead = allret[0][['model' , 'prediction_length', 'halfmode', 'datamode','trainid']]
    dfaverage = pd.DataFrame(averagemat, columns = ['top1acc','top1acc_farmost','top5acc',
                                            'top5acc_farmost','tau','rmse'])
    dfstd = pd.DataFrame(stdmat, columns = ['top1acc_std','top1acc_farmost_std','top5acc_std',
                                            'top5acc_farmost_std','tau_std','rmse_std'])

    dfret = pd.concat([dfhead, dfaverage, dfstd], axis=1)

    #if exp_id != '':
    #    dfret.to_csv(f'laptime2rank-evaluate-indy500-{exp_id}-result.csv', float_format='%.3f')

    return dfret, alldata_ret
def checkret_status(dataret, runid = 0, idx = 0,model='oracle'):
    """
    Print how many yellow-flag laps and pit laps fall inside the forecast
    windows of one saved test dataset.

    alldata_ret layout (for debug):
        [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
        pred_ret[model] -> [tss, forecasts]
        test_ds[model] -> test_ds
        rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
            forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    # forecast horizon comes from the first forecast's sample matrix
    _, plen = dataret[runid][idx][0][model][1][0].samples.shape
    test_ds = dataret[runid][idx][1][model]

    yfcnt = 0
    pitcnt = 0
    for test_rec in iter(test_ds):
        # lookup kept for its side effect of validating the car id mapping
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        track_rec, lap_rec = test_rec['feat_dynamic_real']
        # count flags inside the prediction window only
        yfcnt += np.sum(track_rec[-plen:])
        pitcnt += np.sum(lap_rec[-plen:])

    print('yfcnt:', yfcnt, 'pitcnt:', pitcnt)
def get_ref_oracle_testds(plens, halfs, train_ratio=0.8, test_cars = []):
    """
    Build oracle-mode reference test datasets for every
    (prediction_length, half_moving_win) combination.

    return:
        dict keyed by '<prediction_length>-<half_moving_win>' -> test dataset
    """
    testset = {}
    for prediction_length in plens:
        for half_moving_win in halfs:
            _, test_ds, _, _ = make_dataset_byevent(events_id[_test_event], prediction_length, freq,
                                                    oracle_mode=MODE_ORACLE,
                                                    run_ts=_run_ts,
                                                    test_event=_test_event,
                                                    test_cars=test_cars,
                                                    half_moving_win=half_moving_win,
                                                    train_ratio=train_ratio
                                                    )
            # key combines horizon and half-window mode
            testset['%d-%d' % (prediction_length, half_moving_win)] = test_ds

    return testset
def checkret_confusionmat(dataret, ref_testset, runid= 0, testid = '', model='oracle'):
    """
    output the 4x4 confusion matrix split by track and lap status

    Each forecast window is classified by whether it contains any yellow-flag
    laps (first digit) and any pit laps (second digit): '00','10','01','11'.
    Accuracy metrics are then computed per class and for all records ('aa').

    input:
        ref_testset  ; oracle test datasets keyed by '<plen>-<halfmode>'
                       (track/lap status features are read from these)
    return:
        DataFrame['testid','plen','type','reccnt', <get_acc metric columns>]
    """
    plen_length = len(dataret[runid])
    dflist = []
    for idx in range(plen_length):
        # forecast horizon comes from the first forecast's sample matrix
        _, plen = dataret[runid][idx][0][model][1][0].samples.shape
        test_ds = dataret[runid][idx][1][model]
        rank_ret = dataret[runid][idx][2][model][0]

        # reference key assumes half_moving_win == 0
        key = '%d-%d'%(plen,0)
        if key not in ref_testset:
            print(f'error, {key} not found in ref_testset')
            continue
        ref_oracle_testds = ref_testset[key]

        if len(ref_oracle_testds) != len(test_ds):
            print('error, size of testds mismatch', len(ref_oracle_testds), len(test_ds))
            continue

        # confusion matrix for <trackstatus, lapstatus> type: 00,01,10,11
        # lap(start lap of prediction) -> type
        lapmap = {}
        ds_iter = iter(ref_oracle_testds)
        for recid in range(len(ref_oracle_testds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            #track_rec,lap_rec = test_rec['feat_dynamic_real']
            dyna_feats = test_rec['feat_dynamic_real']
            track_rec = dyna_feats[0]
            lap_rec = dyna_feats[1]
            # count flags inside the prediction window only
            yfcnt = np.sum(track_rec[-plen:])
            pitcnt = np.sum(lap_rec[-plen:])
            #laptype = ('0' if yfcnt==0 else '1') + ('0' if pitcnt==0 else '1')
            lap = len(track_rec) - plen + 1
            if lap not in lapmap:
                #lapmap[lap] = laptype
                lapmap[lap] = (yfcnt, pitcnt)
            else:
                # aggregate counts across cars that share the same start lap
                oldtype = lapmap[lap]
                lapmap[lap] = (yfcnt + oldtype[0], pitcnt + oldtype[1])

        #split the rank_ret by laptype
        types=['00','10','01','11']
        acc_ret = []
        for laptype in types:
            check_ret = []
            for item in rank_ret:
                typecnt = lapmap[item[0]]
                thetype = ('0' if typecnt[0]==0 else '1') + ('0' if typecnt[1]==0 else '1')
                if thetype == laptype:
                    check_ret.append(item)

            # get acc
            metrics = get_acc(check_ret,plen)
            recret = [testid, plen, laptype, len(check_ret)]
            recret.extend(metrics[0])
            acc_ret.append(recret)

        #add all test ('aa' row covers every record regardless of type)
        metrics = get_acc(rank_ret,plen)
        recret = [testid, plen, 'aa', len(rank_ret)]
        recret.extend(metrics[0])
        acc_ret.append(recret)

        _dfacc = pd.DataFrame(acc_ret, columns = ['testid','plen',
                    'type','reccnt','top1acc','top1acc_farmost','top5acc',
                    'top5acc_farmost','tau','rmse'])
        dflist.append(_dfacc)

    dfacc = pd.concat(dflist, axis=0)
    return dfacc
# In[11]:
def check_testds(datamode, test_cars=[]):
    """
    Build the datasets for every configured (plen, halfmode) pair under the
    given data mode; a smoke check of dataset construction.

    NOTE(review): the datasets are built and discarded -- despite the original
    'report mae' note, no metrics are computed here yet.
    """
    for prediction_length in plens:
        for half_moving_win in half:
            # globals plens/half/train_ratio configure the grid
            train_ds, test_ds, _, _ = make_dataset_byevent(events_id[_test_event], prediction_length, freq,
                                                           oracle_mode=datamode,
                                                           run_ts=_run_ts,
                                                           test_event=_test_event,
                                                           test_cars=test_cars,
                                                           half_moving_win=half_moving_win,
                                                           train_ratio=train_ratio)
def dotest(config):
    """
    Run every (model, testmode) combination in config and collect both the
    averaged metrics and the contingency (confusion-matrix) breakdown.

    input:
        config ; {model: {testmode_name: datamode}}
    return:
        (dfret, dfacc) ; concatenated run_test results and per-type accuracy
    """
    acclist = []
    dflist = []
    for model, conf in config.items():
        for teststr, datamode in conf.items():
            # teststr doubles as the testfunc selector for run_test
            df, dataret = run_test(runs, plens, half, trainids,
                                   train_ratio, teststr, datamode=datamode, models=[model])
            # per-<trackstatus,lapstatus>-type accuracy breakdown
            acc = checkret_confusionmat(dataret, ref_testset,
                                        testid=teststr, model=model)
            dflist.append(df)
            acclist.append(acc)

    return pd.concat(dflist, axis=0), pd.concat(acclist, axis=0)
# ### init
# In[12]:
#
# configuration
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
_dataset_id = 'indy2013-2018-nocarid'
#_dataset_id = 'indy2013-2018'
_test_event = 'Indy500-2019'
_feature_mode = FEATURE_STATUS
_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...

global_start_offset = {}  # event -> DataFrame of per-car lap-0 elapsed time
global_carids = {}        # carno -> carid mapping, loaded from the pickle in init()
laptime_data = None       # raw laptime dataset, loaded in init()

freq = "1min"             # nominal pandas frequency for the gluonts series
decode_carids = {}        # carid -> carno, inverse of global_carids

years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}'
def init():
    """
    Load per-event stage data and the pickled laptime dataset, and populate
    the module-level globals used by the evaluation routines.
    """
    global global_carids, laptime_data, global_start_offset, decode_carids

    stagedata = {}
    for event in events:
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata = stagedata[event]
        # elapsed time at lap 0 is the per-car start offset for this event
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]

    import pickle
    with open(f'laptime_rank_timediff_pit-oracle-{dbid}.pickle', 'rb') as f:
        # the pickle protocol version is detected automatically
        global_carids, laptime_data = pickle.load(f, encoding='latin1')

    # inverse mapping: carid -> carno
    decode_carids = {carid: carno for carno, carid in global_carids.items()}
# In[15]:
#useeid = False
#interpolate = False
#ipstr = '-ip' if interpolate else '-noip'
#ipstr = '%s-%s'%('ip' if interpolate else 'noip', 'eid' if useeid else 'noeid')
#if useeid:
# cardinality = [len(global_carids), len(laptime_data)]
#else:
# cardinality = [len(global_carids)]
# ### oracle test
# In[16]:
### test configuration (module-level knobs read by run_test/mytest)
plens=[2,5,10]        # forecast horizons to evaluate
half=[0]              # halfwin mode settings
trainids = ["2018"]   # trained-model directory suffixes to load
#trainids = ["r0.5","r0.6"]
runs = 1              # number of repetitions to average over
ref_testset = None    # filled by mytest() with oracle reference datasets
_context_ratio = 0.   # context ratio, embedded in the output file names
train_ratio = 0.4     # train/test split ratio for dataset creation
def mytest():
    """
    Full-test entry point: run every model/datamode combination and write
    both the contingency breakdown and the overall results to csv.

    If the result files already exist the cached csvs are loaded instead.

    return:
        (dfacc, dfret)
    """
    global ref_testset
    #half=[True, False]
    #plens=[2,5,10,20,30]

    acc_output = f'{_exp_id}-evaluate-mean-splitbyevent-fulltest-contigency-d{_dataset_id}-t{_test_event}-r{runs}-c{_context_ratio}-result.csv'
    ret_output = f'{_exp_id}-evaluate-mean-splitbyevent-fulltest-all-d{_dataset_id}-t{_test_event}-r{runs}-c{_context_ratio}-result.csv'

    # resume support: skip the (expensive) run when results are on disk
    if os.path.exists(ret_output):
        print(f'{ret_output} already exists, bye')
        return pd.read_csv(acc_output), pd.read_csv(ret_output)

    # testmode name -> oracle feature mode, grouped per model
    test_config = {'oracle':
              {# features in train and test
               'fulloracle':MODE_ORACLE,
               'notracklap':MODE_NOTRACK + MODE_NOLAP,
               'laponly':MODE_ORACLE_LAPONLY,
               'trackonly':MODE_ORACLE_TRACKONLY,
               # features in test
               'fullpred':MODE_PREDTRACK + MODE_PREDPIT,
               'predtrack':MODE_PREDTRACK + MODE_ORACLE_TRACKONLY,
               'predpit':MODE_PREDPIT + MODE_ORACLE_LAPONLY,
               'curtrack':MODE_TESTCURTRACK,
               'zerotrack':MODE_TESTZERO
              },
              'deepAR':{'deepAR':MODE_ORACLE},
              'naive':{'naive':MODE_ORACLE},
              'zero':{'zero':MODE_ORACLE}
             }

    ref_testset = get_ref_oracle_testds(plens, half, train_ratio=train_ratio)
    dfret, dfacc = dotest(test_config)

    dfret.to_csv(ret_output, float_format='%.3f')
    dfacc.to_csv(acc_output, float_format='%.3f')
    #dfacc[dfacc['type']=='aa']

    return dfacc, dfret
# In[ ]:
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)

    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))

    # cmd argument parser
    usage = 'evaluate-fulltest-fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    parser.add_option("--contextratio", dest="contextratio", default=0.)

    opt, args = parser.parse_args()

    #set global parameters
    _dataset_id = opt.datasetid
    _test_event = opt.testevent
    # bugfix: --contextratio was parsed but never applied; _context_ratio is
    # embedded in the result file names built by mytest()
    _context_ratio = float(opt.contextratio)

    # the task selects which column of the dataset is forecast and how the
    # forecast is converted into ranks
    if opt.taskid == 'laptime':
        _task_id = 'laptime' # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
        _exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff'
        _run_ts = COL_TIMEDIFF
        _exp_id='timediff2rank'
    elif opt.taskid == 'rank':
        _task_id = 'rank'
        _run_ts = COL_RANK
        _exp_id='rank'
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)

    if _dataset_id=='' or _test_event=='':
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)

    # datasets built with pit-age features are flagged by their name
    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE

    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
            _task_id)

    init()
    mytest()
| 68,055 | 35.354701 | 310 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepar_simindy500.py | #!/usr/bin/env python
# coding: utf-8
# # DeepAR on simulation indy500 laptime dataset
#
# laptime dataset
# <eventid, carids, laptime (totalcars x totallaps)>
# Third-party imports
import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import logging
import os,sys
from optparse import OptionParser
import pickle
from pathlib import Path
from gluonts.dataset.common import ListDataset
from gluonts.model.deepar import DeepAREstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator
logger = logging.getLogger(__name__)

#global variables
prediction_length = 50  # forecast horizon in laps
freq = "1H"             # nominal pandas frequency for the gluonts series
def load_dataset(inputfile):
    """
    Load the pickled laptime dataset and build gluonts train/test datasets.

    Each pickle record is (eventid, carids, laptime array); one time series
    is produced per car row. Training series drop the last prediction_length
    laps, test series keep the full history.

    return:
        (train_ds, test_ds) as gluonts ListDataset objects
    """
    with open(inputfile, 'rb') as f:
        # the pickle protocol version is detected automatically
        laptime_data = pickle.load(f, encoding='latin1')

    print(f"number of runs: {len(laptime_data)}")

    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series

    train_set = []
    test_set = []
    cardinality = []

    for _data in laptime_data:
        carids = list(_data[1].values())
        laptime = _data[2]
        for rowid in range(laptime.shape[0]):
            train_set.append({'target': laptime[rowid, :-prediction_length].astype(np.float32),
                              'start': start,
                              'feat_static_cat': rowid})
        for rowid in range(laptime.shape[0]):
            test_set.append({'target': laptime[rowid, :].astype(np.float32),
                             'start': start, 'feat_static_cat': rowid})
        cardinality.append(len(carids))

    # train dataset: cut the last window of length "prediction_length"
    train_ds = ListDataset(train_set, freq=freq)
    # test dataset: use the whole dataset
    test_ds = ListDataset(test_set, freq=freq)

    return train_ds, test_ds
def plot_prob_forecasts(ts_entry, forecast_entry, outputfile):
    """
    Plot the tail of one series together with its probabilistic forecast and
    save the figure as '<outputfile>.pdf'.
    """
    tail_len = 150
    intervals = (50.0, 90.0)
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in intervals][::-1]

    fig, ax = plt.subplots(1, 1, figsize=(10, 7))
    # observed series first, then the forecast fan chart on the same axes
    ts_entry[-tail_len:].plot(ax=ax)
    forecast_entry.plot(prediction_intervals=intervals, color='g')

    plt.grid(which="both")
    plt.legend(legend, loc="upper left")
    plt.savefig(outputfile + '.pdf')
def evaluate_model(estimator, train_ds, test_ds, outputfile):
    """
    Train the estimator, persist the predictor under `outputfile`, plot one
    sample forecast and log aggregate accuracy metrics on the test set.
    """
    predictor = estimator.train(train_ds)

    # bugfix: the previous exists-check + os.mkdir raced between check and
    # create and failed for nested paths; makedirs with exist_ok covers both
    os.makedirs(outputfile, exist_ok=True)
    predictor.serialize(Path(outputfile))

    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,    # test dataset
        predictor=predictor,  # predictor
        num_samples=100,    # number of sample paths we want for evaluation
    )

    forecasts = list(forecast_it)
    tss = list(ts_it)

    # Indy500 Car 12 WillPower
    ts_entry = tss[7]
    forecast_entry = forecasts[7]
    plot_prob_forecasts(ts_entry, forecast_entry, outputfile)

    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
    logger.info(json.dumps(agg_metrics, indent=4))
def init_estimator(epochs=100):
    """
    Build the DeepAR estimator used for training.

    epochs ; number of training epochs; may arrive as a string from the
             command-line parser, so it is coerced to int here.
    """
    # robustness: optparse delivers option values as strings by default
    epochs = int(epochs)

    estimator = DeepAREstimator(
        prediction_length=prediction_length,
        context_length=2*prediction_length,
        use_feat_static_cat=True,
        cardinality=[33],
        freq=freq,
        trainer=Trainer(ctx="gpu",
                        epochs=epochs,
                        learning_rate=1e-3,
                        num_batches_per_epoch=64
                       )
    )

    # note: a SimpleFeedForwardEstimator used to be constructed here as well
    # but was never returned or used; the dead code has been removed
    return estimator
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)

    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))

    # cmd argument parser
    usage = 'deepar_simindy500.py --epochs epochs --input inputpicklefile --output outputfile'
    parser = OptionParser(usage)
    parser.add_option("--input", dest="inputfile", default='sim-indy500-laptime-2018.pickle')
    parser.add_option("--output", dest="outputfile")
    # bugfix: without type="int" optparse passes the epoch count through as a
    # string when given on the command line
    parser.add_option("--epochs", dest="epochs", default=100, type="int")

    opt, args = parser.parse_args()

    # robustness: outputfile has no default and is used as a directory name
    if not opt.outputfile:
        logger.error('--output is required')
        sys.exit(-1)

    train_ds, test_ds = load_dataset(opt.inputfile)
    estimator = init_estimator(opt.epochs)
    evaluate_model(estimator, train_ds, test_ds, opt.outputfile)
| 5,656 | 30.603352 | 118 | py |
rankpredictor | rankpredictor-master/src/indycar/model/prophet_laptime.py | import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import pickle
# load the simulated Indy500 laptime dataset:
# a list of (eventid, carids, laptime array) records
with open('sim-indy500-laptime-2018.pickle', 'rb') as f:
    # The protocol version used is detected automatically, so we do not
    # have to specify it.
    laptime_data = pickle.load(f, encoding='latin1')

print(f"number of runs: {len(laptime_data)}")

from gluonts.dataset.common import ListDataset

# forecast horizon (laps) and nominal series frequency
prediction_length = 50
freq = "5m"

start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series

train_set = []
test_set = []
cardinality = []

#_data: eventid, carids, laptime array
for _data in laptime_data:
    # train: drop the last prediction_length laps; test: full series
    _train = [{'target': _data[2][rowid, :-prediction_length].astype(np.float32), 'start': start,
               'feat_static_cat': rowid}
              for rowid in range(_data[2].shape[0]) ]
    _test = [{'target': _data[2][rowid, :].astype(np.float32), 'start': start, 'feat_static_cat': rowid}
             for rowid in range(_data[2].shape[0]) ]

    train_set.extend(_train)
    test_set.extend(_test)

train_ds = ListDataset(train_set, freq=freq)
test_ds = ListDataset(test_set, freq=freq)

from gluonts.model.prophet import ProphetPredictor

predictor = ProphetPredictor(freq= freq, prediction_length = prediction_length)
# draw forecasts for every test series
predictions = list(predictor.predict(test_ds))
| 1,440 | 32.511628 | 109 | py |
rankpredictor | rankpredictor-master/src/indycar/model/stint_simulator_shortterm_pitmodel_newfeature.py | #!/usr/bin/env python
# coding: utf-8
# ### stint simulator
# based on: Stint-Predictor-Fastrun
#
#
# long term predictor by continuously regressive forecasting at each pitstop
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random, math
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from indycar.model.pitmodel import PitModelSimple, PitModelMLP
from indycar.model.deeparw import DeepARWeightEstimator
# In[2]:
import os
# NOTE(review): `os` is already imported above (`import os,sys`); this
# re-import is harmless but redundant.
random.seed()
os.getcwd()
#GPUID = 1

# ### global constants
# In[3]:
#
# remove NaN at the tail
# there should be no nans in the middle of the ts

# Row indices of the per-car feature matrix rec[feature, lap]:
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2          # 1 = yellow flag on that lap
COL_LAPSTATUS=3            # 1 = pit stop on that lap
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5 # caution laps seen in the current stint
COL_LAPS_INSTINT= 6        # laps elapsed in the current stint (pit age)
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8
# share the memory
#COL_LAPSTATUS_PRED = 8  # for dynamic lapstatus predictions
#LAPSTATUS SAVED in forecast_et
COL_LAPSTATUS_SAVE = 0 #laptime no use
COL_CAUTION_LAPS_INSTINT_SAVE=7
COL_LAPS_INSTINT_SAVE= 8

# added new features
COL_LEADER_PITCNT = 9

# Which covariates are attached to each gluonts record:
FEATURE_STATUS = 2       # track + lap status
FEATURE_PITAGE = 4       # track + lap status + pit age
FEATURE_LEADERPITCNT = 8

# oracle mode (bit flags; combined with test_flag())
MODE_ORACLE = 1024  # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2

# oracle mode for training
MODE_NOLAP = 1
MODE_NOTRACK = 2

# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32

# disturbe analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256

# flag value -> human-readable name, used by get_modestr()
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
             MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
             MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
             MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
             MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
             MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
#
# interface with QuickTest
#
def set_laptimedata(newdata):
    """Replace the module-level ``laptime_data`` used by the dataset builders.

    Interface hook for QuickTest: callers inject a freshly built dataset here.
    """
    global laptime_data
    print('Set a new global laptime_data')
    laptime_data = newdata
#
#
#
def load_data(event, year=0):
    """Load one race's final CSV and derive per-lap views.

    Args:
        event: event name, e.g. 'Indy500-2018' (used to build the file path).
        year:  optional numeric year; when >0 it is appended to the file name.

    Returns:
        (alldata, rankdata, acldata):
            alldata  - raw records for every car,
            rankdata - first record per (car, lap) ordered by elapsed time,
            acldata  - completed-laps dataset built by make_cl_data(alldata).
    """
    # Fix: `year` is an int — the original concatenated it to a str and
    # raised TypeError whenever year > 0.
    if year > 0:
        inputfile = '../data/final/C_' + event + '-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'
    dataset = pd.read_csv(inputfile)

    final_lap = max(dataset.completed_laps)
    # cars that completed the final lap (finished the race)
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values

    # keep a full copy before filtering to finishers
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]

    # first record per (car, lap), ordered by elapsed time (stable via index)
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # NOTE: the finishers-only cl-data was computed but never used; removed.
    acldata = make_cl_data(alldata)

    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-lap completed-laps dataset for every car.

    Keeps the first record per (car, lap) ordered by elapsed time, then adds
    lap-over-lap deltas `rank_diff` and `time_diff` (zeroed at each car's
    first record so deltas never cross car boundaries).

    Returns a DataFrame with columns:
        car_number, completed_laps, rank, elapsed_time,
        rank_diff, time_diff, current_status, track_status, lap_status
    """
    # pick up data with valid rank: earliest record wins per (car, lap)
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade", "laps_behind_prec", "overall_rank", "pit_stop_count",
                          "last_pitted_lap", "start_position", "laps_led"], axis=1)

    # Fix: use .loc instead of chained assignment (df['col'][mask] = 0),
    # which triggers SettingWithCopyWarning and may silently not write.
    # The car-boundary mask is identical for both deltas, so compute it once.
    new_car = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[new_car, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[new_car, 'time_diff'] = 0

    df = uni_ds[['car_number', 'completed_laps', 'rank', 'elapsed_time',
                 'rank_diff', 'time_diff', "current_status", "track_status", "lap_status"]]
    return df
# In[5]:
def nan_helper(y):
    """Helper to handle indices and logical indices of NaNs.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, logical indices of NaNs
        - index, a function, with signature indices= index(logical_indices),
          to convert logical indices of NaNs to 'equivalent' indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x= nan_helper(y)
        >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
    """
    nan_mask = np.isnan(y)
    to_indices = lambda mask: mask.nonzero()[0]
    return nan_mask, to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
def get_modestr(a):
    """Return the comma-terminated names of every mode flag set in *a*.

    E.g. MODE_PREDTRACK | MODE_PREDPIT -> 'MODE_PREDTRACK,MODE_PREDPIT,'.
    """
    parts = ['%s,' % _mode_map[flag] for flag in _mode_map if test_flag(a, flag)]
    return ''.join(parts)
# endpos -> vector of prediction_length
_track_pred = {}
_track_true = {}

def init_track_model():
    """Drop every cached track-status prediction and its ground truth."""
    global _track_pred, _track_true
    _track_pred, _track_true = {}, {}
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """
    Predict the track status over the forecast window.

    Samples a caution length from an empirical model and, if the window
    starts inside a caution, fills the remaining yellow laps. One sample is
    cached per *endpos* so repeated calls return the same prediction.
    """
    global _track_pred, _track_true

    # this is the perfect track model for Indy500 2018
    track_model = [6, 4, 4, 5, 6, 6, 4]

    if endpos not in _track_pred:
        # count the yellow laps immediately preceding the prediction window
        seen_yellow = 0
        for back in range(1, context_len):
            if track_rec[- prediction_length - back] != 1:
                break
            seen_yellow += 1

        # sample a caution length and fill the laps it still covers
        forecast = np.array([0] * prediction_length)
        sampled_len = random.choice(track_model)
        if seen_yellow > 0 and sampled_len > seen_yellow:
            forecast[:(sampled_len - seen_yellow)] = 1

        _track_pred[endpos] = forecast
        _track_true[endpos] = track_rec[- prediction_length:].copy()

    return _track_pred[endpos]
# endpos -> vector of prediction_length
_track_adjust = {}

def init_adjust_track_model():
    """Drop every cached per-endpos track-status adjustment."""
    global _track_adjust
    _track_adjust = dict()
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """
    Randomly perturb the oracle track status by +/-1 lap at the caution tail.

    input:
        track_rec ; full track-status vector up to endpos
        endpos ; split point; one adjustment is cached per endpos
        prediction_length ; size of the forecast window
        tailpos ; <0 end pos of 1 (negative index of the last caution lap)
    return the predicted track status (forecast-window slice)
    """
    global _track_adjust

    # this is the perfect track model for Indy500 2018
    # -1: shorten the caution by one lap, 0: keep, +1: shift tail one lap later
    track_model = [-1,0,1]

    if endpos in _track_adjust:
        # cached: repeated calls for the same split return the same sample
        return _track_adjust[endpos]
    else:
        yflap_adjust = random.choice(track_model)

        #laps remain, fill into the future
        trackadjust = track_rec[-prediction_length:].copy()
        if yflap_adjust == -1:
            trackadjust[tailpos] = 0
        elif yflap_adjust == 1:
            # NOTE(review): this clears tailpos then sets tailpos+1, which
            # moves (not extends) the last caution lap and can leave a gap
            # in the caution run — confirm this is the intended disturbance.
            trackadjust[tailpos] = 0
            if (tailpos + 1) <= -1:
                trackadjust[tailpos+1] = 1

        _track_adjust[endpos] = trackadjust
        return trackadjust
# carno -> lap_status
_lap_adjust = {}
_empirical_model = {}

def init_adjust_pitmodel():
    """Reset the per-car adjusted-lap-status cache and the empirical
    adjustment statistics.

    Fix: the original declared only ``global _lap_adjust``, so the
    ``_empirical_model = {}`` assignment created a function-local variable
    and the module-level statistics dict was never cleared.
    """
    global _lap_adjust, _empirical_model
    _lap_adjust = {}
    _empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """
    Build (once per car) a randomly perturbed copy of the true lap status.

    Each pit lap is shifted by a random offset drawn from _adjust_model;
    the result is cached in _lap_adjust so repeated calls for the same car
    return the same perturbation.

    input:
        carno; car number (cache key)
        lapstatus ; the trueth (0/1 pit indicator per lap)
        force ; True = resample until the shifted position is in range;
                False = try once and give up on an out-of-range shift
    return:
        the cached adjusted lap-status vector for this car
    """
    if carno not in _lap_adjust:
        #adjust it
        lapadjust = lapstatus.copy()
        for pos in range(0, len(lapstatus)):
            if lapadjust[pos] == 1:
                success = False
                while(not success):
                    # adjust this pit lap position
                    pos_adjust = get_random_choice(_adjust_model)

                    new_pos = pos + pos_adjust

                    if new_pos >= 0 and new_pos < len(lapstatus):
                        #valid
                        # NOTE(review): if new_pos already holds a pit, two
                        # pits merge into one here — confirm acceptable.
                        lapadjust[pos] = 0
                        lapadjust[new_pos] = 1
                        success = True

                        #add statistics
                        # (mutates the module-level dict in place; no
                        # `global` needed because it is never rebound here)
                        if pos_adjust not in _empirical_model:
                            _empirical_model[pos_adjust] = 1
                        else:
                            _empirical_model[pos_adjust] += 1

                    if force==False:
                        break

        _lap_adjust[carno] = lapadjust

    return _lap_adjust[carno]
def build_random_model(modeldict):
    """
    Turn a {value: probability} mapping into a sampling table.

    input:
        modeldict ; {val: probability} (weights need not sum to 1)
    return:
        model ; array of [val, cdf] rows sorted by val, cdf normalized to 1
    """
    ordered_vals = sorted(modeldict.keys())
    weights = [modeldict[v] for v in ordered_vals]
    cdf = np.cumsum(weights)
    # normalize so the last entry is exactly 1
    model = np.column_stack((ordered_vals, cdf / cdf[-1]))
    return model
def print_model(model, iscdf=True):
    """
    Print a sampling table as '<val>:<probability>' pairs.

    input:
        model ; [val, cdf] rows (or [val, weight] rows when iscdf=False)
        iscdf ; True  = second column is a CDF, print successive differences;
                False = second column is raw weights, print them normalized
    """
    ordered = model[np.argsort(model[:, 0])]
    prev_cdf = 0
    total = 1. if iscdf else np.sum(ordered[:, 1])
    pairs = []
    for val, p in ordered:
        pairs.append((val, (p - prev_cdf) / total))
        if iscdf:
            prev_cdf = p
    #output
    print(['%d:%.3f' % (x[0], x[1]) for x in pairs])
def get_random_choice(model):
    """
    Draw one value from a sampling table.

    input:
        model ; [val, cdf] rows sorted by val, cdf normalized to 1
    return:
        an int value sampled according to the table's probabilities
    """
    u = np.random.rand()
    # number of cdf entries strictly below u == index of the chosen row
    row = int(np.sum(model[:, 1] < u))
    return int(model[row, 0])
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
# Pit-shift distribution: move a pit lap by -2..+2 laps. The weight for 0
# (no shift) is deliberately small so most pits get perturbed.
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
_adjust_model = build_random_model(_modeldict)
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """
    Randomly shift each pit lap inside the forecast window.

    input:
        lap_rec ; full 0/1 pit-status vector; only the last
                  prediction_length entries are adjusted
        prediction_length ; size of the forecast window
        force ; True = resample until the shifted position is in range;
                False = try once and keep the pit in place on failure
    return the predicted lap status (forecast-window slice)
    """
    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            success = False
            while(not success):
                # adjust this pit lap position
                pos_adjust = get_random_choice(_adjust_model)

                new_pos = pos + pos_adjust

                if new_pos >= 0 and new_pos < prediction_length:
                    #valid
                    # NOTE(review): shifting onto an existing pit merges the
                    # two pits into one — confirm acceptable.
                    lapadjust[pos] = 0
                    lapadjust[new_pos] = 1
                    success = True

                if force==False:
                    break

    return lapadjust
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
    """
    Shift each pit lap inside the forecast window by at most one lap.

    Per pit lap, draw -1/0/+1 uniformly; out-of-window shifts are dropped
    (the pit stays where it is), so the number of pits is preserved.

    input:
        lap_rec ; full 0/1 pit-status vector; only the last
                  prediction_length entries are adjusted
        endpos ; unused; kept for interface compatibility
        prediction_length ; size of the forecast window
    return the predicted lap status (forecast-window slice)
    """
    adjust_model = [-1, 0, 1]
    # Fix: removed the dead `lap_adjust = random.choice(adjust_model)`
    # assignment — its result was never read and it only consumed RNG state.

    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            # adjust this pit lap position
            pos_adjust = random.choice(adjust_model)

            if pos_adjust == -1:
                if (pos - 1 >= 0):
                    lapadjust[pos] = 0
                    lapadjust[pos - 1] = 1
            elif pos_adjust == 1:
                if (pos + 1 < prediction_length):
                    lapadjust[pos] = 0
                    lapadjust[pos + 1] = 1

    return lapadjust
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
    """
    Predict the pit-stop laps in the forecast window by sampling an
    empirical stint-length model (Indy500 2018, all cars).

    input:
        cuation_laps_instint ; caution laps seen in the current stint
                               (name kept as-is for interface compatibility)
        laps_instint ; laps already run in the current stint
        prediction_length ; size of the forecast window
    return:
        0/1 vector of length prediction_length with at most one pit lap
    """
    # this is the perfect empirical pit model for Indy500 2018
    # [0]: stint lengths under heavy caution, [1]: mostly-green stints
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
                    [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    # Fix: dropped the unused `pit_model_top8` table that was rebuilt on
    # every call; for reference it was
    #   [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
    #    [46, 45, 43, 48, 46, 45, 45, 43]]
    pit_model = pit_model_all

    if cuation_laps_instint > 10:
        #use low model
        pred_pit_laps = random.choice(pit_model[0])
    else:
        pred_pit_laps = random.choice(pit_model[1])

    #laps remain, fill into the future
    pitpred = np.array([0 for x in range(prediction_length)])
    if (pred_pit_laps > laps_instint) and (pred_pit_laps <= laps_instint + prediction_length):
        pitpred[pred_pit_laps - laps_instint - 1] = 1

    return pitpred
def make_dataset_byevent(runs, prediction_length, freq,
                       useeid = False,
                       run_ts= COL_LAPTIME,
                       test_event = 'Indy500-2018',
                       test_cars = None,
                       use_global_dict = True,
                       oracle_mode = MODE_ORACLE,
                       half_moving_win = 0,
                       train_ratio=0.8,
                       log_transform = False,
                       context_ratio = 0.,
                       verbose = False
                ):
    """
    split the ts to train and test part by the ratio

    input:
        runs        ; index into the global laptime_data (<0 = all events)
        oracle_mode: false to simulate prediction in real by
             set the covariates of track and lap status as nan in the testset
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
        test_cars   ; optional whitelist of car numbers kept in the test set
                      (fix: default was a shared mutable list, now None)
    return:
        train_ds, test_ds ; gluonts ListDataset objects
        train_set, test_set ; the underlying record lists
    """
    if test_cars is None:
        test_cars = []

    # NOTE(review): these parameters are deliberately overridden by the
    # module globals configured by the driver — the passed arguments for
    # run_ts/test_event are ignored; confirm before relying on them.
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode

    init_track_model()
    init_adjust_track_model()

    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series

    train_set = []
    test_set = []

    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()

    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []

        # only the configured test event is processed here; all other
        # events are skipped entirely
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)

        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        if verbose:
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            #check carno in test_cars
            if len(test_cars)>0 and carno not in test_cars:
                continue

            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)

            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)

            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                   rec[COL_LAPSTATUS,:]]
                             }
                              )
            else:
                # multiple test ts: rolling window over the tail of the ts
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1

                #bug fix, fixed the split point for all cars/ts
                for endpos in range(max_len, context_len+prediction_length,
                                    step):

                    #check if enough for this ts
                    if endpos > totallen:
                        continue

                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as zero
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec

                    if test_flag(oracle_mode, MODE_PREDPIT):
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                laps_instint,prediction_length)

                        #for pitage, use the predicted lap info to update pitage
                        # NOTE(review): start_pitage is not incremented per
                        # green lap here, so every post-pit lap gets the same
                        # pitage value — confirm intended.
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0

                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec

                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]

                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))

                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                              )
                    test_rec_cnt += 1

            #add one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

        train_set.extend(_train)
        test_set.extend(_test)

    print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)

    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile,freq, prediction_length, cardinality, train_ds, test_ds):
    """Pickle the dataset bundle [freq, prediction_length, cardinality,
    train_ds, test_ds] to *datafile* using the highest pickle protocol."""
    bundle = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as f:
        pickle.dump(bundle, f, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[6]:
def predict(test_ds,predictor):
    """Run a gluonts backtest over *test_ds* with *predictor*.

    Returns (tss, forecasts): the ground-truth series and the sampled
    forecasts (100 sample paths each), both materialized as lists.
    """
    fc_iter, ts_iter = make_evaluation_predictions(
        dataset=test_ds,
        predictor=predictor,
        num_samples=100,
    )
    forecasts = list(fc_iter)
    tss = list(ts_iter)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name,trainid):
    """Load a model by name and run predictions over *test_ds*.

    Args:
        test_ds: gluonts dataset to predict on.
        prediction_length: forecast horizon (selects the model file).
        model_name: one of the serialized deepAR variants ('curtrack',
            'zerotrack', 'oracle', 'oracle-laponly', 'oracle-trackonly',
            'deepAR') or a baseline ('naive', 'zero', 'arima').
        trainid: training-run id used in the model directory path.

    Returns:
        [tss, forecasts] on success, [] for an unknown model name.
    """
    with mx.Context(mx.gpu(7)):
        pred_ret = []

        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'

        # The serialized deepAR variants share the same load-and-predict
        # path; only the model directory name differs, so dispatch via a
        # table instead of six copy-pasted branches.
        serialized_models = {
            'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
            'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
            'oracle': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
            'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
            'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
            'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
        }

        if model_name in serialized_models:
            modeldir = rootdir + serialized_models[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]

        # naive
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]

        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]

        # arima
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                                    prediction_length = prediction_length,trunc_length=60)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]

        else:
            print(f'error: model {model_name} not support yet!')

        return pred_ret
def load_model(prediction_length, model_name,trainid,epochs=1000, exproot='../models/remote'):
    """Load (or construct) a predictor by name without running it.

    Returns the predictor object; for serialized models it is deserialized
    from ``{exproot}/{_dataset_id}/{_task_id}-{trainid}/<model-name>``.
    The ``epochs`` argument is only honored by the 'oracle' branch.
    """
    with mx.Context(mx.gpu(7)):
        pred_ret = []

        #rootdir = f'../models/{exproot}/{_dataset_id}/{_task_id}-{trainid}/'
        rootdir = f'{exproot}/{_dataset_id}/{_task_id}-{trainid}/'

        # deepAR-Oracle
        if model_name == 'curtrack':
            model=f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')

        elif model_name == 'zerotrack':
            model=f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')

        #deeparw-oracle
        elif model_name == 'weighted-oracle':
            model=f'deepARW-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')

        # deepAR-Oracle
        # NOTE(review): 'oracle' loads a deepARW (weighted) model and is the
        # only branch that honors `epochs` — a debugging override per the
        # original comment; confirm before treating as the plain oracle.
        elif model_name == 'oracle':
            #
            # debug for weighted model
            #
            #model=f'deepARW-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}'
            model=f'deepARW-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'

            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...{model}...done!, ctx:{predictor.ctx}')

        # deepAR-Oracle
        elif model_name == 'oracle-laponly':
            model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')

        # deepAR-Oracle
        elif model_name == 'oracle-trackonly':
            model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')

        # deepAR
        elif model_name == 'deepAR':
            model=f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')

        # naive
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)

        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)

        # arima
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                                    prediction_length = prediction_length,trunc_length=60)

        else:
            print(f'error: model {model_name} not support yet!')

        return predictor
# In[7]:
#calc rank
#calc rank
def eval_rank_bytimediff(test_ds,tss,forecasts,prediction_length):
    """
    Evaluate ranks from timediff forecasts (works for one event only).

    input:
        test_ds ; test set for a single event (records carry the car id in
                  feat_static_cat)
        tss, forecasts ; gluonts backtest output, aligned with test_ds
        prediction_length ; expected forecast horizon
    return:
        rank_ret ; list of [lap, time_diff, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [true, pred]}}
        (on a horizon mismatch, returns [] only — callers must check)
    """
    carlist = []

    # carno-lap# -> timediff array
    forecasts_et = dict()

    ds_iter =  iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        #global carid
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # fix: the f-prefix was missing, so the braces printed verbatim
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []

        # use the median of the sample paths as the point forecast
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))

        timediff_array = tss[idx].values.copy()

        #save the prediction, keyed by the lap count at the split point
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                                           forecast_laptime_mean.copy()]

    # calc rank: at each split, sort cars by timediff to obtain ranks
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}

        #fill in data
        time_diff = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        #calculate rank: double argsort turns values into rank positions
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)

        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, time_diff, true_rank, pred_rank])

    return rank_ret,forecasts_et
#calc rank
#calc rank
def eval_laptime(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    evaluate laptime forecasting
    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                contain features to identify the eventid
        start_offset[]; elapsed time for lap0 (currently unused; kept for
                interface compatibility with the rank evaluators)
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, lap_time, true_laptime, pred_laptime]
        forecasts_et ; {[completed_laps][carno]} ->(laptime, laptime_pred)
        (on a horizon mismatch, returns [] only — callers must check)
    """
    carlist = []

    # carno-lap# -> laptime array
    forecasts_et = dict()

    ds_iter =  iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        #global carid
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # fix: the f-prefix was missing, so the braces printed verbatim
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []

        # use the mean of the sample paths as the point forecast
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))

        laptime_array = tss[idx].values.copy()
        # copy the truth, then overwrite the forecast window with predictions
        laptime_array_hat = tss[idx].values.copy()
        laptime_array_hat[-prediction_len:] = forecast_laptime_mean

        #save the prediction, keyed by the lap count at the split point
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
                                           laptime_array_hat[-prediction_len:].copy()]

    # assemble per-split matrices [true/pred, car, lap]
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}

        #fill in data
        lap_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        true_laptime = lap_time[0]
        pred_laptime = lap_time[1]

        rank_ret.append([lap, lap_time, true_laptime, pred_laptime])

    return rank_ret,forecasts_et
#calc rank
def eval_rank(test_ds, tss, forecasts, prediction_length, start_offset):
    """
    Evaluate rank by laptime forecasting.

    input:
        test_ds ; must be the test set for a single event, because test_ds itself
            does not contain features to identify the eventid
        start_offset ; when a DataFrame, the per-car elapsed time at lap0 for one
            specific event (laptime2rank mode); otherwise the target is ranked
            directly (rank-direct mode)
        tss, forecasts ; forecast result
        prediction_length ; expected forecast horizon
    return:
        rank_ret ; list of [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [elapsed_time, elapsed_time_pred]}}
    """
    carlist = []
    # completed_laps -> {carno: [true elapsed time, predicted elapsed time]}
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the static category id back to the car number (global mapping)
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        # laptime2rank mode: fetch the elapsed-time offset of this car at lap 0
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        # sanity check: forecast sample width must match the requested horizon
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: message was missing the f-string prefix and printed literal braces
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []
        # point forecast: mean or median over the sample paths (global switch)
        if _use_mean:
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))
        else:
            forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))
        if isinstance(start_offset, pd.core.frame.DataFrame):
            # laptime2rank: convert laptime into cumulative elapsed time
            laptime_array = tss[idx].values.copy()
            elapsed_time = np.cumsum(laptime_array) + offset
            laptime_array = tss[idx].values.copy()
            laptime_array[-prediction_len:] = forecast_laptime_mean
            elapsed_time_hat = np.cumsum(laptime_array) + offset
        else:
            # rank-direct: the target itself is the quantity to rank
            elapsed_time = tss[idx].values.copy()
            elapsed_time_hat = tss[idx].values.copy()
            elapsed_time_hat[-prediction_len:] = forecast_laptime_mean
        # lap number at which this prediction window starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]
    # calc rank: order the cars at each lap by (predicted) elapsed time
    rank_ret = []
    for lap in forecasts_et.keys():
        # cars that have a forecast window starting at this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        # double argsort turns times into 0-based ranks per column
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])
    return rank_ret, forecasts_et
def get_acc(rank_ret, prediction_length, verbose=False):
    """
    Aggregate rank-accuracy metrics over all prediction windows.

    input:
        rank_ret: list of [lap, elapsed_time, true_rank, pred_rank]; only the
            [2] (true_rank) and [3] (pred_rank) columns are used
        prediction_length: width of each prediction window
        verbose: print the metrics when True
    return:
        ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
         (record count backing each metric, same order))
        the result can be used to calculate micro/macro metrics
    """
    # accumulators over all windows
    top1acc = 0
    top1acc_farmost = 0
    top5acc = 0
    top5acc_farmost = 0
    tau = 0
    rmse = 0.
    mae = 0.
    for rec in rank_ret:
        trueRank = rec[2]
        predRank = rec[3]
        # top1: rank 0 predicted as rank 0, counted over every predicted lap
        top1acc += np.sum((trueRank==0) & (predRank==0))
        # "farmost" variants only look at the last (farthest-ahead) predicted lap
        top1acc_farmost += np.sum((trueRank[:,-1]==0) & (predRank[:,-1]==0))
        # top5: both ranks inside the top five
        top5acc += np.sum((trueRank<5) & (predRank<5))
        top5acc_farmost += np.sum((trueRank[:,-1]<5) & (predRank[:,-1]<5))
        # Kendall's tau over the full rank matrices
        tao, _ = stats.kendalltau(trueRank, predRank)
        tau += tao
        # rmse of the rank values
        rmse += mean_squared_error(predRank, trueRank)
        # mae: summed absolute rank error per window (normalized per window below)
        mae += np.sum(np.abs(predRank - trueRank))
    recnt = len(rank_ret)
    if recnt > 0:
        # normalize: per predicted lap for top1/top5, per window for the rest
        top1acc = top1acc *1.0/ (recnt*prediction_length)
        top1acc_farmost = top1acc_farmost *1.0/ recnt
        top5acc = top5acc *1.0/ (5*recnt*prediction_length)
        top5acc_farmost = top5acc_farmost *1.0/ (5*recnt)
        tau = tau/recnt
        rmse = rmse/recnt
        mae = mae/recnt
    # debug only: when forecasting lap status directly, report mae in the tau slot
    if _run_ts == COL_LAPSTATUS:
        tau = mae
    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', top1acc,
              'top1acc_farmost=', top1acc_farmost,
              'top5acc=', top5acc,
              'top5acc_farmost=', top5acc_farmost,
              )
        print('tau = ', tau,
              'rmse = ', rmse,
              'mae = ', mae)
    return ((top1acc,top1acc_farmost,top5acc,top5acc_farmost,tau,rmse),
            (recnt*prediction_length,recnt,5*recnt*prediction_length,5*recnt,recnt,recnt))
# In[ ]:
# In[8]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc, datamode='', models=[]):
    """
    Run an experiment grid several times and average the metrics.

    input:
        runs ; number of repeated runs to average over
        plens ; prediction lengths to test, e.g. [2,5,10]
        half ; half_moving_win modes, e.g. [False]
        trainids ; e.g. ["indy500-r0.2","indy500-r0.4","indy500"] or ["r0.5"]
        train_ratio ; e.g. 0.5
        testfunc ; run_exp_predpit, run_exp_predtrack, run_exp ...; when a string
            is passed, run_exp is called with datamode/models instead
        datamode, models ; required only when testfunc is a string
    return:
        dfret ; average result of multiple runs
            dataframe['model' , 'prediction_length', 'halfmode','datamode','trainid',
               'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse',
               'top1acc_std','top1acc_farmost_std','top5acc_std','top5acc_farmost_std','tau_std','rmse_std']
        alldata_ret ; for debug
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
            pred_ret[model] -> [tss, forecasts]
            test_ds[model] -> test_ds
            rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
            forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    # guard: an empty grid would produce nothing to average
    if plens == [] or half == [] or trainids == []:
        print("error with empty settings")
        return
    # a string testfunc means "use run_exp", which needs datamode & models
    if isinstance(testfunc, str) and (datamode == '' or models == []):
        print("error with testfunc")
        return
    allret = []
    alldata_ret = []
    for runid in range(runs):
        exp_data = []
        exp_result = []
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('='*10)
                    # dispatch: callable testfunc vs run_exp with explicit mode
                    if not isinstance(testfunc, str):
                        pred_ret, test_ds, rank_ret, metric_ret = testfunc(plen, halfmode,
                                train_ratio=train_ratio,
                                trainid=trainid)
                    else:
                        pred_ret, test_ds, rank_ret, metric_ret = run_exp(plen, halfmode,
                                train_ratio=train_ratio,
                                trainid=trainid,
                                datamode=datamode,
                                models=models)
                    # keep raw artifacts for debugging and metrics for averaging
                    exp_data.append((pred_ret, test_ds, rank_ret))
                    exp_result.extend(metric_ret)
        # one result frame per run
        result = pd.DataFrame(exp_result, columns = ['model' , 'prediction_length', 'halfmode',
                'datamode','trainid',
                'top1acc','top1acc_farmost','top5acc',
                'top5acc_farmost','tau','rmse'])
        allret.append(result)
        alldata_ret.append(exp_data)
    # stack the numeric metrics of all runs into (runs, rows, 6)
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :,:] = ret[['top1acc','top1acc_farmost','top5acc',
                'top5acc_farmost','tau','rmse']].values
    # mean/std across runs; the descriptive columns come from the first run
    averagemat = np.mean(metrics[:,:,:], axis=0)
    stdmat = np.std(metrics[:,:,:], axis=0)
    dfhead = allret[0][['model' , 'prediction_length', 'halfmode', 'datamode','trainid']]
    dfaverage = pd.DataFrame(averagemat, columns = ['top1acc','top1acc_farmost','top5acc',
            'top5acc_farmost','tau','rmse'])
    dfstd = pd.DataFrame(stdmat, columns = ['top1acc_std','top1acc_farmost_std','top5acc_std',
            'top5acc_farmost_std','tau_std','rmse_std'])
    dfret = pd.concat([dfhead, dfaverage, dfstd], axis=1)
    return dfret, alldata_ret
def checkret_status(dataret, runid=0, idx=0, model='oracle'):
    """
    Print the yellow-flag and pit-lap counts found inside the prediction
    windows of one stored test set.

    input (alldata_ret layout from run_test):
        dataret ; [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
            where pred_ret[model] -> [tss, forecasts], test_ds[model] -> test_ds
        runid, idx ; which run / which grid cell to inspect
        model ; model key into pred_ret / test_ds
    """
    run_slot = dataret[runid][idx]
    # prediction length is recovered from the forecast sample shape
    _, plen = run_slot[0][model][1][0].samples.shape
    ds = run_slot[1][model]
    yellow_total = 0
    pit_total = 0
    for rec in ds:
        # lookup kept for its validation side effect (KeyError on unknown id)
        _ = decode_carids[rec['feat_static_cat'][0]]
        track_feat, lap_feat = rec['feat_dynamic_real']
        yellow_total += np.sum(track_feat[-plen:])
        pit_total += np.sum(lap_feat[-plen:])
    print('yfcnt:', yellow_total, 'pitcnt:', pit_total)
def get_ref_oracle_testds(plens, halfs, train_ratio=0.8, test_cars=None):
    """
    Build a reference oracle test set for every (prediction_length,
    half_moving_win) combination of the test event.

    input:
        plens ; prediction lengths
        halfs ; half_moving_win modes
        train_ratio ; split ratio forwarded to make_dataset_byevent
        test_cars ; optional list of car numbers (None means all cars)
    return:
        testset ; dict keyed "<plen>-<halfwin>" -> test_ds
    """
    # bugfix: avoid the mutable default argument `test_cars=[]`
    if test_cars is None:
        test_cars = []
    testset = {}
    for prediction_length in plens:
        for half_moving_win in halfs:
            # only the test split is needed; train split is discarded
            train_ds, test_ds, _, _ = make_dataset_byevent(events_id[_test_event], prediction_length, freq,
                                                           oracle_mode=MODE_ORACLE,
                                                           run_ts=_run_ts,
                                                           test_cars=test_cars,
                                                           half_moving_win=half_moving_win,
                                                           train_ratio=train_ratio)
            key = '%d-%d' % (prediction_length, half_moving_win)
            testset[key] = test_ds
    return testset
def checkret_confusionmat(dataret, ref_testset, runid=0, testid='', model='oracle'):
    """
    Output the 4x4 confusion matrix split by track and lap status.

    input:
        dataret ; alldata_ret from run_test
        ref_testset ; oracle test sets keyed "<plen>-<halfwin>"
            (see get_ref_oracle_testds); used to recover the true
            track/lap status inside each prediction window
        runid ; which run of dataret to analyze
        testid ; label copied into every output row
        model ; model key into pred_ret / test_ds / rank_ret
    return:
        dfacc ; DataFrame ['testid','plen','type','reccnt', metrics...] with one
            row per <track,lap> type ('00','10','01','11') plus an 'aa' row for
            all windows, per prediction length
    """
    plen_length = len(dataret[runid])
    dflist = []
    for idx in range(plen_length):
        # recover prediction length from the forecast sample shape
        _, plen = dataret[runid][idx][0][model][1][0].samples.shape
        test_ds = dataret[runid][idx][1][model]
        rank_ret = dataret[runid][idx][2][model][0]
        key = '%d-%d'%(plen,0)
        if key not in ref_testset:
            print(f'error, {key} not found in ref_testset')
            continue
        ref_oracle_testds = ref_testset[key]
        # the reference and analyzed test sets must line up record-for-record
        if len(ref_oracle_testds) != len(test_ds):
            print('error, size of testds mismatch', len(ref_oracle_testds), len(test_ds))
            continue
        # confusion matrix for <trackstatus, lapstatus> type: 00,01,10,11
        # lapmap: start lap of prediction -> accumulated (yellow count, pit count)
        lapmap = {}
        ds_iter = iter(ref_oracle_testds)
        for recid in range(len(ref_oracle_testds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            track_rec,lap_rec = test_rec['feat_dynamic_real']
            yfcnt = np.sum(track_rec[-plen:])
            pitcnt = np.sum(lap_rec[-plen:])
            lap = len(track_rec) - plen + 1
            if lap not in lapmap:
                lapmap[lap] = (yfcnt, pitcnt)
            else:
                # accumulate over all cars whose window starts at the same lap
                oldtype = lapmap[lap]
                lapmap[lap] = (yfcnt + oldtype[0], pitcnt + oldtype[1])
        # split rank_ret by lap type and score each subset separately
        types=['00','10','01','11']
        acc_ret = []
        for laptype in types:
            check_ret = []
            for item in rank_ret:
                typecnt = lapmap[item[0]]
                # '1' when any yellow flag / any pit occurs inside the window
                thetype = ('0' if typecnt[0]==0 else '1') + ('0' if typecnt[1]==0 else '1')
                if thetype == laptype:
                    check_ret.append(item)
            metrics = get_acc(check_ret,plen)
            recret = [testid, plen, laptype, len(check_ret)]
            recret.extend(metrics[0])
            acc_ret.append(recret)
        # 'aa' row: metrics over every window regardless of type
        metrics = get_acc(rank_ret,plen)
        recret = [testid, plen, 'aa', len(rank_ret)]
        recret.extend(metrics[0])
        acc_ret.append(recret)
        _dfacc = pd.DataFrame(acc_ret, columns = ['testid','plen',
                'type','reccnt','top1acc','top1acc_farmost','top5acc',
                'top5acc_farmost','tau','rmse'])
        dflist.append(_dfacc)
    dfacc = pd.concat(dflist, axis=0)
    return dfacc
# In[9]:
def check_testds(datamode, test_cars=[]):
    """
    Build the dataset for every (prediction_length, half_moving_win)
    combination of the global grid.

    NOTE(review): the datasets are discarded — this appears to exist only to
    trigger the reporting inside make_dataset_byevent ("report mae, etc").
    """
    for plen in plens:
        for halfwin in half:
            # returned splits intentionally unused
            make_dataset_byevent(events_id[_test_event], plen, freq,
                                 oracle_mode=datamode,
                                 run_ts=_run_ts,
                                 test_cars=test_cars,
                                 half_moving_win=halfwin,
                                 train_ratio=train_ratio)
def dotest(config):
    """
    Run every configured (model, testfunc) combination and collect both the
    averaged run_test metrics and the confusion-matrix accuracy split.

    input:
        config ; {model: {testfunc_or_name: datamode}}
    return:
        dfret ; concatenated run_test result frames
        dfacc ; concatenated checkret_confusionmat frames
    """
    result_frames = []
    acc_frames = []
    for model, conf in config.items():
        for testfunc, datamode in conf.items():
            df, dataret = run_test(runs, plens, half, trainids,
                                   train_ratio, testfunc,
                                   datamode=datamode, models=[model])
            acc = checkret_confusionmat(dataret, ref_testset,
                                        testid=testfunc, model=model)
            result_frames.append(df)
            acc_frames.append(acc)
    dfret = pd.concat(result_frames, axis=0)
    dfacc = pd.concat(acc_frames, axis=0)
    return dfret, dfacc
#
# simulator
#
def get_pitlaps(verbose=True, prediction_length=2):
    """
    Collect pit-lap info from COL_LAPSTATUS of the test event.

    input (module globals):
        laptime_data ;
        _test_event ;
        events ;
        _train_len ; minimum laps for a ts (otherwise, discard)
        global_carids ; carno -> carid mapping
        _inlap_status ; 0 = pit laps only, 1/2 = every other flagged lap is an
            in-lap (before/after the stop) and is filtered out
        _include_endpit ; append the final lap of each car as a "pit"
    return:
        ret_pitlaps ; sorted, deduplicated laps at which some car pits
        all_pitlaps ; dict carno -> that car's pit laps
        max_lap ; longest ts length in the test event
    """
    run_ts = _run_ts
    all_pitlaps = {}  # carno -> pitstops
    max_lap = 0
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        # statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status, timediff]
            rec = _data[2][rowid].copy()
            # remove nan (only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < _train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            # laps flagged 1 in COL_LAPSTATUS are pit stops
            lap_status = rec[COL_LAPSTATUS, :]
            pitstops = np.where(lap_status == 1)[0]
            # filter out inlaps (when _inlap_status > 0); assumes flagged laps
            # come in (inlap, pitlap) pairs — TODO confirm against data builder
            if _inlap_status !=0:
                if _inlap_status == 1:
                    # inlap precedes the stop: keep odd positions
                    pitstops_tmp = [pitstops[x] for x in range(1, len(pitstops), 2)]
                    pitstops = pitstops_tmp
                elif _inlap_status == 2:
                    # inlap follows the stop: keep even positions
                    pitstops_tmp = [pitstops[x] for x in range(0, len(pitstops), 2)]
                    pitstops = pitstops_tmp
            all_pitlaps[carno] = list(pitstops)
            # append the end lap as an implicit pit
            if _include_endpit:
                all_pitlaps[carno].append(totallen-1)
    # merge all cars' pit laps into one sorted, deduplicated list
    allset = []
    for l in all_pitlaps.keys():
        allset.extend(all_pitlaps[l])
    ret_pitlaps = sorted(list(set(allset)))
    return ret_pitlaps, all_pitlaps, max_lap
def get_nextpit_raw(pitlaps, startlap):
    """
    Find, for each car, the first pit stop strictly after startlap.

    input:
        pitlaps ; dict carno -> list of pit-stop laps (ascending order assumed)
        startlap ; reference lap
    return:
        nextpit_map ; dict carno -> next pit lap (cars with no later pit omitted)
        maxpit ; largest next-pit lap over all cars; -1 when no car pits again
            (cars without a later pit contribute a -1 sentinel, as before)
    """
    nextpit = []
    nextpit_map = {}
    for carno in pitlaps.keys():
        rec = pitlaps[carno]
        # first pit strictly after startlap, if any
        found = False
        for lap in rec:
            if lap > startlap:
                nextpit.append(lap)
                nextpit_map[carno] = lap
                found = True
                break
        if not found:
            # sentinel keeps max() meaningful when some cars never pit again
            nextpit.append(-1)
    # bugfix: original raised ValueError on max([]) for an empty pitlaps dict
    return nextpit_map, (max(nextpit) if nextpit else -1)
def get_nextpit(pitlaps, startlap):
    """
    Find the next pit stop after startlap for every car.

    input:
        pitlaps ; dict carno -> list of pit-stop laps
        startlap ; reference lap
    return:
        nextpit_map ; dict carno -> first pit lap strictly after startlap
            (cars with no later pit are omitted)
        maxpit ; latest "next pit" among the cars that pit exactly at
            startlap; -1 when no such car exists
    """
    # cars that pit exactly at startlap ("hits")
    hit_cars = [carno for carno, laps in pitlaps.items() if startlap in laps]
    # per car: the first pit strictly after startlap (list order preserved)
    nextpit_map = {}
    for carno, laps in pitlaps.items():
        later = [lap for lap in laps if lap > startlap]
        if later:
            nextpit_map[carno] = later[0]
    # farthest next pit among the cars that just pitted
    maxpit = -1
    for carno in hit_cars:
        if carno in nextpit_map:
            maxpit = max(nextpit_map[carno], maxpit)
    return nextpit_map, maxpit
def sim_init():
    """
    Snapshot the pit-model related features so the simulator can restore them.

    Copies COL_LAPSTATUS, COL_CAUTION_LAPS_INSTINT and COL_LAPS_INSTINT of every
    car in the test event into their *_SAVE rows inside laptime_data, in place.
    update_onets later restores from these saved rows before re-simulating.
    """
    for _data in laptime_data:
        # only the configured test event participates in the simulation
        if events[_data[0]] != _test_event:
            continue
        # process each car's ts; rec[features, lapnumber]
        for rowid in range(_data[2].shape[0]):
            rec = _data[2][rowid]
            rec[COL_LAPSTATUS_SAVE, :] = rec[COL_LAPSTATUS, :]
            rec[COL_CAUTION_LAPS_INSTINT_SAVE, :] = rec[COL_CAUTION_LAPS_INSTINT, :]
            rec[COL_LAPS_INSTINT_SAVE, :] = rec[COL_LAPS_INSTINT, :]
def update_lapstatus(startlap):
    """
    Refresh the simulated lap status of every car in the test event.

    Re-runs the pit prediction model (via update_onets) on each car's time
    series from `startlap` onward, mutating the rows of laptime_data in place.

    input:
        startlap ; lap index from which the lap status is re-simulated
    """
    # cleanup: dead locals (run_ts, ts_len, max_lap) from the original removed
    for _data in laptime_data:
        # only the configured test event participates in the simulation
        if events[_data[0]] != _test_event:
            continue
        # rec[features, lapnumber]; updated in place by update_onets
        for rowid in range(_data[2].shape[0]):
            update_onets(_data[2][rowid], startlap, _data[1][rowid])
_pitmodel = None
def update_onets(rec, startlap, carno):
    """
    Update lap status after startlap based on the pit prediction model.

    input:
        rec ; a ts with multiple feature rows (COL_XXX); mutated in place
        startlap ; laps up to and including startlap keep their saved
            (ground-truth) status; later laps are re-simulated
        carno ; car number, used only for debug reporting
    updates in place:
        COL_LAPSTATUS, COL_CAUTION_LAPS_INSTINT, COL_LAPS_INSTINT
    """
    # determine the valid (non-nan) length of this ts
    nans, x= nan_helper(rec[_run_ts,:])
    nan_count = np.sum(nans)
    recx = rec[:, ~np.isnan(rec[_run_ts,:])]
    totallen = recx.shape[1]
    if startlap >= totallen:
        # nothing left to simulate for this car
        return
    # restore saved status up to and including startlap; clear the rest
    endpos = startlap + 1
    rec[COL_LAPSTATUS,:] = 0
    rec[COL_LAPSTATUS,:endpos] = rec[COL_LAPSTATUS_SAVE, :endpos]
    rec[COL_CAUTION_LAPS_INSTINT,:endpos] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :endpos]
    rec[COL_LAPS_INSTINT, :endpos] = rec[COL_LAPS_INSTINT_SAVE, :endpos]
    debug_report('start update_onets', rec[COL_LAPSTATUS], startlap, carno)
    # walk forward, asking the pit model for the next stop position each time
    curpos = startlap
    while True:
        caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, curpos])
        laps_instint = int(rec[COL_LAPS_INSTINT, curpos])
        # model returns the predicted stint length; convert to absolute lap
        pred_pit_laps = _pitmodel.predict(caution_laps_instint, laps_instint)
        nextpos = curpos + pred_pit_laps - laps_instint
        if nextpos >= totallen:
            # predicted pit falls beyond the race: fill the tail and stop
            nextpos = totallen - 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos+1] = caution_laps_instint
            for _pos in range(curpos+1, nextpos+1):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            break
        else:
            # a valid pit at nextpos
            rec[COL_LAPSTATUS, nextpos] = 1
            if _inlap_status != 0:
                # mark the in-lap as well ('P' on the adjacent lap)
                if _inlap_status == 1 :
                    rec[COL_LAPSTATUS, nextpos-1] = 1
                else:
                    # NOTE(review): no boundary check on nextpos+1 (original todo)
                    rec[COL_LAPSTATUS, nextpos+1] = 1
            # carry the caution count through the stint, reset at the stop
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos] = caution_laps_instint
            rec[COL_CAUTION_LAPS_INSTINT, nextpos] = 0
            for _pos in range(curpos+1, nextpos):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            rec[COL_LAPS_INSTINT, nextpos] = 0
            # go forward from the new stop
            curpos = nextpos
    debug_report('after update_onets', rec[COL_LAPSTATUS], startlap, carno)
    return
def debug_pitmodel(startlap, carno, laps_instint, caution_laps_instint, samplecnt=1000):
    """
    Sample the pit model repeatedly to inspect its next-pit distribution.

    input:
        startlap ; current lap
        carno ; car number (unused, kept for interface compatibility)
        laps_instint, caution_laps_instint ; current stint state
        samplecnt ; number of samples to draw
    return:
        list of samplecnt predicted next-pit lap positions
    """
    return [startlap + _pitmodel.predict(caution_laps_instint, laps_instint) - laps_instint
            for _ in range(samplecnt)]
# debug tracking of the simulation status
# intended status matrix layout: laps x (endCol x 5 features)
# features: target, lapstatus, lap_instint, caution_instint, trackstatus
_status_mat = {} # stepid -> status matrix
def debug_report_mat(startlap, maxnext):
    """
    Output the status of the simulation.

    NOTE(review): unfinished stub — it walks the test-event records but never
    prints or stores anything; fixedWidth/endCol look like placeholders for an
    intended fixed-width table output. Confirm before relying on it.
    """
    fixedWidth = 5
    endCol = 4
    run_ts = _run_ts
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        # statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        # header carno | lap# ... (fixed width — not yet implemented)
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status, timediff]
            rec = _data[2][rowid]
# car numbers to trace in debug_report* / debug_print; empty disables all
# debug output (e.g. set to [12] to follow car 12)
_debug_carlist = []
#_debug_carlist = [12]
def debug_report_ts(msg, rec, startlap, carno, col= COL_LAPSTATUS):
    """
    Print one feature row of `rec` split at startlap (known part, separator,
    simulated part) — only for cars listed in _debug_carlist.
    """
    if carno not in _debug_carlist:
        return
    print(f'--------- {msg}: {startlap} ----------')
    print(rec[col, :startlap + 1])
    print('=' * 10)
    print(rec[col, startlap + 1:])
def debug_report(msg, rec, startlap, carno):
    """
    Print a 1-D array split at startlap (known part, separator, simulated
    part) — only for cars listed in _debug_carlist.
    """
    if carno not in _debug_carlist:
        return
    print(f'--------- {msg}: {startlap} ----------')
    print(rec[:startlap + 1])
    print('=' * 10)
    print(rec[startlap + 1:])
def debug_print(msg):
    """Print `msg` only when at least one car is being traced."""
    if _debug_carlist:
        print(msg)
# works on predicted lap status
def sim_onestep_pred(predictor, prediction_length, freq,
                     startlap, endlap,
                     oracle_mode = MODE_ORACLE,
                     sample_cnt = 100,
                     verbose = False
                     ):
    """
    Autoregressive simulation step working on *predicted* lap status.

    Repeatedly forecasts `prediction_length` laps at a time from startlap to
    endlap, feeding each forecast back as the target for the next window.

    input:
        predictor, prediction_length, freq ; same as longterm_predict /
            make_dataset_byevent
        startlap, endlap ; simulation range
        oracle_mode ; feature-mode flags (MODE_*)
        sample_cnt ; number of sample paths per forecast
    return:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lap status (saved ground truth)
            1,: -> true target
            2,: -> pred target (overwritten window by window)
            3,: / 4,: -> placeholders
        forecasts_samples ; {}, carno -> sample_cnt values of the farthest
            predicted lap (for p-risk)
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    test_set = []
    forecasts_et = {}
    forecasts_samples = {}
    _laptime_data = laptime_data.copy()
    # endpos = exclusive end of the current input window (target + horizon)
    endpos = startlap + prediction_length + 1
    while(endpos <= endlap + prediction_length):
        # build the test set for this window
        # _data: eventid, carids, datalist[carnumbers, features, lapnumber]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                # only the test event is simulated
                continue
            # statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            if verbose:
                print(f'after ====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}, cars={_data[2].shape[0]}')
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status, timediff]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                # remove nan (only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # skip cars whose race ends before this window
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                # working copies of the feature rows
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # lazily initialize the per-car result matrix <5, totallen>
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS_SAVE, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                    # for p-risk
                    forecasts_samples[carno] = np.zeros((sample_cnt))
                # row 2 is updated in place by each window's forecast, so the
                # next window uses the model's own predictions as history
                target_val = forecasts_et[carno][2,:]
                # selection of features per oracle_mode flags
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # feature windows for this prediction
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test mode: fill the horizon with assumed feature values
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # carry the last observed track status forward; assume no pit
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    # assume green flag and no pit over the horizon
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    # derive pit age over the horizon from the (predicted) lap status
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            # new pit resets the age counter
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # assemble the dynamic-real features per configured feature mode
                real_features = {
                        FEATURE_STATUS:[track_rec,lap_rec],
                        FEATURE_PITAGE:[track_rec,lap_rec,pitage_rec],
                        FEATURE_LEADERPITCNT:[track_rec,lap_rec,rec[COL_LEADER_PITCNT,:endpos]]
                }
                _test.append({'target': target_val[:endpos].astype(np.float32),
                              'start': start,
                              'feat_static_cat': static_cat,
                              'feat_dynamic_real': real_features[feature_mode]
                             }
                            )
                test_rec_cnt += 1
        # end of for each ts: run the prediction for this window
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,        # test dataset
            predictor=predictor,    # predictor
            num_samples=sample_cnt, # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        # write each forecast back into the per-car result matrix
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            # point forecast: mean or median over the sample paths
            if _use_mean:
                forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            else:
                forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_length))
            # update row 2 so the next window forecasts on top of this one
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
            # keep the raw samples of the farthest predicted lap (for p-risk)
            forecasts_samples[carno][:] = forecasts[idx].samples[:,-1].reshape(-1)
        # go forward one window
        endpos += prediction_length
    return forecasts_et, forecasts_samples
# works on lapstatus ground truth
def sim_onestep_ex(predictor, prediction_length, freq,
startlap, endlap,
oracle_mode = MODE_ORACLE,
verbose = False
):
"""
input:
parameters ; same as longterm_predict, make_dataset_byevent
startlap
endlap
return:
forecast ; {}, carno -> 5 x totallen matrix
0,: -> lapstatus
1,: -> true target
2,: -> pred target
3, -> placeholder
4, -> placeholder
"""
run_ts= _run_ts
test_event = _test_event
feature_mode = _feature_mode
context_ratio = _context_ratio
train_len = _train_len
start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
test_set = []
forecasts_et = {}
_laptime_data = laptime_data.copy()
endpos = startlap + prediction_length + 1
#while(endpos <= endlap + 1):
while(endpos <= endlap + prediction_length + 1):
#make the testset
#_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
_test = []
for _data in _laptime_data:
if events[_data[0]] != test_event:
#jump out
continue
#statistics on the ts length
ts_len = [ _entry.shape[1] for _entry in _data[2]]
max_len = int(np.max(ts_len))
# process for each ts
for rowid in range(_data[2].shape[0]):
# rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
rec = _data[2][rowid].copy()
rec_raw = _data[2][rowid].copy()
#remove nan(only tails)
nans, x= nan_helper(rec[run_ts,:])
nan_count = np.sum(nans)
rec = rec[:, ~np.isnan(rec[run_ts,:])]
# remove short ts
totallen = rec.shape[1]
if ( totallen < train_len + prediction_length):
if verbose:
print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
continue
if endpos > totallen:
continue
carno = _data[1][rowid]
carid = global_carids[_data[1][rowid]]
static_cat = [carid]
#first, get target a copy
# target can be COL_XXSTATUS
#target_val = rec[run_ts,:].copy().astype(np.float32)
lap_status = rec[COL_LAPSTATUS, :].copy()
track_status = rec[COL_TRACKSTATUS, :].copy()
pitage_status = rec[COL_LAPS_INSTINT,:].copy()
# <3, totallen>
if carno not in forecasts_et:
forecasts_et[carno] = np.zeros((5, totallen))
forecasts_et[carno][:,:] = np.nan
forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
#forecasts_et[carno][2,:endpos] = rec[run_ts,:endpos].copy().astype(np.float32)
# forecasts_et will be updated by forecasts
target_val = forecasts_et[carno][2,:]
# selection of features
if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
rec[COL_TRACKSTATUS, :] = 0
if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
rec[COL_LAPSTATUS, :] = 0
test_rec_cnt = 0
# RUN Prediction for single record
track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
# test mode
if test_flag(oracle_mode, MODE_TESTCURTRACK):
# since nan does not work, use cur-val instead
track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
#track_rec[-prediction_length:] = random.randint(0,1)
#lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
lap_rec[-prediction_length:] = 0
#for pitage, just assume there is no pit
start_pitage = pitage_rec[-prediction_length - 1]
pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
elif test_flag(oracle_mode, MODE_TESTZERO):
#set prediction part as nan
#track_rec[-prediction_length:] = np.nan
#lap_rec[-prediction_length:] = np.nan
track_rec[-prediction_length:] = 0
lap_rec[-prediction_length:] = 0
#for pitage, just assume there is no pit
start_pitage = pitage_rec[-prediction_length - 1]
pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
if test_flag(oracle_mode, MODE_PREDPIT):
#todo
#lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
# laps_instint,prediction_length)
#for pitage, use the predicted lap info to update pitage
start_pitage = pitage_rec[-prediction_length - 1]
for pos in range(prediction_length):
if lap_rec[-prediction_length + pos]==0:
pitage_rec[-prediction_length + pos] = start_pitage+1
else:
#new pit
start_pitage = 0
pitage_rec[-prediction_length + pos] = start_pitage
# add to test set
if feature_mode == FEATURE_STATUS:
_test.append({'target': target_val[:endpos].astype(np.float32),
'start': start,
'feat_static_cat': static_cat,
'feat_dynamic_real': [track_rec,lap_rec]
}
)
elif feature_mode == FEATURE_PITAGE:
_test.append({'target': target_val[:endpos].astype(np.float32),
'start': start,
'feat_static_cat': static_cat,
'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
}
)
test_rec_cnt += 1
# end of for each ts
# RUN Prediction here
test_ds = ListDataset(_test, freq=freq)
forecast_it, ts_it = make_evaluation_predictions(
dataset=test_ds, # test dataset
predictor=predictor, # predictor
num_samples=100, # number of sample paths we want for evaluation
)
forecasts = list(forecast_it)
tss = list(ts_it)
#save the forecast results
ds_iter = iter(test_ds)
for idx in range(len(test_ds)):
test_rec = next(ds_iter)
#global carid
carno = decode_carids[test_rec['feat_static_cat'][0]]
forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
#update the forecasts , ready to use in the next prediction(regresive forecasting)
forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
#go forward
endpos += prediction_length
#clear the unpred part
for carno in forecasts_et.keys():
forecasts_et[carno][2, endlap+1:] = np.nan
return forecasts_et
def sim_onestep(predictor, prediction_length, freq,
                startlap, endlap,
                oracle_mode = MODE_ORACLE,
                verbose = False
                ):
    """
    Run one regressive simulation pass over [startlap, endlap) for every car
    of the test event, feeding each window's prediction back into the next
    window's context.

    input:
        predictor ; trained GluonTS predictor
        prediction_length ; laps predicted per window
        freq ; pandas frequency string for the synthetic timestamps
        startlap ; first lap of the simulated span
        endlap ; simulation stops once endpos reaches this lap
        oracle_mode ; bit flags (MODE_*) selecting covariate treatment
        verbose ; print per-car statistics
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target (true values overwritten window by window)
            3, -> placeholder
            4, -> placeholder
    """
    # module-level experiment settings intentionally override locals here
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    # statistics accumulator for adjust tests: [trackstatus mae, lapstatus mae]
    mae = [0,0]
    # _data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _test = []
        if events[_data[0]] != test_event:
            # only simulate the configured test event
            continue
        # statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        # process for each ts (one car per row)
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status, timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()
            # remove nan (only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            # first, get target a copy; target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()
            # result matrix <5, totallen>; row 2 starts as the truth and is
            # overwritten with predictions window by window below
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
            # selection of features: optionally blank out a covariate
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if True:
                # fixed split point for all cars/ts; slide one window at a time
                endpos = startlap + prediction_length
                while(endpos < endlap and endpos < totallen):
                    # RUN Prediction for single record
                    _test = []
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode: fill the unknown future covariates
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        # for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        # set prediction part to zero (nan not supported)
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        # for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        # NOTE(review): the pit-model call itself is commented out
                        # upstream; here only pitage is rebuilt from lap_rec.
                        # for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                # new pit resets the age counter
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    # add to test set
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec]
                                      }
                                     )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                      }
                                     )
                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)
                    # get prediction result (mean over sample paths)
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
                    # feed the prediction back so the next window uses it as context
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
                    # save the prediction into the result matrix
                    completed_laps = len(tss[0]) - prediction_length + 1
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
                    test_rec_cnt += 1
                    # go forward
                    endpos += prediction_length
            # one ts done
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
    return forecasts_et
# pred pit differs to true pit
def get_acc_onestint_pred(forecasts, startlap, nextpit, nextpit_pred, trim=2, currank = False):
    """
    Collect per-car rank-change records for the stint that starts at
    `startlap`, reading the predicted rank at the *predicted* next pit lap.

    input:
        forecasts ; carno -> [5, totallen] matrix
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; eval for the stint start from startlap only
        nextpit ; dict carno -> true next pitstop lap
        nextpit_pred ; dict carno -> predicted next pitstop lap
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        currank ; True forces the no-change baseline (pred_endrank = startrank)
    output:
        list of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign, pitpos, pitpos_pred]
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]
        # lap status condition: is this car actually pitting at startlap?
        # _inlap_status selects whether the in-lap is also checked one lap
        # before (1) or one lap after (2) the flagged pit lap.
        # bug fix: default to False and guard every status lookup with the ts
        # length -- the original code indexed the array before any bounds check
        # (IndexError on short ts) and left lapstatus_cont unbound for any
        # other _inlap_status value (NameError).
        lapstatus_cont = False
        if startlap < lapnum:
            if _inlap_status == 0:
                lapstatus_cont = (forecasts[carno][0, startlap] == 1)
            elif _inlap_status == 1:
                lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap-1] == 1))
            elif _inlap_status == 2:
                lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (startlap + 1 < lapnum)
                                  and (forecasts[carno][0, startlap+1] == 1))
        if carno in _debug_carlist:
            _debug_msg = 'startlap=%d, total=%d, pitstop status = %s, nextpit=%s, nextpit_pred=%s'%(startlap, lapnum, lapstatus_cont,
                    'none' if (carno not in nextpit) else nextpit[carno],
                    'none' if (carno not in nextpit_pred) else nextpit_pred[carno],
                    )
            debug_print(_debug_msg)
        # check the lap status
        if ((startlap < lapnum) and (lapstatus_cont == True)):
            startrank = true_rank[startlap-trim]
            if not carno in nextpit:
                continue
            pitpos = nextpit[carno]
            if np.isnan(pitpos):
                continue
            #todo, use the true prediction that longer than maxlap
            if _force_endpit_align:
                # fall back to the true pit lap when no usable prediction exists
                if not carno in nextpit_pred:
                    pitpos_pred = pitpos
                else:
                    pitpos_pred = nextpit_pred[carno]
                    if np.isnan(pitpos_pred):
                        pitpos_pred = pitpos
            else:
                if not carno in nextpit_pred:
                    continue
                pitpos_pred = nextpit_pred[carno]
                if np.isnan(pitpos_pred):
                    continue
            endrank = true_rank[pitpos-trim]
            diff = endrank - startrank
            sign = get_sign(diff)
            if currank:
                # force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                pred_endrank = pred_rank[pitpos_pred-trim]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            rankret.append([carno, startlap, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign,
                            pitpos, pitpos_pred
                            ])
    return rankret
# pred pit differs to true pit
def get_acc_onestep_shortterm(forecasts, startlap, endlap, trim=0, currank = False):
    """
    Collect per-car rank-change records over the fixed short-term window
    [startlap, endlap].

    input:
        forecasts ; carno -> [5, totallen] matrix
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; window start lap
        endlap ; window end lap
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        currank ; True forces the no-change baseline (pred_endrank = startrank)
    output:
        list of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]
        if startlap < lapnum:
            startrank = true_rank[startlap-trim]
            if np.isnan(endlap):
                continue
            # bug fix: cars whose series ends before endlap previously raised
            # IndexError on the lookup below; skip them instead.
            if endlap - trim >= lapnum:
                continue
            endrank = true_rank[endlap-trim]
            diff = endrank - startrank
            sign = get_sign(diff)
            if currank:
                # force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                pred_endrank = pred_rank[endlap-trim]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            rankret.append([carno, startlap, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                            ])
    return rankret
# works when pred pitstop == true pitstop
def get_acc_onestint(forecasts, startlap, nextpit, trim=2, currank = False):
    """
    Build per-car rank-change records for the stint starting at `startlap`,
    assuming the predicted pit lap equals the true pit lap.

    input:
        forecasts ; carno -> [5, totallen] matrix
            row 0: lap_status, row 3: true_rank, row 4: pred_rank
        startlap ; only evaluate stints beginning at this lap
        nextpit ; dict carno -> next true pitstop lap
        trim ; laps backed off before the pit to read a steady rank
        currank ; True forces the no-change baseline prediction
    output:
        list of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    results = []
    for car, mat in forecasts.items():
        n_laps = mat.shape[1]
        rank_true = mat[3, :]
        rank_pred = mat[4, :]
        # guard clauses: the car must have data at startlap, be flagged as
        # pitting there, and have a known (non-nan) next pit lap
        if startlap >= n_laps or mat[0, startlap] != 1:
            continue
        if car not in nextpit:
            continue
        pit_lap = nextpit[car]
        if np.isnan(pit_lap):
            continue
        rank_at_start = rank_true[startlap - trim]
        rank_at_end = rank_true[pit_lap - trim]
        delta = rank_at_end - rank_at_start
        if currank:
            # no-change baseline: predict the starting rank unchanged
            rank_at_end_hat = rank_at_start
        else:
            rank_at_end_hat = rank_pred[pit_lap - trim]
        delta_hat = rank_at_end_hat - rank_at_start
        results.append([car, startlap, rank_at_start,
                        rank_at_end, delta, get_sign(delta),
                        rank_at_end_hat, delta_hat, get_sign(delta_hat)])
    return results
#
# simulation
#
def run_simulation_stint(predictor, prediction_length, freq,
                         carno, stintid, loopcnt,
                         datamode = MODE_ORACLE):
    """
    Repeat the simulation of one specific stint of one car `loopcnt` times.

    input:
        carno ; car to evaluate
        stintid ; index into this car's pit-lap list
        loopcnt ; number of repeated simulation runs
        datamode ; MODE_* flags forwarded to sim_onestep_pred
    step:
        1. init the lap status model
        2. for each run:
            1. onestep simulation up to the predicted next pit
            2. eval stint performance
    return:
        (df, full_samples, full_tss, maxnext_pred)
        NOTE(review): full_samples/full_tss are returned empty (the
        accumulation call is commented out), and maxnext_pred is unbound
        when loopcnt == 0 -- confirm callers always pass loopcnt >= 1.
    """
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    # init samples array
    full_samples = {}
    full_tss = {}
    # here, test only one stint for carno and stintid
    pitlap = pitmat[carno][stintid]
    for runid in range(loopcnt):
        # 1. update lap status (skipped when the oracle pit model is used)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        # 2. get maxnext
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        # only for one car
        maxnext = nextpit[carno]
        maxnext_pred = nextpit_pred[carno]
        # debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)
        # run one step sim from pitlap to maxnext; to get the forecast_sample,
        # set max = maxnext_pred only, rather than max(maxnext, maxnext_pred)
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, maxnext_pred,
                oracle_mode = datamode,
                sample_cnt = 100
                )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            return
        # evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        rankret.extend(ret)
    # add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
            'endrank', 'diff', 'sign',
            'pred_endrank', 'pred_diff', 'pred_sign',
            'endlap','pred_endlap'
            ])
    return df, full_samples, full_tss, maxnext_pred
def run_simulation_pred(predictor, prediction_length, freq,
                        datamode = MODE_ORACLE, verbose = False):
    """
    Full-race simulation driven by the *predicted* pit model.

    step:
        1. init the lap status model
        2. loop on each true pit lap
            1. onestep simulation up to max(true, predicted) next pit
            2. eval stint performance
    return:
        DataFrame of per-stint rank-change records.
    """
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    for pitlap in allpits:
        # 1. update lap status (skipped when the oracle pit model is used)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        # 2. get maxnext
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        # debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)
        # run one step sim from pitlap to maxnext
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, max(maxnext, maxnext_pred),
                oracle_mode = datamode,
                sample_cnt = 100,
                verbose = verbose
                )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        rankret.extend(ret)
    # add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
            'endrank', 'diff', 'sign',
            'pred_endrank', 'pred_diff', 'pred_sign',
            'endlap','pred_endlap'
            ])
    return df
#prediction of shorterm + pred pit model
def run_simulation_shortterm(predictor, prediction_length, freq,
                             datamode = MODE_ORACLE,
                             sample_cnt = 100):
    """
    Short-term prediction with the predicted pit model: slide a
    prediction_length window over laps [10, maxlap - prediction_length).

    step:
        1. init the lap status model
        2. for each window start lap:
            1. onestep simulation over one window
            2. eval window performance and accumulate sample paths
    return:
        (df, full_samples, full_tss)
    """
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    # init samples array
    full_samples = {}
    full_tss = {}
    for pitlap in range(10, maxlap-prediction_length):
        # 1. update lap status (skipped when the oracle pit model is used)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        # run one step sim over a single window
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, pitlap + prediction_length,
                oracle_mode = datamode,
                sample_cnt = sample_cnt,
                verbose = True
                )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, prediction_length)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, prediction_length, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this window
        ret = get_acc_onestep_shortterm(forecasts_et, pitlap, pitlap+prediction_length)
        rankret.extend(ret)
        # add to full_samples; laptime experiments are evaluated on the raw
        # target, the others on the derived rank
        evalbyrank = False if _exp_id == 'laptime2rank' else True
        eval_full_samples(pitlap + prediction_length,
                forecast_samples, forecast,
                full_samples, full_tss, evalbyrank=evalbyrank)
        print('evalbyrank:', evalbyrank)
    # add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
            'endrank', 'diff', 'sign',
            'pred_endrank', 'pred_diff', 'pred_sign',
            ])
    return df, full_samples, full_tss
# oracle sim
def run_simulation(predictor, prediction_length, freq,
                   datamode = MODE_ORACLE):
    """
    Oracle simulation: loop over every true pit lap and simulate each stint
    with the true pit schedule (no pit prediction model).

    step:
        1. get the ground-truth pit laps
        2. loop on each pit lap
            1. onestep simulation to the true next pit
            2. eval stint performance
    return:
        DataFrame of per-stint rank-change records.
    """
    rankret = []
    allpits, pitmat, maxlap = get_pitlaps()
    for pitlap in allpits:
        print(f'start pitlap: {pitlap}')
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        # run one step sim from pitlap to maxnext
        forecast = sim_onestep_ex(predictor, prediction_length, freq,
                pitlap, maxnext,
                oracle_mode = datamode
                )
        print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this stint
        ret = get_acc_onestint(forecasts_et, pitlap, nextpit)
        rankret.extend(ret)
    # add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
            'endrank', 'diff', 'sign',
            'pred_endrank', 'pred_diff', 'pred_sign',
            ])
    return df
#
# ------------
#
def longterm_predict(predictor, runs, prediction_length, freq,
                     useeid = False,
                     run_ts= COL_LAPTIME,
                     test_event = 'Indy500-2018',
                     test_cars = [],
                     use_global_dict = True,
                     oracle_mode = MODE_ORACLE,
                     half_moving_win = 0,
                     train_ratio=0.8,
                     log_transform = False,
                     verbose = False
                     ):
    """
    Long-term rolling-window prediction over the test event: predictions are
    fed back as context for the next window, and the context is reset to
    ground truth whenever a pit stop is detected in the last window.

    split the ts to train and test part by the ratio
    input:
        oracle_mode: false to simulate prediction in real by
            set the covariates of track and lap status as nan in the testset
        half_moving_win ; window step: 0 -> 1 lap, 1 -> prediction_length/2,
            2 -> prediction_length
        NOTE(review): run_ts / test_event / feature_mode / context_ratio
        parameters are immediately overridden by the module-level settings
        below -- the keyword arguments have no effect.
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    init_track_model()
    init_adjust_track_model()
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    train_set = []
    test_set = []
    forecasts_et = {}
    # select run: one event (runs >= 0) or all events
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()
    # statistics for adjust test: [trackstatus mae, lapstatus mae]
    mae = [0,0]
    # _data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            # skip non-test events entirely
            continue
        # statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
            if context_len < 10:
                context_len = 10
        if verbose:
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process for each ts (one car per row)
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status, timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()
            # remove nan (only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                # simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            # first, get target a copy; target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()
            # result matrix <5, totallen>
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # all go to train set (dead branch here: non-test events are
                # skipped above; kept for symmetry with the training variant)
                _train.append({'target': target_val,
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                                     rec[COL_LAPSTATUS,:]]
                               }
                              )
            else:
                # multiple test ts (rolling window as half of the prediction_length)
                if half_moving_win == 1:
                    step = int(prediction_length/2)
                elif half_moving_win == 2:
                    step = prediction_length
                else:
                    step = 1
                # bug fix, fixed the split point for all cars/ts
                for endpos in range(context_len+prediction_length, max_len, step):
                    # check if enough for this ts
                    if endpos > totallen:
                        break
                    # RUN Prediction for single record
                    _test = []
                    # check pitstop(stint) in the last prediction;
                    # use ground truth of target before the last pitstop
                    if np.sum(lap_status[endpos-2*prediction_length:endpos-prediction_length]) > 0:
                        # pit found: adjust endpos to just past the pit lap
                        pitpos = np.where(lap_status[endpos-2*prediction_length:endpos-prediction_length] == 1)
                        endpos = endpos-2*prediction_length + pitpos[0][0] + prediction_length + 1
                        # check if enough for this ts
                        if endpos > totallen:
                            break
                        # reset target, status back to ground truth
                        target_val = rec[run_ts,:].copy().astype(np.float32)
                        rec[COL_LAPSTATUS, :] = lap_status
                        rec[COL_TRACKSTATUS, :] = track_status
                        rec[COL_LAPS_INSTINT, :] = pitage_status
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode: fill the unknown future covariates
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        # for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        # set prediction part to zero (nan not supported)
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        # for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                laps_instint,prediction_length)
                        # for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                # new pit resets the age counter
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    # disturbance analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status:
                        # future 1s in trackstatus, pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length;
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                # find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            # found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec
                    # okay, end of adjustments; measure difference vs raw here
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec]
                                      }
                                     )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                      }
                                     )
                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)
                    # get prediction result (mean over sample paths)
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
                    # update target_val: feed the prediction back as context
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
                    # save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
                    test_rec_cnt += 1
            # one ts done
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
    return forecasts_et
# In[12]:
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset, maxlap=200):
    """
    Evaluate stint rank by laptime forecasting: convert true and predicted
    laptimes to elapsed time, rank the cars lap by lap, and write the ranks
    back into rows 3 (true) and 4 (pred) of each car's matrix.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target (laptime)
            2,: -> pred target (laptime)
            3, -> placeholder (filled with true rank)
            4, -> placeholder (filled with pred rank)
        prediction_length ; unused here, kept for interface compatibility
        start_offset ; DataFrame with 'car_number'/'elapsed_time' columns
            giving each car's elapsed time at lap 0, or a scalar uniform
            offset (anything else falls back to 0)
        maxlap ; width of the rank matrix (was hard-coded to 200 for Indy500)
    return:
        forecasts_et (same dict, mutated in place)
    """
    # get car list for this lap
    carlist = list(forecasts_et.keys())
    caridmap = {key: idx for idx, key in enumerate(carlist)}
    # elapsed_time[0] holds true elapsed time, [1] the predicted one;
    # unused laps stay nan so argsort pushes them behind real values
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:, :] = np.nan
    for carno in forecasts_et.keys():
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number'] == carno)].elapsed_time.values[0]
        else:
            # bug fix: the original left `offset` unbound here (NameError);
            # accept a uniform scalar offset, otherwise use 0
            offset = start_offset if np.isscalar(start_offset) else 0.0
        lapnum = len(forecasts_et[carno][1, :])
        laptime_array = forecasts_et[carno][1, :]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno], :lapnum] = elapsed
        laptime_array = forecasts_et[carno][2, :]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno], :lapnum] = elapsed
    # calculate rank per lap (double argsort turns order into rank); nan sorts last
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1, :])
        forecasts_et[carno][3, :] = true_rank[caridmap[carno], :lapnum]
        forecasts_et[carno][4, :] = pred_rank[caridmap[carno], :lapnum]
    return forecasts_et
#
def eval_full_samples_old(lap, forecast_samples, forecast, full_samples, full_tss, maxlap=200):
    """
    Merge one lap's rank forecast into the accumulated sample/truth buffers.

    input:
        lap              ; lap number being evaluated
        forecast_samples ; {} carno -> sample vector of the predicted target
        forecast         ; {} carno -> 5 x totallen matrix
                           row 1 -> true target, row 2 -> pred target
    output (mutated in place):
        full_samples     ; {} carno -> (samplecnt, maxlap) predicted-rank samples
        full_tss         ; {} carno -> (maxlap,) true-rank series
    """
    cars = list(forecast.keys())
    car2row = {car: row for row, car in enumerate(cars)}

    n_samples = len(forecast_samples[cars[0]])

    # ground-truth targets (per lap) and one-lap prediction samples, nan padded
    truth = np.full((len(cars), maxlap), np.nan)
    pred = np.full((len(cars), n_samples), np.nan)
    for car in cars:
        series = forecast[car][1, :]
        truth[car2row[car], :len(series)] = series
        pred[car2row[car], :] = forecast_samples[car]

    # value -> rank across cars (argsort twice); nan sorts last
    true_rank = np.argsort(np.argsort(truth, axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(pred, axis=0), axis=0)

    # write the ranks back into the accumulators, creating entries on demand
    for car in cars:
        if car not in full_tss:
            full_tss[car] = np.full(maxlap, np.nan)
            full_samples[car] = np.full((n_samples, maxlap), np.nan)
        row = car2row[car]
        full_tss[car][:lap] = true_rank[row][:lap]
        full_tss[car][lap] = true_rank[row][lap]
        full_samples[car][:, lap] = pred_rank[row, :]
    return
#
def eval_full_samples(lap, forecast_samples, forecast, full_samples, full_tss, maxlap=200, evalbyrank = True):
    """
    Merge one lap's forecast into the accumulated sample/truth buffers.

    input:
        lap              ; lap number being evaluated
        forecast_samples ; {} carno -> sample vector of the predicted target
        forecast         ; {} carno -> 5 x totallen matrix
                           row 1 -> true target, row 2 -> pred target
        evalbyrank       ; True -> convert values to per-lap ranks first,
                           False -> store the raw target values
    output (mutated in place):
        full_samples     ; {} carno -> (samplecnt, maxlap) prediction samples
        full_tss         ; {} carno -> (maxlap,) true series
    """
    cars = list(forecast.keys())
    car2row = {car: row for row, car in enumerate(cars)}

    n_samples = len(forecast_samples[cars[0]])

    # ground-truth targets (per lap) and one-lap prediction samples, nan padded
    truth = np.full((len(cars), maxlap), np.nan)
    pred = np.full((len(cars), n_samples), np.nan)
    for car in cars:
        series = forecast[car][1, :]
        truth[car2row[car], :len(series)] = series
        pred[car2row[car], :] = forecast_samples[car]

    if evalbyrank:
        # value -> rank across cars (argsort twice); nan sorts last
        true_rank = np.argsort(np.argsort(truth, axis=0), axis=0)
        pred_rank = np.argsort(np.argsort(pred, axis=0), axis=0)
    else:
        true_rank = truth
        pred_rank = pred

    # write results back into the accumulators, creating entries on demand
    for car in cars:
        if car not in full_tss:
            full_tss[car] = np.full(maxlap, np.nan)
            full_samples[car] = np.full((n_samples, maxlap), np.nan)
        row = car2row[car]
        full_tss[car][:lap] = true_rank[row][:lap]
        full_tss[car][lap] = true_rank[row][lap]
        full_samples[car][:, lap] = pred_rank[row, :]
    return
def eval_stint_direct(forecasts_et, prediction_length):
    """
    Evaluate rank by timediff forecasting.

    input:
        forecasts_et  ; {}, carno -> 5 x totallen matrix
                        0,: -> lapstatus
                        1,: -> true target (timediff)
                        2,: -> pred target (timediff)
                        3,: -> placeholder, filled here with the true rank
                        4,: -> placeholder, filled here with the pred rank
        prediction_length ; unused here, kept for interface compatibility
    return:
        forecasts_et  ; same dict, rows 3/4 filled in place
    """
    cars = list(forecasts_et.keys())
    car2row = {car: row for row, car in enumerate(cars)}

    # todo, Indy500 -> 200 max laps
    maxlap = 200
    values = np.full((2, len(cars), maxlap), np.nan)
    for car, mat in forecasts_et.items():
        n = len(mat[1, :])
        values[0, car2row[car], :n] = mat[1, :]
        values[1, car2row[car], :n] = mat[2, :]

    # value -> per-lap rank across cars (argsort twice); nan sorts last
    true_rank = np.argsort(np.argsort(values[0], axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(values[1], axis=0), axis=0)

    # save the ranks back into rows 3 (true) and 4 (pred)
    for car, mat in forecasts_et.items():
        n = len(mat[1, :])
        mat[3, :] = true_rank[car2row[car], :n]
        mat[4, :] = pred_rank[car2row[car], :n]

    return forecasts_et
#calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset):
    """
    Evaluate rank by laptime forecasting (duplicate of eval_stint_bylaptime,
    kept for interface compatibility).

    input:
        forecasts_et  ; {}, carno -> 5 x totallen matrix
                        1,: -> true laptime, 2,: -> pred laptime
                        3,:/4,: -> filled here with true/pred rank
        start_offset  ; DataFrame of (car_number, elapsed_time) at lap 0 for
                        one specific event; anything else means offset 0
        prediction_length ; unused here, kept for interface compatibility
    return:
        forecasts_et  ; same dict, rows 3/4 filled in place
    """
    # get car list for this lap
    carlist = list(forecasts_et.keys())
    caridmap = {key: idx for idx, key in enumerate(carlist)}

    # convert laptime to elapsed time
    # todo, Indy500 -> 200 max laps
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:, :] = np.nan
    for carno in forecasts_et.keys():
        # bugfix: offset was unbound when start_offset is not a DataFrame;
        # default to 0 so laptimes are accumulated from the race start
        offset = 0
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number'] == carno)].elapsed_time.values[0]

        lapnum = len(forecasts_et[carno][1, :])

        elapsed = np.cumsum(forecasts_et[carno][1, :]) + offset
        elapsed_time[0, caridmap[carno], :lapnum] = elapsed

        elapsed = np.cumsum(forecasts_et[carno][2, :]) + offset
        elapsed_time[1, caridmap[carno], :lapnum] = elapsed

    # elapsed time -> per-lap rank across cars (argsort twice); nan sorts last
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)

    # save the ranks back into rows 3 (true) and 4 (pred)
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1, :])
        forecasts_et[carno][3, :] = true_rank[caridmap[carno], :lapnum]
        forecasts_et[carno][4, :] = pred_rank[caridmap[carno], :lapnum]

    return forecasts_et
# In[13]:
def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
        test_event='Indy500-2018', test_cars = [],
        datamode = MODE_ORACLE, model = 'oracle'):
    """
    Load the trained model and run the long-term predictor on the test event.

    dependency: test_event, test on one event only (module global _test_event
    is what actually selects the event below).

    NOTE(review): test_cars uses a mutable default; kept for interface
    compatibility — it is only passed through, never mutated here.
    """
    predictor = {}
    print('exp:', inspect.stack()[0][3], 'model:', model,
          'datamode:', get_modestr(datamode), 'eval:', _exp_id)

    # cleanup: removed dead locals (retdf, pred_ret, ds_ret, rank_result)
    # that were initialized but never used
    predictor[model] = load_model(prediction_length, model,
                                  trainid=trainid)

    ### create test dataset and forecast
    forecasts = longterm_predict(predictor[model],
                                 events_id[_test_event], prediction_length, freq,
                                 oracle_mode=datamode,
                                 run_ts=_run_ts,
                                 test_cars=test_cars,
                                 half_moving_win=half_moving_win,
                                 train_ratio=train_ratio
                                 )
    return forecasts
# In[14]:
def get_sign(diff):
    """Return the sign of a rank change: 1 (lost places), -1 (gained), else 0."""
    if diff > 0:
        return 1
    if diff < 0:
        return -1
    # zero (and any non-comparable value such as nan) maps to 0
    return 0
def get_stint_acc(forecasts, trim=2, currank = False):
    """
    Convert per-lap rank series into per-stint records and compare the
    predicted end-of-stint rank change against the truth.

    input:
        forecasts ; carno -> [5, totallen] matrix
                    0 -> lap_status (1 marks a pit lap)
                    3 -> true_rank
                    4 -> pred_rank
        trim      ; steady lap of the rank (laps before pit_inlap/pit_outlap)
        currank   ; True -> naive model: predicted end rank = start rank
                    ("zero doesn't work here", so the start rank is reused)
    output:
        DataFrame with columns
        carno, stintid, startrank, endrank, diff, sign,
        pred_endrank, pred_diff, pred_sign
    """
    def _sign(d):
        # sign of a rank delta; 0 for no change (and nan)
        if d > 0:
            return 1
        if d < 0:
            return -1
        return 0

    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1, :])
        true_rank = forecasts[carno][3, :]
        pred_rank = forecasts[carno][4, :]
        pitpos_list = np.where(forecasts[carno][0, :] == 1)[0]

        stintid = 0
        startrank = true_rank[0]
        for pitpos in pitpos_list:
            endrank = true_rank[pitpos - trim]
            pred_endrank = startrank if currank else pred_rank[pitpos - trim]
            rankret.append([carno, stintid, startrank,
                            endrank, endrank - startrank, _sign(endrank - startrank),
                            pred_endrank, pred_endrank - startrank,
                            _sign(pred_endrank - startrank)])
            stintid += 1
            startrank = true_rank[pitpos - trim]

        # final stint up to the finish line;
        # bugfix: also covers cars that never pitted (an empty pitpos_list
        # previously raised IndexError on pitpos_list[-1])
        if len(pitpos_list) == 0 or pitpos_list[-1] < lapnum - 1:
            endrank = true_rank[-1]
            pred_endrank = startrank if currank else pred_rank[-1]
            rankret.append([carno, stintid, startrank,
                            endrank, endrank - startrank, _sign(endrank - startrank),
                            pred_endrank, pred_endrank - startrank,
                            _sign(pred_endrank - startrank)])

    # add to df
    df = pd.DataFrame(rankret, columns=['carno', 'stintid', 'startrank',
                                        'endrank', 'diff', 'sign',
                                        'pred_endrank', 'pred_diff', 'pred_sign',
                                        ])
    return df
#
#
# configuration
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'
_test_event = 'Indy500-2018'
#_test_event = 'Indy500-2019'
_train_len = 40
# extra features that accompany the target series (FEATURE_STATUS / FEATURE_PITAGE)
_feature_mode = FEATURE_STATUS
_context_ratio = 0.
#_task_id = 'timediff' # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
#
#_task_id = 'lapstatus' # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus' #rank, laptime, laptim2rank, timediff2rank...
_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
# NOTE(review): _inlap_status feeds the dataset id suffix (v9_p{_inlap_status});
# its exact semantics (pit in-lap handling) should be confirmed upstream
_inlap_status = 1
_force_endpit_align = False
_include_endpit = False
#_use_mean = False # mean or median to get prediction from samples
_use_mean = True # mean or median to get prediction from samples
# In[16]:
# module-level state, populated by init()
global_start_offset = {}
global_carids = {}
laptime_data = None
laptime_data_save = None
freq = "1min"
decode_carids = {}
# events covered by the pickled dataset
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
def init(pitmodel = ''):
    """
    Load race data for all events, the laptime pickle, and build the pit model.

    pitmodel ; non-str (e.g. an int) -> PitModelSimple (0 selects top8=True),
               'oracle'              -> use the oracle pit information,
               any other str         -> path of a trained PitModelMLP model
    """
    global global_carids, laptime_data, global_start_offset, decode_carids, _pitmodel
    global dbid, _inlap_status
    # bugfix: laptime_data_save was assigned without a global declaration,
    # so the module-level copy was never updated
    global laptime_data_save

    dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'

    stagedata = {}
    for event in events:
        #alldata, rankdata, acldata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata = stagedata[event]
        # elapsed-time offset of every car at lap 0
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]

    # start from here
    import pickle
    laptimefile = f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
    with open(laptimefile, 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')

    laptime_data_save = laptime_data
    decode_carids = {carid: carno for carno, carid in global_carids.items()}
    print(f'init: load dataset {laptimefile} with {len(laptime_data)} races, {len(global_carids)} cars')

    # select the pit model implementation
    if not isinstance(pitmodel, str):
        _pitmodel = PitModelSimple(top8=(True if pitmodel==0 else False))
        print(f'init pitmodel as PitModelSimple')
    elif pitmodel=='oracle':
        _pitmodel = pitmodel
    else:
        _pitmodel = PitModelMLP(modelfile = pitmodel)
        print(f'init pitmodel as PitModelMLP(pitmodel)')
def runtest(modelname, model, datamode, naivemode, trainid= "2018"):
    """
    Run one experiment configuration and score the stint sign predictions.

    modelname ; label of the configuration (logging only)
    model     ; trained model id passed to run_exp
    datamode  ; oracle/prediction mode bitmask
    naivemode ; True -> force the current-rank (naive) prediction
    returns (acc, mae, rmse, r2); (0, 0, 0, 0) if _exp_id is unsupported
    """
    forecast = run_exp(2, 2, train_ratio=0.1, trainid=trainid,
                       datamode=datamode, model=model)

    if _exp_id == 'rank' or _exp_id == 'timediff2rank':
        forecasts_et = eval_stint_direct(forecast, 2)
    elif _exp_id == 'laptime2rank':
        forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
    else:
        print(f'Error, {_exp_id} evaluation not support yet')
        return 0, 0, 0, 0

    df = get_stint_acc(forecasts_et, currank=naivemode, trim=_trim)

    correct = df[df['sign'] == df['pred_sign']]
    acc = len(correct) / len(df)
    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values)) / len(df)
    # bugfix: the value reported as rmse was actually the MSE; take the
    # square root for consistency with get_evalret()
    rmse = math.sqrt(mean_squared_error(df['pred_diff'].values, df['diff'].values))
    mae = mean_absolute_error(df['pred_diff'].values, df['diff'].values)
    r2 = r2_score(df['pred_diff'].values, df['diff'].values)
    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}')
    return acc, mae, rmse, r2
def get_evalret(df):
    """
    Score per-stint sign/diff predictions against the truth.

    df ; DataFrame from get_stint_acc with columns
         startrank, endrank, diff, sign, pred_diff, pred_sign
    returns a 2x4 array: [[acc, mae, rmse, r2],           # model
                          [acc_naive, mae_naive, rmse_naive, r2_naive]]
    """
    # model accuracy: fraction of stints where the sign of the rank change
    # was predicted correctly
    correct = df[df['sign']==df['pred_sign']]
    acc = len(correct)/len(df)

    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values))/len(df)
    rmse = math.sqrt(mean_squared_error(df['pred_diff'].values , df['diff'].values))
    mae = mean_absolute_error(df['pred_diff'].values , df['diff'].values)
    r2 = r2_score(df['pred_diff'].values , df['diff'].values)

    #naive result: assume the rank does not change over the stint
    n_correct = df[df['startrank']==df['endrank']]
    acc_naive = len(n_correct)/len(df)
    mae_naive = np.mean(np.abs(df['diff'].values))
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)

    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}, {mae_naive1}')
    #print(f'pred: acc={acc}, mae={mae}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, {%d}\n \
           naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2, len(df),
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
     )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
#print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
#return acc, mae, rmse, r2
def get_evalret_shortterm(df):
    """
    Score short-term (per-lap window) predictions against the truth.

    df ; DataFrame with columns startlap, startrank, endrank, pred_endrank, diff
    Accuracy is measured on the leader only (rank 0) rather than on all
    stints, in contrast to get_evalret.
    returns a 2x4 array: [[acc, mae, rmse, r2],           # model
                          [acc_naive, mae_naive, rmse_naive, r2_naive]]
    """
    maxlap = np.max(df['startlap'].values)
    minlap = np.min(df['startlap'].values)

    # leader prediction accuracy: of all laps where the model predicted
    # rank 0, the fraction where the true rank was indeed 0
    top1 = df[df['endrank']==0]
    top1_pred = df[df['pred_endrank']==0]
    correct = top1_pred[top1_pred['pred_endrank']==top1_pred['endrank']]
    #acc = len(correct)/len(top1_pred)
    # epsilon guards against a zero-division when no rank-0 was predicted
    acc = len(correct)/(len(top1_pred) + 1e-10)

    rmse = math.sqrt(mean_squared_error(df['pred_endrank'].values , df['endrank'].values))
    mae = mean_absolute_error(df['pred_endrank'].values , df['endrank'].values)
    r2 = r2_score(df['pred_endrank'].values , df['endrank'].values)

    # absolute error per lap in the evaluated window
    mae1 = np.sum(np.abs(df['pred_endrank'].values - df['endrank'].values))
    mae1 = mae1/ (maxlap -minlap +1)

    #naive result: the current leader stays the leader
    top1_naive = df[df['startrank']==0]
    n_correct = top1_naive[top1_naive['startrank']==top1_naive['endrank']]
    acc_naive = len(n_correct)/len(top1_naive)
    mae_naive = np.mean(np.abs(df['diff'].values))
    mae_naive1 = np.sum(np.abs(df['diff'].values))
    mae_naive1 = mae_naive1 / (maxlap - minlap + 1)
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)

    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}, {mae_naive1}')
    #print(f'pred: acc={acc}, mae={mae}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, {%d}\n \
           naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2, len(top1_pred),
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
     )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
#
# In[20]:
def mytest():
    """
    Run the full battery of oracle/prediction configurations on the test
    event and save the evaluation table as csv.

    returns the result DataFrame (freshly computed, or loaded from the
    existing csv if it is already there).
    """
    savefile = f'stint-evluate-{_exp_id}-d{_dataset_id}-t{_test_event}-c{_context_ratio}_trim{_trim}.csv'
    if os.path.exists(savefile):
        print(f'{savefile} already exists, bye')
        retdf = pd.read_csv(savefile)
        # bugfix: return the loaded table instead of None
        return retdf

    # configuration name -> [model, datamode bitmask, naivemode]
    config = {'fulloracle': ['oracle', MODE_ORACLE, False],
              'laponly': ['oracle', MODE_ORACLE_LAPONLY, False],
              'notracklap': ['oracle', MODE_NOTRACK + MODE_NOLAP, False],
              'fullpred': ['oracle', MODE_PREDTRACK + MODE_PREDPIT, False],
              'curtrack': ['oracle', MODE_TESTCURTRACK, False],
              'zerotrack': ['oracle', MODE_TESTZERO, False],
              'predtrack': ['oracle', MODE_PREDTRACK + MODE_ORACLE_TRACKONLY, False],
              'predpit': ['oracle', MODE_PREDPIT + MODE_ORACLE_LAPONLY, False],
              'deepAR': ['deepAR', MODE_ORACLE, False],
              'naive': ['zero', MODE_ORACLE, True],
              }

    cols = ['runid', 'acc', 'mae', 'rmse', 'r2']
    result = []
    for modelname in config.keys():
        acc, mae, rmse, r2 = runtest(modelname, config[modelname][0],
                                     config[modelname][1], config[modelname][2])
        result.append([modelname, acc, mae, rmse, r2])

    retd = pd.DataFrame(result, columns=cols)
    # bugfix: write to the same filename checked above (the output name
    # previously omitted the _trim suffix, so the early-exit never matched)
    retd.to_csv(savefile, float_format='%.3f')
    return retd
# In[ ]:
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)

    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))

    # cmd argument parser
    usage = 'stint_predictor_fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    parser.add_option("--contextratio", dest="contextratio", default=0.)
    parser.add_option("--trim", dest="trim", type=int, default=0)
    opt, args = parser.parse_args()

    #set global parameters
    _dataset_id = opt.datasetid
    _test_event = opt.testevent
    _trim = opt.trim
    # bugfix: --contextratio was parsed but never applied to _context_ratio
    _context_ratio = float(opt.contextratio)

    # map the task id onto the target column and evaluation id
    if opt.taskid == 'laptime':
        _task_id = 'laptime'  # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME  #COL_LAPTIME,COL_RANK
        _exp_id = 'laptime2rank'  #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff'  # rank,laptime, the trained model's task
        _run_ts = COL_TIMEDIFF  #COL_LAPTIME,COL_RANK
        _exp_id = 'timediff2rank'  #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'rank':
        _task_id = 'rank'  # rank,laptime, the trained model's task
        _run_ts = COL_RANK  #COL_LAPTIME,COL_RANK
        _exp_id = 'rank'  #rank, laptime, laptim2rank, timediff2rank...
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)

    if _dataset_id == '' or _test_event == '':
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)

    # pitage-featured datasets switch the feature mode
    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE

    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
            _task_id)

    init()
    mytest()
rankpredictor | rankpredictor-master/src/indycar/model/pitmodel.py | #!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import random
import mxnet as mx
from mxnet import gluon
import pickle
import json
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
import inspect
from scipy import stats
from pathlib import Path
from sklearn.metrics import mean_squared_error
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.trainer import Trainer
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.model.forecast import SampleForecast
from indycar.model.mlp import MLPEstimator
import errno
class PitModelBase():
    """
    Base class for pit-stop prediction models.

    Holds a pickled (name, model) pair plus the set of query keys observed
    by predict(); subclasses implement save_model()/predict().
    """
    def __init__(self, modelfile=''):
        self.model = {}   # model payload, format defined by the subclass
        self.name = ''    # model name loaded from the pickle
        self.keys = {}    # query keys seen so far (used as a set)
        if modelfile:
            self.load_model(modelfile)

    def load_model(self, modelfile):
        """Load (name, model) from a pickle; raise FileNotFoundError if missing."""
        if not os.path.exists(modelfile):
            print('error loading pitmode at', modelfile)
            raise FileNotFoundError(
                errno.ENOENT, os.strerror(errno.ENOENT), modelfile)

        with open(modelfile, 'rb') as f:
            self.name, self.model = pickle.load(f, encoding='latin1')
            print(f'init model:{self.name}, #key:', len(self.model))

    def save_keys(self, keyfile):
        """Persist the observed query keys to a pickle file."""
        with open(keyfile, 'wb') as f:
            savedata = self.keys
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        print(f'save keys to {keyfile}.')

    def load_keys(self, keyfile):
        """Load the observed query keys from a pickle file."""
        with open(keyfile, 'rb') as f:
            self.keys = pickle.load(f, encoding='latin1')
        # bugfix: the message referenced an undefined name `data`
        print(f'load {len(self.keys)} keys from {keyfile}')

    def save_model(self, modelname, test_ds, forecasts, scaler):
        """Subclass hook: build and persist the model."""
        pass

    def predict(self, *args):
        """Subclass hook: sample one pit prediction for the given features."""
        pass

    def forecast_ds(self, test_ds, forecasts):
        """
        Replace the samples of each forecast with this model's predictions.

        test_ds   ; test set, the unscaled input records (target + features)
        forecasts ; template list of SampleForecast objects (one per record)
        returns a new list of SampleForecast objects
        """
        plen = len(test_ds)
        sample_cnt = forecasts[0].samples.shape[0]
        assert(plen == len(forecasts))

        # build a new forecasts object, copying metadata from the template
        nf = []
        for fc in forecasts:
            nfc = SampleForecast(samples = np.zeros_like(fc.samples),
                    freq=fc.freq, start_date=fc.start_date)
            nf.append(nfc)

        for idx, rec in enumerate(test_ds):
            feat = rec[1:]
            onecast = np.zeros((sample_cnt))
            for i in range(sample_cnt):
                # NOTE(review): feat[0] is a scalar while feat[[1]] is a
                # length-1 array — confirm predict() expects this mix
                onecast[i] = self.predict(feat[0], feat[[1]])
            nf[idx].samples = onecast

        return nf
### pitmodel
class PitModelSimple(PitModelBase):
    """
    Empirical pit model: sample the next pit lap from observed Indy500 2018
    stint lengths.

    model[0] ; stint lengths when caution_laps_instint <= 10
    model[1] ; stint lengths when caution_laps_instint  > 10
    """
    # this is the perfect empirical pit model for Indy500 2018
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
         [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
         [46, 45, 43, 48, 46, 45, 45, 43]]

    def __init__(self, modelfile='', top8 = False, retry = 10):
        super().__init__(modelfile)
        self.retry = retry
        if top8:
            self.model = self.pit_model_top8
        else:
            self.model = self.pit_model_all

    def predict(self, *args):
        """
        args = (caution_laps_instint, laps_instint); return the predicted
        pit lap. Re-samples up to self.retry times until the draw exceeds
        the laps already driven; falls back to laps_instint + 1.
        """
        retry = 0
        caution_laps_instint = args[0]
        laps_instint = args[1]

        key = '-'.join([str(int(x)) for x in args])
        self.keys[key] = 1

        while retry < self.retry:
            if caution_laps_instint <= 10:
                #use low model
                pred_pit_laps = random.choice(self.model[0])
            else:
                pred_pit_laps = random.choice(self.model[1])

            if pred_pit_laps <= laps_instint:
                retry += 1
                # bugfix: compare against the configured retry budget
                # instead of the hard-coded 10, otherwise a smaller budget
                # could return a stale (too small) draw
                if retry == self.retry:
                    pred_pit_laps = laps_instint + 1
                continue
            else:
                break

        return pred_pit_laps
class PitModelMLP(PitModelBase):
    """
    <caution_lap, pitage> -> [distribution]
    distribution := sorted cdf [val:probability, val2:p2, ...]
        [0,:] -> val
        [1,:] -> cdf p
    no scaler, raw feat and target
    """
    def __init__(self, modelfile=''):
        super().__init__(modelfile)

    def save_model(self, modelname, test_ds, forecasts, scaler):
        """Build the key -> cdf table from forecast samples and pickle it."""
        # bugfix: StandardScaler was referenced without being imported
        # anywhere in this module
        from sklearn.preprocessing import StandardScaler

        model = {}

        #get the sclaer for the first column(lap2nextpit)
        sc, scf = '', ''
        if isinstance(scaler, StandardScaler):
            sc = StandardScaler()
            sc.scale_ = scaler.scale_[0]
            sc.mean_ = scaler.mean_[0]
            sc.var_ = scaler.var_[0]
            scf = StandardScaler()
            scf.scale_ = scaler.scale_[1:]
            scf.mean_ = scaler.mean_[1:]
            scf.var_ = scaler.var_[1:]

        for idx, rec in enumerate(test_ds):
            feat = rec[1:]
            key = '-'.join([str(int(x)) for x in feat])

            if not key in model:
                samples = forecasts[idx].samples.reshape(-1)
                if not isinstance(sc, str):
                    samples = sc.inverse_transform(samples)

                #force to prediction to be valid lap2nextpit
                samples = samples.astype(int)
                samples = samples[samples > 0]

                # build the empirical cdf over the distinct sample values
                valset = set(list(samples))
                plen = len(valset)
                distr = np.zeros((2, plen))
                distr[0, :] = sorted(valset)
                smap = {val:id for id, val in enumerate(distr[0, :])}
                for s in samples:
                    distr[1,smap[s]] += 1
                tsum = np.sum(distr[1,:])
                distr[1, :] /= tsum
                distr[1, :] = np.cumsum(distr[1, :])

                model[key] = distr

        #save model
        self.model = model
        self.name = modelname
        with open(modelname, 'wb') as f:
            savedata = [self.name, self.model]
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        print(f'save model {modelname} with {len(self.model)} keys.')

    def predict(self, *args):
        """
        args = (caution_laps_instint, laps_instint); invert the stored cdf
        at a uniform random p and return the predicted total stint length.
        """
        key = '-'.join([str(int(x)) for x in args])
        try:
            distr = self.model[key]
            # draw p in [0, 1) and invert the cdf
            p = np.random.random()
            i = np.sum(distr[1,:] < p)
            # return totallen
            return int(distr[0,i]) + args[1]
        # bugfix: the bare `except:` also hid programming errors; only the
        # missing-key / out-of-range cases should fall back
        except (KeyError, IndexError):
            #todo, backto special model
            print(f'ERROR: key {key} not found in model')
            return 1 + args[1]
| 7,464 | 30.49789 | 310 | py |
rankpredictor | rankpredictor-master/src/indycar/model/stint_simulator_shortterm_pitmodel_copy1.py | #!/usr/bin/env python
# coding: utf-8
# ### stint simulator
# based on: Stint-Predictor-Fastrun
#
#
# long term predictor by continuously regressive forecasting at each pitstop
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from indycar.model.pitmodel import PitModelSimple, PitModelMLP
# In[2]:
import os
random.seed()
os.getcwd()
#GPUID = 1
# ### global constants
# In[3]:
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# column indices into the per-car laptime_data matrices
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8
# share the memory
#COL_LAPSTATUS_PRED = 8 # for dynamic lapstatus predictions
#LAPSTATUS SAVED in forecast_et
COL_LAPSTATUS_SAVE = 0 #laptime no use
COL_CAUTION_LAPS_INSTINT_SAVE=7
COL_LAPS_INSTINT_SAVE= 8
# feature-mode selector for the input series
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
# oracle mode (bit flags; combined with +/| into a datamode bitmask)
MODE_ORACLE = 1024  # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2
# oracle mode for training
# NOTE(review): MODE_NOLAP/MODE_NOTRACK reuse the values 1/2 of the
# TRACKONLY/LAPONLY flags above — confirm they are never mixed in one mask
MODE_NOLAP = 1
MODE_NOTRACK = 2
# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32
# disturbe analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256
# flag value -> human-readable name, used by get_modestr()
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
             MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
             MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
             MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
             MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
             MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
# In[4]:
def load_data(event, year=0):
    """
    Load the final lap-by-lap csv for one event.

    event ; event name used in the csv filename
    year  ; optional year suffix; 0 means the filename has no year part
    returns (alldata, rankdata, acldata)
        alldata  ; the raw csv as a DataFrame
        rankdata ; first record per (car, lap), sorted by elapsed time
        acldata  ; completed-laps dataset over all cars (make_cl_data)
    """
    #inputfile = '../data/final/C_'+ event +'-' + year + '-final.csv'
    if year > 0:
        # bugfix: `year` may be an int; string concatenation with '-' + year
        # raised TypeError — an f-string formats either type
        inputfile = f'../data/final/C_{event}-{year}.csv'
    else:
        inputfile = f'../data/final/C_{event}.csv'

    dataset = pd.read_csv(inputfile)
    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1

    # get records for the cars that finish the race
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)

    #make a copy
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # NOTE(review): cldata is computed but not returned — kept for parity
    # with the original; confirm whether it can be dropped
    cldata = make_cl_data(dataset)
    acldata = make_cl_data(alldata)

    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """
    Build the per-(car, completed_lap) dataset with rank/time deltas.

    dataset ; raw lap records (one or more rows per car and lap)
    returns a DataFrame with columns
        car_number, completed_laps, rank, elapsed_time,
        rank_diff, time_diff, current_status, track_status, lap_status
    where rank_diff/time_diff are per-car lap-over-lap deltas (0 on each
    car's first row).
    """
    # pick up data with valid rank: first record of each (car, lap) by time
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade", "laps_behind_prec", "overall_rank", "pit_stop_count",
                          "last_pitted_lap", "start_position", "laps_led"], axis=1)

    # each car's first row must not diff against the previous car's last row
    first_row_of_car = uni_ds.car_number != uni_ds.car_number.shift(1)

    # bugfix: use .loc instead of chained indexing
    # (uni_ds['rank_diff'][mask] = 0 triggers SettingWithCopyWarning and can
    # silently assign to a temporary copy)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[first_row_of_car, 'rank_diff'] = 0

    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[first_row_of_car, 'time_diff'] = 0

    df = uni_ds[['car_number', 'completed_laps', 'rank', 'elapsed_time',
                 'rank_diff', 'time_diff', "current_status", "track_status", "lap_status"]]
    return df
# In[5]:
def nan_helper(y):
    """Helper to handle indices and logical indices of NaNs.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, logical indices of NaNs
        - index, a function, with signature indices= index(logical_indices),
          to convert logical indices of NaNs to 'equivalent' indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x= nan_helper(y)
        >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
    """
    nan_mask = np.isnan(y)

    def to_indices(logical):
        # positions where the logical mask is True
        return logical.nonzero()[0]

    return nan_mask, to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
def get_modestr(a):
    """Render the mode bits set in *a* as a comma-terminated string.

    Iterates the module-level `_mode_map` (bit value -> human-readable name)
    and concatenates the names of all flags present in *a*.
    """
    parts = []
    for flag in _mode_map:
        if test_flag(a, flag):
            parts.append('%s,' % (_mode_map[flag]))
    return ''.join(parts)
# endpos -> vector of prediction_length
# Module-level caches keyed by the split point (endpos): the predicted and
# the true track-status vectors (each of length prediction_length) for that
# forecast window. Reset via init_track_model().
_track_pred = {}
_track_true = {}
def init_track_model():
    """Reset the per-split-point track-status prediction caches."""
    global _track_pred, _track_true
    _track_pred = {}
    _track_true = {}
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """
    Return the predicted track status for the forecast window ending at
    `endpos`, caching one prediction per split point in `_track_pred`.

    A caution-period length is sampled from the empirical distribution of
    Indy500-2018 caution runs; if a yellow flag is already in progress at
    the split point, the remaining laps of the sampled run are marked 1.
    """
    global _track_pred, _track_true
    # empirical caution-run lengths observed at Indy500 2018
    caution_lengths = [6, 4, 4, 5, 6, 6, 4]
    if endpos in _track_pred:
        return _track_pred[endpos]

    # count consecutive yellow-flag laps immediately before the window
    yellow_so_far = 0
    for back in range(1, context_len):
        if track_rec[-prediction_length - back] != 1:
            break
        yellow_so_far += 1

    # fill the future window: all green unless a caution is ongoing
    prediction = np.array([0] * prediction_length)
    sampled_len = random.choice(caution_lengths)
    if yellow_so_far > 0 and sampled_len > yellow_so_far:
        prediction[:(sampled_len - yellow_so_far)] = 1

    _track_pred[endpos] = prediction
    _track_true[endpos] = track_rec[-prediction_length:].copy()
    return prediction
# endpos -> vector of prediction_length
# Cache for adjust_track_model: split point -> adjusted track-status vector.
_track_adjust = {}
def init_adjust_track_model():
    """Clear the cached track-status adjustments (one entry per endpos)."""
    global _track_adjust
    _track_adjust = {}
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """
    Randomly perturb the end of the oracle caution period by -1/0/+1 laps.

    input:
        tailpos ; <0, position of the last caution lap in the window
    return the (cached) adjusted track status for this split point
    """
    global _track_adjust
    if endpos in _track_adjust:
        return _track_adjust[endpos]

    # draw the shift applied to the caution tail
    shift = random.choice([-1, 0, 1])
    adjusted = track_rec[-prediction_length:].copy()
    if shift == -1:
        # shorten the caution period by one lap
        adjusted[tailpos] = 0
    elif shift == 1:
        # move the tail one lap later (only when it stays inside the window)
        adjusted[tailpos] = 0
        if (tailpos + 1) <= -1:
            adjusted[tailpos + 1] = 1

    _track_adjust[endpos] = adjusted
    return adjusted
# carno -> lap_status
# Per-car cache of the disturbed (randomly shifted) pit-lap vector, plus a
# tally of how often each shift offset was actually applied.
_lap_adjust = {}
_empirical_model = {}
def init_adjust_pitmodel():
    """Reset the per-car pit-lap adjustment cache and the shift statistics.

    Bug fix: `_empirical_model` was assigned without a `global` declaration,
    so the "reset" only created a function-local variable and the
    module-level statistics silently kept accumulating across calls.
    """
    global _lap_adjust, _empirical_model
    _lap_adjust = {}
    _empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """
    Build (once per car) a randomly disturbed copy of the true pit-lap vector.

    Each pit lap is shifted by an offset drawn from `_adjust_model`; the
    result is cached in `_lap_adjust`, so repeated calls for the same car
    return the identical disturbance. Applied offsets are tallied in
    `_empirical_model`.

    input:
        carno ; car number, used as the cache key
        lapstatus ; the ground-truth 0/1 pit-lap vector
        force ; when True, re-draw until the shifted position lands inside
                the vector; when False, give up after a single draw
    return:
        the cached, disturbed lap-status vector for this car
    """
    if carno not in _lap_adjust:
        #adjust it
        lapadjust = lapstatus.copy()
        for pos in range(0, len(lapstatus)):
            if lapadjust[pos] == 1:
                success = False
                while(not success):
                    # adjust this pit lap position
                    pos_adjust = get_random_choice(_adjust_model)
                    new_pos = pos + pos_adjust
                    if new_pos >= 0 and new_pos < len(lapstatus):
                        #valid
                        # NOTE(review): a forward shift can land on a later
                        # pit lap and merge two pits into one — confirm this
                        # is acceptable for the disturbance analysis.
                        lapadjust[pos] = 0
                        lapadjust[new_pos] = 1
                        success = True
                        #add statistics
                        if pos_adjust not in _empirical_model:
                            _empirical_model[pos_adjust] = 1
                        else:
                            _empirical_model[pos_adjust] += 1
                    if force==False:
                        break
        _lap_adjust[carno] = lapadjust
    return _lap_adjust[carno]
def build_random_model(modeldict):
    """
    Build a cumulative-distribution sampling table from a discrete pmf.

    input:
        modeldict ; {val: probability} (weights need not sum to 1)
    return:
        model ; array of rows [val, cdf], sorted by val, cdf normalized so
                the last entry is 1.0
    """
    values = sorted(modeldict.keys())
    model = np.zeros((len(values), 2))
    running = 0
    for row, val in enumerate(values):
        running = running + modeldict[val]
        model[row, 0] = val
        model[row, 1] = running
    # normalize the cdf column by the total mass
    model[:, 1] = model[:, 1] / running if len(values) else model[:, 1]
    return model
def print_model(model, iscdf=True):
    """
    Pretty-print a sampling table as a list of 'val:prob' strings.

    input:
        model ; rows of [val, cdf] (or [val, weight] when iscdf is False)
        iscdf ; when True, probabilities are recovered by differencing the
                cdf column; otherwise the weights are normalized by their sum
    """
    ordered = model[np.argsort(model[:, 0])]
    total = 1. if iscdf else np.sum(ordered[:, 1])
    pairs = []
    prev_cdf = 0
    for row in ordered:
        pairs.append((row[0], (row[1] - prev_cdf) / total))
        if iscdf:
            prev_cdf = row[1]
    #output
    print(['%d:%.3f' % (p[0], p[1]) for p in pairs])
def get_random_choice(model):
    """
    Sample a value from a cdf table built by build_random_model.

    input:
        model ; rows of [val, cdf], cdf ascending with final entry 1.0
    return:
        an int value, drawn according to its probability
    """
    u = np.random.rand()
    # count how many cdf entries lie strictly below the uniform draw;
    # that count is the row index of the sampled value
    row = np.sum(model[:, 1] < u)
    return int(model[row, 0])
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
# Distribution of pit-lap shift offsets for the disturbance analysis; the
# mass at 0 is deliberately small so that most pits actually get moved.
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
# Pre-built cdf sampling table consumed by get_random_choice().
_adjust_model = build_random_model(_modeldict)
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """
    Randomly shift each pit lap inside the forecast window.

    Offsets are drawn from the module-level `_adjust_model`; a draw that
    would move a pit outside the window is retried (when `force` is True)
    or abandoned (when `force` is False).

    return the disturbed lap-status vector for the last
    `prediction_length` laps
    """
    window = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if window[pos] != 1:
            continue
        placed = False
        while not placed:
            # draw a shift for this pit lap
            offset = get_random_choice(_adjust_model)
            new_pos = pos + offset
            if new_pos >= 0 and new_pos < prediction_length:
                #valid
                window[pos] = 0
                window[new_pos] = 1
                placed = True
            if force == False:
                break
    return window
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
    """
    Randomly move each pit lap in the forecast window by -1, 0 or +1 laps.

    input:
        endpos ; unused here, kept for interface symmetry
    return the disturbed lap-status vector for the last
    `prediction_length` laps
    """
    offsets = [-1, 0, 1]
    # NOTE: this initial draw is not used by the logic below, but it is kept
    # so the global random stream matches the original implementation.
    _ = random.choice(offsets)

    window = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if window[pos] != 1:
            continue
        # draw an independent shift for this pit lap; moves that would fall
        # outside the window are silently dropped
        move = random.choice(offsets)
        if move == -1 and (pos - 1 >= 0):
            window[pos] = 0
            window[pos - 1] = 1
        elif move == 1 and (pos + 1 < prediction_length):
            window[pos] = 0
            window[pos + 1] = 1
    return window
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
    """
    Predict the pit-stop indicator for the next `prediction_length` laps.

    A target stint length is sampled from empirical Indy500-2018 data:
    row 0 holds stints with many caution laps (lower fuel burn), row 1 the
    predominantly green-flag stints. If the sampled stint ends inside the
    forecast window, a single 1 is placed at the corresponding lap.
    """
    # this is the perfect empirical pit model for Indy500 2018
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
            [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    # alternative model restricted to the top-8 finishers (currently unused)
    pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
            [46, 45, 43, 48, 46, 45, 45, 43]]
    pit_model = pit_model_all

    if cuation_laps_instint > 10:
        # many caution laps in this stint -> the "low fuel burn" distribution
        stint_len = random.choice(pit_model[0])
    else:
        stint_len = random.choice(pit_model[1])

    prediction = np.array([0] * prediction_length)
    # lap (1-based, relative to the window start) at which the stint ends
    lap_of_pit = stint_len - laps_instint
    if 0 < lap_of_pit <= prediction_length:
        prediction[lap_of_pit - 1] = 1
    return prediction
def make_dataset_byevent(runs, prediction_length, freq,
                 useeid = False,
                 run_ts= COL_LAPTIME,
                 test_event = 'Indy500-2018',
                 test_cars = [],
                 use_global_dict = True,
                 oracle_mode = MODE_ORACLE,
                 half_moving_win = 0,
                 train_ratio=0.8,
                 log_transform = False,
                 context_ratio = 0.,
                 verbose = False
                 ):
    """
    split the ts to train and test part by the ratio

    Builds GluonTS train/test record lists from the module-level
    `laptime_data`. Only the event equal to the global `_test_event` is
    processed here: it produces one test record per rolling split point.
    All other events are skipped via `continue` (so `_train` stays empty).

    input:
        runs ; index into laptime_data, or <0 to use all events
        prediction_length ; forecast horizon in laps
        freq ; pandas frequency string for the synthetic start timestamp
        useeid ; include the event id as a second static feature
        run_ts ; NOTE: immediately overridden by global _run_ts below
        test_event ; NOTE: immediately overridden by global _test_event below
        test_cars ; unused here (NOTE(review): mutable default [])
        use_global_dict ; map car numbers through the global_carids dict
        oracle_mode: false to simulate prediction in real by
            set the covariates of track and lap status as nan in the testset
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
        train_ratio ; fraction of the longest ts used as the min-length cutoff
        log_transform ; apply log(1+x) to the target values
        context_ratio ; overrides the default context length when nonzero
        verbose ; print per-event / per-car diagnostics
    return:
        train_ds, test_ds ; ListDataset objects
        train_set, test_set ; the underlying record lists
    """
    # these parameters are superseded by module-level configuration
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode

    # reset the per-split-point caches used by PREDTRACK / ADJUSTTRACK modes
    init_track_model()
    init_adjust_track_model()

    start = pd.Timestamp("01-01-2019", freq=freq)    # can be different for each time series
    train_set = []
    test_set = []

    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()

    #add statistics for adjust test
    # trackstatus, lapstatus
    # accumulated |disturbed covariate - raw covariate|: [track_mae, lap_mae]
    mae = [0,0]

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []

        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out
            # only the test event is processed by this function
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)

        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)

            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)

            # selection of features: zero out covariates excluded by the mode
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                        'start': start,
                        'feat_static_cat': static_cat,
                        'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                               rec[COL_LAPSTATUS,:]]
                         }
                      )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len

                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1

                #bug fix, fixed the split point for all cars/ts
                # the rolling split points are derived from max_len so every
                # car is evaluated at the same lap numbers
                for endpos in range(max_len, context_len+prediction_length,
                                    step):

                    #check if enough for this ts
                    if endpos > totallen:
                        continue

                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    # stint state at the last observed lap before the window
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0

                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0

                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                laps_instint,prediction_length)

                        #for pitage, use the predicted lap info to update pitage
                        # NOTE(review): non-pit laps all get start_pitage+1
                        # (no per-lap increment), unlike the TESTCURTRACK /
                        # TESTZERO branches above — confirm this is intended.
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec

                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec

                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))

                    # emit one test record per split point; covariates depend
                    # on the configured feature mode
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec]
                             }
                          )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                             }
                          )
                    test_rec_cnt += 1

            #add one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

        train_set.extend(_train)
        test_set.extend(_test)

    print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)

    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile, freq, prediction_length, cardinality, train_ds, test_ds):
    """Pickle the dataset bundle [freq, prediction_length, cardinality,
    train_ds, test_ds] to *datafile*."""
    bundle = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as out:
        # highest protocol: most compact and fastest on this interpreter
        pickle.dump(bundle, out, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[6]:
def predict(test_ds, predictor):
    """Run gluonts evaluation predictions and materialize the lazy iterators.

    Returns (tss, forecasts): the ground-truth series and the sampled
    forecast objects, in dataset order.
    """
    forecast_iter, truth_iter = make_evaluation_predictions(
        dataset=test_ds,      # test dataset
        predictor=predictor,  # predictor
        num_samples=100,      # number of sample paths we want for evaluation
    )
    # consume the forecast iterator first, then the truth iterator
    # (same consumption order as before)
    forecasts = list(forecast_iter)
    tss = list(truth_iter)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name,trainid):
    """
    Load the predictor identified by `model_name` and run it on `test_ds`.

    input:
        test_ds ; GluonTS dataset to evaluate
        prediction_length ; forecast horizon (also part of the model dir name)
        model_name ; one of 'curtrack','zerotrack','oracle','oracle-laponly',
                     'oracle-trackonly','deepAR','naive','zero','arima'
        trainid ; training-run id, part of the model directory name
    return:
        [tss, forecasts] on success; [] for an unknown model_name
    """
    # NOTE(review): GPU id 7 is hard-coded — confirm it exists on the host.
    with mx.Context(mx.gpu(7)):
        pred_ret = []

        # serialized models live under ../models/remote/<dataset>/<task>-<trainid>/
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'

        # deepAR-Oracle
        if model_name == 'curtrack':
            model=f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        elif model_name == 'zerotrack':
            model=f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        # deepAR-Oracle
        elif model_name == 'oracle':
            model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        # deepAR-Oracle
        elif model_name == 'oracle-laponly':
            model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        # deepAR-Oracle
        elif model_name == 'oracle-trackonly':
            model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        # deepAR
        elif model_name == 'deepAR':
            model=f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        # naive: repeats the last observed value
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        # arima (via the R forecast package)
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                                           prediction_length = prediction_length,trunc_length=60)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        else:
            print(f'error: model {model_name} not support yet!')

        return pred_ret
def load_model(prediction_length, model_name,trainid):
    """
    Deserialize (or construct) the predictor for `model_name`.

    input:
        prediction_length ; forecast horizon the model was trained for
        model_name ; one of 'curtrack','zerotrack','oracle','oracle-laponly',
                     'oracle-trackonly','deepAR','naive','zero','arima'
        trainid ; training-run id, part of the model directory name
    return:
        the predictor, or None when model_name is unknown
        (bug fix: an unknown name previously left `predictor` unbound and
        raised NameError on the return; also drops the dead `pred_ret` local)
    """
    # NOTE(review): GPU id 7 is hard-coded — confirm it exists on the host.
    with mx.Context(mx.gpu(7)):
        predictor = None

        # serialized models live under ../models/remote/<dataset>/<task>-<trainid>/
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'

        # deepAR-Oracle
        if model_name == 'curtrack':
            model=f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        elif model_name == 'zerotrack':
            model=f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # deepAR-Oracle
        elif model_name == 'oracle':
            model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # deepAR-Oracle
        elif model_name == 'oracle-laponly':
            model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # deepAR-Oracle
        elif model_name == 'oracle-trackonly':
            model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # deepAR
        elif model_name == 'deepAR':
            model=f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # naive: repeats the last observed value
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
        # arima (via the R forecast package)
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                                           prediction_length = prediction_length,trunc_length=60)
        else:
            print(f'error: model {model_name} not support yet!')

        return predictor
# In[7]:
#calc rank
def eval_rank_bytimediff(test_ds,tss,forecasts,prediction_length):
    """
    Derive true/predicted ranks from time-difference forecasts.

    works for one event only (test_ds must hold a single event, because the
    records carry no feature that identifies the event).

    input:
        test_ds ; test dataset, one record per car/split point
        tss,forecasts ; outputs of make_evaluation_predictions
        prediction_length ; expected forecast horizon
    return:
        rank_ret ; list of [lap, time_diff, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [true_diff, pred_diff]}}
    """
    carlist = []

    # carno-lap# -> time-diff arrays, keyed by split point then car number
    forecasts_et = dict()

    ds_iter =  iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # decode_carids is a module-level map: static-cat id -> car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: the message was missing its f-string prefix and
            # printed the literal placeholders
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bug fix: keep the arity of the normal return path so callers
            # that unpack two values do not crash
            return [], {}

        # median across sample paths (more robust to outliers than the mean)
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))

        timediff_array = tss[idx].values.copy()

        #save the prediction
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                                forecast_laptime_mean.copy()]

    # calc rank: at each split point, order the cars by time difference
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}

        #fill in data
        time_diff = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        # double argsort converts sort order into rank positions
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, time_diff, true_rank, pred_rank])

    return rank_ret,forecasts_et
#calc rank
def eval_laptime(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event (unused here)
        tss,forecasts ; forecast result
        prediction_length ; expected forecast horizon
    return:
        rank_ret ; [lap, lap_time, true_laptime, pred_laptime]
        forecasts_et ; {[completed_laps][carno]} ->(laptime, laptime_pred)
    """
    carlist = []

    # carno-lap# -> laptime arrays, keyed by split point then car number
    forecasts_et = dict()

    ds_iter =  iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # decode_carids is a module-level map: static-cat id -> car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: the message was missing its f-string prefix and
            # printed the literal placeholders
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bug fix: keep the arity of the normal return path so callers
            # that unpack two values do not crash
            return [], {}

        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        #forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))

        laptime_array = tss[idx].values.copy()
        laptime_array_hat = tss[idx].values.copy()
        laptime_array_hat[-prediction_len:] = forecast_laptime_mean

        #save the prediction
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
                                laptime_array_hat[-prediction_len:].copy()]

    # collect true/predicted laptime matrices per split point
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}

        #fill in data
        lap_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        # laptimes are reported directly; no rank conversion in this variant
        true_laptime = lap_time[0]
        pred_laptime = lap_time[1]

        rank_ret.append([lap, lap_time, true_laptime, pred_laptime])

    return rank_ret,forecasts_et
#calc rank
def eval_rank(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    When `start_offset` is a DataFrame, the target is treated as lap time
    and converted to elapsed time (cumsum + per-car lap-0 offset) before
    ranking; otherwise the target values are ranked directly.

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event
        tss,forecasts ; forecast result
        prediction_length ; expected forecast horizon
    return:
        rank_ret ; [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
    """
    carlist = []

    # carno-lap# -> elapsed-time arrays, keyed by split point then car number
    forecasts_et = dict()

    ds_iter =  iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # decode_carids is a module-level map: static-cat id -> car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        # per-car elapsed time at lap 0, needed only in laptime mode
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: the message was missing its f-string prefix and
            # printed the literal placeholders
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bug fix: keep the arity of the normal return path so callers
            # that unpack two values do not crash
            return [], {}

        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        #forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))

        if isinstance(start_offset, pd.core.frame.DataFrame):
            # laptime2rank: convert lap times to elapsed time before ranking
            laptime_array = tss[idx].values.copy()
            elapsed_time = np.cumsum(laptime_array) + offset

            laptime_array = tss[idx].values.copy()
            laptime_array[-prediction_len:] = forecast_laptime_mean
            elapsed_time_hat = np.cumsum(laptime_array) + offset
        else:
            # rank directly on the raw target values
            elapsed_time = tss[idx].values.copy()
            elapsed_time_hat = tss[idx].values.copy()
            elapsed_time_hat[-prediction_len:] = forecast_laptime_mean

        #save the prediction
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                elapsed_time_hat[-prediction_len:].copy()]

    # calc rank: at each split point, order the cars by elapsed time
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}

        #fill in data
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        # double argsort converts sort order into rank positions
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])

    return rank_ret,forecasts_et
def get_acc(rank_ret,prediction_length, verbose = False):
    """
    Aggregate rank-accuracy metrics over a list of split-point results.

    input:
        rank_ret: [lap, elapsed_time, true_rank, pred_rank], use [2][3] columns
    return:
        ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
         (record count for each metric...))
        the result can be used to calculate micro/macro metrics
    """
    # evaluate
    #top1 accuracy
    top1acc = 0
    top1acc_farmost = 0
    top5acc = 0
    top5acc_farmost = 0
    tau = 0
    rmse = 0.
    mae = 0.

    for rec in rank_ret:
        trueRank = rec[2]
        predRank = rec[3]

        #top1 , rank = 0, first col is not prediction
        # "farmost" variants look only at the last predicted lap ([:,-1])
        top1acc += np.sum((trueRank==0) & (predRank==0))
        top1acc_farmost += np.sum((trueRank[:,-1]==0) & (predRank[:,-1]==0))

        #top5
        top5acc += np.sum((trueRank<5) & (predRank<5))
        top5acc_farmost += np.sum((trueRank[:,-1]<5) & (predRank[:,-1]<5))

        # tau: Kendall rank correlation between true and predicted ranks
        tao, _ = stats.kendalltau(trueRank, predRank)
        tau += tao

        #rmse
        rmse += mean_squared_error(predRank,trueRank)
        #mae
        mae += np.sum(np.abs(predRank - trueRank))

    recnt = len(rank_ret)
    if recnt > 0:
        # normalize each accumulator by the number of comparisons it counted
        top1acc = top1acc *1.0/ (recnt*prediction_length)
        top1acc_farmost = top1acc_farmost *1.0/ recnt
        top5acc = top5acc *1.0/ (5*recnt*prediction_length)
        top5acc_farmost = top5acc_farmost *1.0/ (5*recnt)
        tau = tau/recnt
        rmse = rmse/recnt
        mae = mae/recnt

    #debug only
    # HACK: when forecasting the pit-stop indicator directly, the tau slot
    # is overwritten with MAE so downstream tables show a meaningful number
    if _run_ts == COL_LAPSTATUS:
        tau = mae

    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', top1acc,
                'top1acc_farmost=', top1acc_farmost,
                'top5acc=', top5acc,
                'top5acc_farmost=', top5acc_farmost,
                )
        print('tau = ', tau,
                'rmse = ', rmse,
                'mae = ', mae)

    return ((top1acc,top1acc_farmost,top5acc,top5acc_farmost,tau,rmse),
            (recnt*prediction_length,recnt,5*recnt*prediction_length,5*recnt,recnt,recnt))
# In[ ]:
# In[8]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc, datamode='', models=[]):
    """
    Run a grid of experiments <runs> times and average the metrics.

    input:
        runs ; number of repetitions
        plens ; prediction lengths, e.g. [2,5,10]
        half ; half_moving_win settings, e.g. [True,False]
        trainids ; train-set identifiers, e.g. ["indy500-r0.2","r0.5"]
        train_ratio ; train/test split ratio
        testfunc ; a callable (run_exp_predpit, run_exp_predtrack, ...) or a
                   string; when a string, run_exp is called with datamode/models
        datamode, models ; only used when testfunc is a string
    return:
        dfret ; average result of multiple runs
        dataframe['model' , 'prediction_length', 'halfmode','datamode','trainid',
            'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse',
            'top1acc_std','top1acc_farmost_std','top5acc_std','top5acc_farmost_std','tau_std','rmse_std']
        alldata_ret ; for debug
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
            pred_ret[model] -> [tss, forecasts]
            test_ds[model] -> test_ds
            rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
            forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    # argument sanity checks
    if plens == [] or half == [] or trainids == []:
        print("error with empty settings")
        return
    #testfunc or (datamode & models)
    if isinstance(testfunc,str) and (datamode == '' or models == []):
        print("error with testfunc")
        return
    allret = []        # per-run metric dataframes
    alldata_ret = []   # per-run raw experiment data (debug)
    for runid in range(runs):
        exp_data = []
        exp_result = []
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('='*10)
                    if not isinstance(testfunc,str):
                        pred_ret, test_ds, rank_ret, metric_ret = testfunc(plen, halfmode,
                                train_ratio=train_ratio,
                                trainid=trainid)
                    else:
                        pred_ret, test_ds, rank_ret, metric_ret = run_exp(plen, halfmode,
                                train_ratio=train_ratio,
                                trainid=trainid,
                                datamode=datamode,
                                models=models)
                    #save
                    exp_data.append((pred_ret, test_ds, rank_ret))
                    exp_result.extend(metric_ret)
        #save result for this run
        result = pd.DataFrame(exp_result, columns = ['model' , 'prediction_length', 'halfmode',
                                    'datamode','trainid',
                                    'top1acc','top1acc_farmost','top5acc',
                                    'top5acc_farmost','tau','rmse'])
        #result['runid'] = [runid for x in range(len(result))]
        allret.append(result)
        alldata_ret.append(exp_data)
    #final: stack the six metric columns of every run into one (runs, rows, 6) tensor
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :,:] = ret[['top1acc','top1acc_farmost','top5acc',
                                    'top5acc_farmost','tau','rmse']].values
    #average over the runs axis
    averagemat = np.mean(metrics[:,:,:], axis=0)
    stdmat = np.std(metrics[:,:,:], axis=0)
    dfhead = allret[0][['model' , 'prediction_length', 'halfmode', 'datamode','trainid']]
    dfaverage = pd.DataFrame(averagemat, columns = ['top1acc','top1acc_farmost','top5acc',
                                    'top5acc_farmost','tau','rmse'])
    dfstd = pd.DataFrame(stdmat, columns = ['top1acc_std','top1acc_farmost_std','top5acc_std',
                                    'top5acc_farmost_std','tau_std','rmse_std'])
    dfret = pd.concat([dfhead, dfaverage, dfstd], axis=1)
    #if exp_id != '':
    #    dfret.to_csv(f'laptime2rank-evaluate-indy500-{exp_id}-result.csv', float_format='%.3f')
    return dfret, alldata_ret
def checkret_status(dataret, runid = 0, idx = 0,model='oracle'):
    """
    Sanity-check one test dataset: count the caution (track) and pit (lap)
    flags that fall inside the prediction windows.

    dataret layout (see run_test):
        [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
        pred_ret[model] -> [tss, forecasts]
        test_ds[model] -> test_ds
    """
    # the prediction horizon is the second dim of the forecast samples
    _, plen = dataret[runid][idx][0][model][1][0].samples.shape
    test_ds = dataret[runid][idx][1][model]
    yellow_flag_cnt = 0
    pit_flag_cnt = 0
    for test_rec in test_ds:
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        track_rec, lap_rec = test_rec['feat_dynamic_real']
        # only the last <plen> laps of each record form the forecast window
        yellow_flag_cnt += np.sum(track_rec[-plen:])
        pit_flag_cnt += np.sum(lap_rec[-plen:])
    print('yfcnt:', yellow_flag_cnt, 'pitcnt:', pit_flag_cnt)
def get_ref_oracle_testds(plens, halfs, train_ratio=0.8, test_cars=None):
    """
    Build the reference oracle test datasets for every
    (prediction_length, half_moving_win) combination.

    input:
        plens ; list of prediction lengths
        halfs ; list of half_moving_win settings
        train_ratio ; train/test split ratio passed through
        test_cars ; optional list of car numbers to restrict the test set
                    (default: empty list, i.e. all cars)
    return:
        testset ; dict keyed by '<prediction_length>-<half_moving_win>' -> test_ds
    """
    # fix: avoid a shared mutable default argument
    if test_cars is None:
        test_cars = []
    testset = {}
    for prediction_length in plens:
        for half_moving_win in halfs:
            # oracle mode: ground-truth track/lap status is visible to the model
            train_ds, test_ds,_,_ = make_dataset_byevent(events_id[_test_event], prediction_length,freq,
                                     oracle_mode=MODE_ORACLE,
                                     run_ts = _run_ts,
                                     test_cars=test_cars,
                                     half_moving_win= half_moving_win,
                                     train_ratio=train_ratio)
            # get key
            key = '%d-%d'%(prediction_length,half_moving_win)
            testset[key] = test_ds
    return testset
def checkret_confusionmat(dataret, ref_testset, runid= 0, testid = '', model='oracle'):
    """
    output the 4x4 confusion matrix split by track and lap status

    input:
        dataret ; experiment data from run_test (see its docstring)
        ref_testset ; oracle test datasets keyed by '<plen>-<half>'
                      (from get_ref_oracle_testds)
        runid ; which repetition of the experiment to analyze
        testid ; label copied into the output rows
        model ; model name to evaluate
    return:
        dfacc ; dataframe['testid','plen','type','reccnt', metrics...] where
                type is '<track><lap>' in {'00','10','01','11'} plus 'aa'
                for the unsplit total
    """
    plen_length = len(dataret[runid])
    dflist = []
    for idx in range(plen_length):
        # recover the prediction length from the forecast sample shape
        _, plen = dataret[runid][idx][0][model][1][0].samples.shape
        test_ds = dataret[runid][idx][1][model]
        rank_ret = dataret[runid][idx][2][model][0]
        key = '%d-%d'%(plen,0)
        if key not in ref_testset:
            print(f'error, {key} not found in ref_testset')
            continue
        ref_oracle_testds = ref_testset[key]
        if len(ref_oracle_testds) != len(test_ds):
            print('error, size of testds mismatch', len(ref_oracle_testds), len(test_ds))
            continue
        # confusion matrix for <trackstatus, lapstatus> type: 00,01,10,11
        # lap(start lap of prediction) -> type
        lapmap = {}
        ds_iter = iter(ref_oracle_testds)
        for recid in range(len(ref_oracle_testds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            track_rec,lap_rec = test_rec['feat_dynamic_real']
            # flag counts inside the prediction window only
            yfcnt = np.sum(track_rec[-plen:])
            pitcnt = np.sum(lap_rec[-plen:])
            #laptype = ('0' if yfcnt==0 else '1') + ('0' if pitcnt==0 else '1')
            lap = len(track_rec) - plen + 1
            if lap not in lapmap:
                #lapmap[lap] = laptype
                lapmap[lap] = (yfcnt, pitcnt)
            else:
                # aggregate flags over all cars that share this start lap
                oldtype = lapmap[lap]
                lapmap[lap] = (yfcnt + oldtype[0], pitcnt + oldtype[1])
        #split the rank_ret by laptype
        types=['00','10','01','11']
        acc_ret = []
        for laptype in types:
            check_ret = []
            for item in rank_ret:
                typecnt = lapmap[item[0]]
                thetype = ('0' if typecnt[0]==0 else '1') + ('0' if typecnt[1]==0 else '1')
                if thetype == laptype:
                    check_ret.append(item)
            # get acc for this cell of the confusion matrix
            metrics = get_acc(check_ret,plen)
            recret = [testid, plen, laptype, len(check_ret)]
            recret.extend(metrics[0])
            acc_ret.append(recret)
        #add all test (unsplit totals, labeled 'aa')
        metrics = get_acc(rank_ret,plen)
        recret = [testid, plen, 'aa', len(rank_ret)]
        recret.extend(metrics[0])
        acc_ret.append(recret)
        _dfacc = pd.DataFrame(acc_ret, columns = ['testid','plen',
                'type','reccnt','top1acc','top1acc_farmost','top5acc',
                'top5acc_farmost','tau','rmse'])
        dflist.append(_dfacc)
    dfacc = pd.concat(dflist, axis=0)
    return dfacc
# In[9]:
def check_testds(datamode, test_cars=None):
    """
    Build datasets for every (prediction_length, half_moving_win) setting so
    make_dataset_byevent can report its statistics (mae, etc.).

    NOTE(review): the constructed datasets are deliberately discarded -- this
    helper exists only for make_dataset_byevent's side-effect reporting.

    input:
        datamode ; oracle_mode flags forwarded to make_dataset_byevent
        test_cars ; optional list of car numbers (default: all cars)
    """
    # fix: avoid a shared mutable default argument
    if test_cars is None:
        test_cars = []
    for prediction_length in plens:
        for half_moving_win in half:
            train_ds, test_ds,_,_ = make_dataset_byevent(events_id[_test_event], prediction_length,freq,
                                     oracle_mode=datamode,
                                     run_ts = _run_ts,
                                     test_cars=test_cars,
                                     half_moving_win= half_moving_win,
                                     train_ratio=train_ratio)
def dotest(config):
    """
    Run every configured (model, testfunc) experiment and collect both the
    averaged metrics and the confusion-matrix style breakdown.

    input:
        config ; {model_name: {testfunc_name(str): datamode}}
    return:
        (dfret, dfacc) ; metric and per-type accuracy dataframes,
        concatenated over all experiments
    """
    acc_frames = []
    metric_frames = []
    for model, conf in config.items():
        for teststr, datamode in conf.items():
            # run the experiment grid for this single model / test function
            df, dataret = run_test(runs, plens, half, trainids,
                       train_ratio, teststr, datamode=datamode, models=[model])
            # per-<track,lap>-type accuracy breakdown
            acc = checkret_confusionmat(dataret, ref_testset,
                        testid = teststr, model=model)
            metric_frames.append(df)
            acc_frames.append(acc)
    dfret = pd.concat(metric_frames, axis=0)
    dfacc = pd.concat(acc_frames, axis=0)
    return dfret, dfacc
#
# simulator
#
def get_pitlaps(verbose = True, prediction_length=2):
    """
    collect pitlaps info from COL_LAPSTATUS

    input (globals):
        laptime_data ; per-event ts container
        _test_event ; event under evaluation
        events ; eventid -> event name mapping
        _train_len ; minimum laps for a ts(otherwise, discard)
        global_carids ; carno-> carid mapping
    return:
        ret_pitlaps ; sorted, deduplicated laps at which any car pits
        all_pitlaps ; {carno: [pit laps]}
        max_lap ; longest ts length in the event
    """
    run_ts = _run_ts
    #all_pitlaps = [] # carno -> pitstops
    all_pitlaps = {} # carno -> pitstops
    max_lap = 0
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < _train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            #print(f'carno:{carno}, totallen={totallen}')
            #first, get target a copy
            # target can be COL_XXSTATUS
            lap_status = rec[COL_LAPSTATUS, :]
            # laps flagged 1 are pit stops
            pitstops = np.where(lap_status == 1)[0]
            #all_pitlaps.append(list(pitstops))
            all_pitlaps[carno] = list(pitstops)
    #return: union of all cars' pit laps, sorted
    allset = []
    for l in all_pitlaps.keys():
        allset.extend(all_pitlaps[l])
    ret_pitlaps = sorted(list(set(allset)))
    return ret_pitlaps, all_pitlaps, max_lap
def get_nextpit(pitlaps, startlap):
    """
    Find, for every car, the first pit stop strictly after startlap.

    input:
        pitlaps ; {carno: sorted list of pit-stop laps for that car}
        startlap ; reference lap
    return:
        nextpit_map ; {carno: next pit lap}; cars with no later pit are omitted
        maxpit ; latest "next pit" over all cars, np.nan when no car has one
    """
    nextpit = []
    nextpit_map = {}
    for carno in pitlaps.keys():
        rec = pitlaps[carno]
        #search for the first pit after startlap
        found = False
        for lap in rec:
            if lap > startlap:
                nextpit.append(lap)
                nextpit_map[carno] = lap
                found = True
                break
        if not found:
            nextpit.append(np.nan)
    # bugfix: plain max() is unreliable once nan placeholders are present
    # (any comparison with nan is False, so max() could return nan);
    # ignore the nan entries when computing the latest next pit
    valid = [p for p in nextpit if not np.isnan(p)]
    return nextpit_map, (max(valid) if valid else np.nan)
def sim_init():
    """
    Snapshot the pit-model related features of the test event so a simulation
    run can later be reset to ground truth (see update_onets).

    Copies COL_LAPSTATUS / COL_CAUTION_LAPS_INSTINT / COL_LAPS_INSTINT into
    their *_SAVE companion rows, in place, for every car in laptime_data.
    """
    # (cleanup: removed unused locals run_ts / ts_len / max_lap)
    for _data in laptime_data:
        # only the configured test event is simulated
        if events[_data[0]] != _test_event:
            continue
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
            #save pit model related features
            rec[COL_LAPSTATUS_SAVE,:] = rec[COL_LAPSTATUS, :]
            rec[COL_CAUTION_LAPS_INSTINT_SAVE,:] = rec[COL_CAUTION_LAPS_INSTINT, :]
            rec[COL_LAPS_INSTINT_SAVE, :] = rec[COL_LAPS_INSTINT, :]
def update_lapstatus(startlap):
    """
    Re-predict the lap status of every car in the test event from startlap
    onwards, delegating to update_onets for each time series (in place).

    input:
        startlap ; laps up to and including startlap keep their saved truth
    """
    # (cleanup: removed unused locals run_ts / ts_len / max_lap)
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
            carno = _data[1][rowid]
            update_onets(rec, startlap, carno)
_pitmodel = None
def update_onets(rec, startlap, carno):
    """
    update lapstatus after startlap based on rec by the pit prediction model

    input:
        rec ; a ts with multiple features COL_XXX (modified in place)
        startlap ; laps [0, startlap] keep their saved ground truth
        carno ; car number (used only for debug reporting)
    return:
        None ; rec is updated in place for COL_LAPSTATUS,
               COL_CAUTION_LAPS_INSTINT, COL_LAPS_INSTINT
    """
    # loop from startlap
    nans, x= nan_helper(rec[_run_ts,:])
    nan_count = np.sum(nans)
    recx = rec[:, ~np.isnan(rec[_run_ts,:])]
    # remove short ts
    totallen = recx.shape[1]
    if startlap >= totallen:
        return
    #totallen = tsrec.shape[1]
    #ipdb.set_trace()
    #reset status :startlap + 1 (restore saved truth up to the split point)
    endpos = startlap + 1
    rec[COL_LAPSTATUS,:] = 0
    rec[COL_LAPSTATUS,:endpos] = rec[COL_LAPSTATUS_SAVE, :endpos]
    rec[COL_CAUTION_LAPS_INSTINT,:endpos] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :endpos]
    rec[COL_LAPS_INSTINT, :endpos] = rec[COL_LAPS_INSTINT_SAVE, :endpos]
    #rec[COL_LAPSTATUS,:] = rec[COL_LAPSTATUS_SAVE, :]
    #rec[COL_CAUTION_LAPS_INSTINT,:] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :]
    #rec[COL_LAPS_INSTINT, :] = rec[COL_LAPS_INSTINT_SAVE, :]
    debug_report('start update_onets', rec[COL_LAPSTATUS], startlap, carno)
    #loop on predict nextpit pos
    curpos = startlap
    while True:
        caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, curpos])
        laps_instint = int(rec[COL_LAPS_INSTINT, curpos])
        # ask the pit model for the predicted stint length from this state
        pred_pit_laps = _pitmodel.predict(caution_laps_instint, laps_instint)
        nextpos = curpos + pred_pit_laps - laps_instint
        if nextpos >= totallen:
            # predicted pit falls beyond the race: fill the tail and stop
            nextpos = totallen - 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos+1] = caution_laps_instint
            for _pos in range(curpos+1, nextpos+1):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            break
        else:
            #if (pred_pit_laps > laps_instint) and (nextpos < totallen):
            # a valid pit: mark it and reset the in-stint counters there
            rec[COL_LAPSTATUS, nextpos] = 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos] = caution_laps_instint
            rec[COL_CAUTION_LAPS_INSTINT, nextpos] = 0
            for _pos in range(curpos+1, nextpos):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            rec[COL_LAPS_INSTINT, nextpos] = 0
            #go forward
            curpos = nextpos
    debug_report('after update_onets', rec[COL_LAPSTATUS], startlap, carno)
    return
#debug tracking status
#status matrix : laps x ( endCol x 5 features)
#features: target, lapstatus, lap_instint, caution_instint, trackstatus
_status_mat = {} # stepid -> status matrix
def debug_report_mat(startlap, maxnext):
    """
    output the status of the simulation

    NOTE(review): this function appears unfinished -- it iterates the test
    event and binds each car's record but produces no output; fixedWidth,
    endCol, run_ts, maxnext and rec are currently unused. Confirm intent
    before relying on it.
    """
    fixedWidth = 5
    endCol = 4
    run_ts = _run_ts
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        #header carno | lap#...
        #fixed width
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
_debug_carlist = []
#_debug_carlist = [12]
def debug_report_ts(msg, rec, startlap, carno, col= COL_LAPSTATUS):
    """Print one feature row of a car's ts, split at startlap (debug cars only)."""
    if carno not in _debug_carlist:
        return
    head = rec[col, : startlap + 1]
    tail = rec[col, startlap + 1:]
    print(f'--------- {msg}: {startlap} ----------')
    print(head)
    print('='*10)
    print(tail)
def debug_report(msg, rec, startlap, carno):
    """Print a 1-D array split at startlap, but only for cars under debug."""
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[: startlap + 1])
        print('='*10)
        print(rec[startlap + 1:])
def debug_print(msg):
    """Forward msg to stdout only when at least one debug car is configured."""
    if _debug_carlist:
        print(msg)
# works on predicted lap status
def sim_onestep_pred(predictor, prediction_length, freq,
                startlap, endlap,
                oracle_mode = MODE_ORACLE,
                verbose = False
        ):
    """
    Regressive simulation step driven by the *predicted* lap status
    (COL_LAPSTATUS as rewritten by the pit model); each window's forecast is
    fed back as the target history for the next window.

    input:
        predictor ; trained gluonts predictor
        prediction_length, freq ; same as longterm_predict, make_dataset_byevent
        startlap, endlap ; range of laps to simulate forward
        oracle_mode ; MODE_* feature-selection / test-mode flags
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus (saved ground truth)
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    # advance window by window; endpos is the (exclusive) end of the window
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + prediction_length + 1):
    while(endpos <= endlap + prediction_length):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            #ipdb.set_trace()
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # <3, totallen> ; allocate the per-car result matrix once
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS_SAVE, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                # forecasts_et will be updated by forecasts (regressive feedback)
                target_val = forecasts_et[carno][2,:]
                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test mode
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #                 laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit resets the stint age
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec]
                             }
                          )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                             }
                          )
                test_rec_cnt += 1
                #debug
                #debug_report('simu_onestep', rec, startlap, carno, col= _run_ts)
                debug_report(f'simu_onestep: {startlap}-{endlap}, endpos={endpos}', target_val[:endpos], startlap, carno)
        # end of for each ts
        # RUN Prediction here, one batch for the whole window
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=100,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            #update the forecasts , ready to use in the next prediction(regresive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
        #go forward
        endpos += prediction_length
    return forecasts_et
# works on lapstatus ground truth
def sim_onestep_ex(predictor, prediction_length, freq,
                startlap, endlap,
                oracle_mode = MODE_ORACLE,
                verbose = False
        ):
    """
    Regressive simulation step that works on the lap status *ground truth*
    (COL_LAPSTATUS); each window's forecast is fed back as the target history
    for the next window, and predictions beyond endlap are cleared.

    input:
        predictor ; trained gluonts predictor
        prediction_length, freq ; same as longterm_predict, make_dataset_byevent
        startlap, endlap ; range of laps to simulate forward
        oracle_mode ; MODE_* feature-selection / test-mode flags
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus (ground truth)
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    # advance window by window; endpos is the (exclusive) end of the window
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + 1):
    while(endpos <= endlap + prediction_length + 1):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # <3, totallen> ; allocate the per-car result matrix once
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                    #forecasts_et[carno][2,:endpos] = rec[run_ts,:endpos].copy().astype(np.float32)
                # forecasts_et will be updated by forecasts (regressive feedback)
                target_val = forecasts_et[carno][2,:]
                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test mode
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #                 laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit resets the stint age
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec]
                             }
                          )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                             }
                          )
                test_rec_cnt += 1
        # end of for each ts
        # RUN Prediction here, one batch for the whole window
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=100,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            #update the forecasts , ready to use in the next prediction(regresive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
        #go forward
        endpos += prediction_length
    #clear the unpred part (beyond endlap nothing was simulated)
    for carno in forecasts_et.keys():
        forecasts_et[carno][2, endlap+1:] = np.nan
    return forecasts_et
def sim_onestep(predictor, prediction_length, freq,
                startlap, endlap,
                oracle_mode = MODE_ORACLE,
                verbose = False
        ):
    """
    Regressive simulation that predicts one car at a time: for each ts the
    window is advanced from startlap to endlap, feeding each window's
    forecast back into that car's own target history.

    input:
        predictor ; trained gluonts predictor
        prediction_length, freq ; same as longterm_predict, make_dataset_byevent
        startlap, endlap ; range of laps to simulate forward
        oracle_mode ; MODE_* feature-selection / test-mode flags
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _test = []
        if events[_data[0]] != test_event:
            #jump out
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()
            # <3, totallen> ; per-car result matrix
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if True:
                #bug fix, fixed the split point for all cars/ts
                #for endpos in range(max_len, context_len+prediction_length,step):
                #step = prediction_length
                #for endpos in range(startlap + prediction_length, endlap, step):
                endpos = startlap + prediction_length
                while(endpos < endlap and endpos < totallen):
                    # RUN Prediction for single record
                    _test = []
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #todo
                        #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                        #                 laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit resets the stint age
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                              )
                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)
                    #get prediction result
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
                    #update target_val and feature rows so the next window sees them
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
                    #save forecast
                    #save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    #print('car no:', carno, 'completed_laps:', completed_laps)
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
                    test_rec_cnt += 1
                    #go forward
                    endpos += prediction_length
            #one ts done
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
    return forecasts_et
# pred pit differs to true pit
def get_acc_onestint_pred(forecasts, startlap, nextpit, nextpit_pred, trim=2, currank = False):
    """
    Evaluate rank-change prediction for the stint starting at startlap, where
    the stint end comes from a *predicted* pit lap.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts ; carno -> [5, totallen] matrix
            row 0: lap_status, row 3: true_rank, row 4: pred_rank
        startlap ; evaluate only cars that pit at this lap
        nextpit ; {carno: true next pit lap}
        nextpit_pred ; {carno: predicted next pit lap}
        currank ; when True, score the zero-change baseline instead
    output:
        rows of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    rankret = []
    for carno, mat in forecasts.items():
        lapnum = len(mat[1, :])
        true_rank = mat[3, :]
        pred_rank = mat[4, :]
        # only cars that actually pit at startlap open a stint here
        if startlap >= lapnum or mat[0, startlap] != 1:
            continue
        startrank = true_rank[startlap - trim]
        # both the true and the predicted next pit must be known
        if carno not in nextpit or carno not in nextpit_pred:
            continue
        pitpos = nextpit[carno]
        if np.isnan(pitpos):
            continue
        pitpos_pred = nextpit_pred[carno]
        if np.isnan(pitpos_pred):
            continue
        endrank = true_rank[pitpos - trim]
        #endrank_pred = true_rank[pitpos_pred-trim]
        diff = endrank - startrank
        sign = get_sign(diff)
        if currank:
            # baseline: assume the rank does not change over the stint
            pred_endrank = startrank
        else:
            pred_endrank = pred_rank[pitpos_pred - trim]
        pred_diff = pred_endrank - startrank
        pred_sign = get_sign(pred_diff)
        rankret.append([carno, startlap, startrank,
                        endrank, diff, sign,
                        pred_endrank, pred_diff, pred_sign
                       ])
    return rankret
# pred pit differs to true pit
def get_acc_onestep_shortterm(forecasts, startlap, endlap, trim=0, currank = False):
    """
    Short-term evaluation of one prediction window from startlap to endlap.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; first lap of the evaluated window
        endlap ; last lap of the evaluated window
        currank ; when True use the naive "rank stays the same" baseline
    output:
        rows of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    rows = []
    for car, mat in forecasts.items():
        nlaps = len(mat[1, :])
        # skip cars whose series ends before the evaluation window
        if startlap >= nlaps:
            continue

        rank_true = mat[3, :]
        rank_pred = mat[4, :]
        begin_rank = rank_true[startlap - trim]

        if np.isnan(endlap):
            continue
        final_rank = rank_true[endlap - trim]

        delta = final_rank - begin_rank
        delta_sign = get_sign(delta)

        if currank:
            # naive baseline: the rank is assumed unchanged
            est_rank = begin_rank
        else:
            est_rank = rank_pred[endlap - trim]
        est_delta = est_rank - begin_rank
        est_sign = get_sign(est_delta)

        rows.append([car, startlap, begin_rank,
                     final_rank, delta, delta_sign,
                     est_rank, est_delta, est_sign])
    return rows
# works when pred pitstop == true pitstop
def get_acc_onestint(forecasts, startlap, nextpit, trim=2, currank = False):
    """
    Evaluate one stint per car; valid when the predicted pitstop equals the
    true pitstop (both end ranks are read at the same lap).

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; eval for the stint start from startlap only
        nextpit ; next pitstop for all cars
        currank ; when True use the naive "rank stays the same" baseline
    output:
        rows of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    rows = []
    for car, mat in forecasts.items():
        nlaps = len(mat[1, :])
        rank_true = mat[3, :]
        rank_pred = mat[4, :]

        # only cars that actually pit at startlap are evaluated
        if startlap >= nlaps or mat[0, startlap] != 1:
            continue

        begin_rank = rank_true[startlap - trim]

        if car not in nextpit:
            continue
        pit = nextpit[car]
        if np.isnan(pit):
            continue

        final_rank = rank_true[pit - trim]
        delta = final_rank - begin_rank
        delta_sign = get_sign(delta)

        if currank:
            # naive baseline: the rank is assumed unchanged
            est_rank = begin_rank
        else:
            est_rank = rank_pred[pit - trim]
        est_delta = est_rank - begin_rank
        est_sign = get_sign(est_delta)

        rows.append([car, startlap, begin_rank,
                     final_rank, delta, delta_sign,
                     est_rank, est_delta, est_sign])
    return rows
# pred sim
def run_simulation_pred(predictor, prediction_length, freq,
                 datamode = MODE_ORACLE):
    """
    Simulation driven by the *predicted* pit model.

    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation
            2. eval stint performance

    input:
        predictor ; trained forecasting model
        prediction_length ; forecast horizon in laps
        freq ; pandas frequency string of the series
        datamode ; MODE_* bitmask controlling oracle vs predicted covariates
    return:
        DataFrame with columns [carno, startlap, startrank, endrank, diff,
        sign, pred_endrank, pred_diff, pred_sign]
    """
    rankret = []

    # the ground truth pit laps for the whole event
    allpits, pitmat, maxlap = get_pitlaps()

    sim_init()
    for pitlap in allpits:
        #1. update lap status
        debug_print(f'start pitlap: {pitlap}')
        update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')

        #2. get maxnext; re-extract the pit schedule after the status update
        # to obtain the *predicted* pit laps alongside the ground-truth ones
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)

        #debug output for one tracked car (see _debug_carlist)
        if 12 in nextpit and 12 in nextpit_pred:
            #print('nextpit:', nextpit[12], nextpit_pred[12], 'maxnext:', maxnext, maxnext_pred)
            debugstr = f'nextpit: {nextpit[12]}, {nextpit_pred[12]}, maxnext: {maxnext}, {maxnext_pred}'
            debug_print(debugstr)

        #run one step sim from pitlap to the predicted next pit
        forecast = sim_onestep_pred(predictor, prediction_length, freq,
                        pitlap, maxnext_pred,
                        oracle_mode = datamode
                        )
        debug_print(f'simulation done: {len(forecast)}')

        # calc rank from this result, according to the configured experiment
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break

        # evaluate for this stint (true vs predicted pit lap)
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred)
        rankret.extend(ret)

    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                        'endrank', 'diff', 'sign',
                        'pred_endrank', 'pred_diff', 'pred_sign',
                        ])
    return df
#prediction of shorterm + pred pit model
def run_simulation_shortterm(predictor, prediction_length, freq,
                 datamode = MODE_ORACLE):
    """
    Short-term simulation: slide a fixed window of prediction_length laps
    over the race (laps 10 .. maxlap - prediction_length) instead of
    evaluating whole stints.

    step:
        1. init the lap status model
        2. loop on each start lap
            1. onestep simulation
            2. eval one-window performance

    return:
        DataFrame with columns [carno, startlap, startrank, endrank, diff,
        sign, pred_endrank, pred_diff, pred_sign]
    """
    rankret = []

    # the ground truth pit schedule; only maxlap is used in this mode
    allpits, pitmat, maxlap = get_pitlaps()

    sim_init()
    for pitlap in range(10, maxlap-prediction_length):
        #1. update lap status
        debug_print(f'start pitlap: {pitlap}')
        update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')

        #run one step sim from pitlap to pitlap + prediction_length
        forecast = sim_onestep_pred(predictor, prediction_length, freq,
                        pitlap, pitlap + prediction_length,
                        oracle_mode = datamode
                        )
        debug_print(f'simulation done: {len(forecast)}')

        # calc rank from this result, according to the configured experiment
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break

        # evaluate for this window
        #ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred)
        ret = get_acc_onestep_shortterm(forecasts_et, pitlap, pitlap+prediction_length)
        rankret.extend(ret)

    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                        'endrank', 'diff', 'sign',
                        'pred_endrank', 'pred_diff', 'pred_sign',
                        ])
    return df
# oracle sim
def run_simulation(predictor, prediction_length, freq,
                 datamode = MODE_ORACLE):
    """
    Oracle simulation: the ground-truth pit schedule drives both the
    simulation horizon and the evaluation.

    step:
        1. get the ground truth pit laps
        2. loop on each pit lap
            1. onestep simulation up to the next pit
            2. eval stint performance

    return:
        DataFrame with columns [carno, startlap, startrank, endrank, diff,
        sign, pred_endrank, pred_diff, pred_sign]
    """
    rankret = []

    allpits, pitmat, maxlap = get_pitlaps()

    for pitlap in allpits:
        print(f'start pitlap: {pitlap}')

        nextpit, maxnext = get_nextpit(pitmat, pitlap)

        #run one step sim from pitlap to maxnext
        forecast = sim_onestep_ex(predictor, prediction_length, freq,
                        pitlap, maxnext,
                        oracle_mode = datamode
                        )
        print(f'simulation done: {len(forecast)}')

        # calc rank from this result, according to the configured experiment
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break

        # evaluate for this stint
        ret = get_acc_onestint(forecasts_et, pitlap, nextpit)
        rankret.extend(ret)

    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                        'endrank', 'diff', 'sign',
                        'pred_endrank', 'pred_diff', 'pred_sign',
                        ])
    return df
#
# ------------
#
def longterm_predict(predictor, runs, prediction_length, freq,
        useeid = False,
        run_ts= COL_LAPTIME,
        test_event = 'Indy500-2018',
        test_cars = [],
        use_global_dict = True,
        oracle_mode = MODE_ORACLE,
        half_moving_win = 0,
        train_ratio=0.8,
        log_transform = False,
        verbose = False
):
    """
    Long-term prediction by continuously regressive forecasting: a rolling
    window walks over the test event and each window's forecast is written
    back into the series before the next window is predicted.

    split the ts to train and test part by the ratio

    input:
        predictor ; trained forecasting model used for each window
        runs ; event index to evaluate; negative means all events
        oracle_mode: false to simulate prediction in real by
            set the covariates of track and lap status as nan in the testset
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
            (0 steps one lap at a time, 1 steps plen/2, 2 steps plen)
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder

    NOTE(review): run_ts/test_event/feature_mode/context_ratio parameters are
    immediately shadowed by the module-level _run_ts/_test_event/... globals.
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio

    init_track_model()
    init_adjust_track_model()

    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    train_set = []
    test_set = []
    forecasts_et = {}

    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()

    #add statistics for adjust test
    # mae[0]: track status deviation, mae[1]: lap status deviation
    mae = [0,0]

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []

        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out: only the test event is processed in this function
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)

        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()

            # result matrix <5, totallen>: row 2 starts as a copy of the
            # ground truth and is overwritten window by window below
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)

            if log_transform:
                target_val = np.log(target_val + 1.0)

            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)

            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                    'start': start,
                    'feat_static_cat': static_cat,
                    'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                        rec[COL_LAPSTATUS,:]]
                     }
                  )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len

                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = int(prediction_length/2)
                elif half_moving_win == 2:
                    step = prediction_length
                else:
                    step = 1

                #bug fix, fixed the split point for all cars/ts
                #for endpos in range(max_len, context_len+prediction_length,step):
                for endpos in range(context_len+prediction_length, max_len, step):
                    #check if enough for this ts
                    if endpos > totallen:
                        break

                    # RUN Prediction for single record
                    _test = []

                    # check pitstop(stint) in the last prediction
                    # use ground truth of target before the last pitstop
                    if np.sum(lap_status[endpos-2*prediction_length:endpos-prediction_length]) > 0:
                        # pit found
                        # adjust endpos
                        # NOTE(review): reassigning endpos here does not change
                        # the range() iteration; it only shifts this window
                        pitpos = np.where(lap_status[endpos-2*prediction_length:endpos-prediction_length] == 1)
                        endpos = endpos-2*prediction_length + pitpos[0][0] + prediction_length + 1
                        #print('endpos:',endpos,pitpos)
                        #check if enough for this ts
                        if endpos > totallen:
                            break

                    #reset target, status (undo writes from previous windows)
                    target_val = rec[run_ts,:].copy().astype(np.float32)
                    rec[COL_LAPSTATUS, :] = lap_status
                    rec[COL_TRACKSTATUS, :] = track_status
                    rec[COL_LAPS_INSTINT, :] = pitage_status

                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    # state at the last observed lap before the forecast window
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec

                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec

                    #okay, end of adjustments, test difference here
                    # accumulate deviation between adjusted and raw covariates
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]

                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))

                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec]
                             }
                          )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                             }
                          )

                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)

                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)

                    #get prediction result
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))

                    #update target_val: write the forecast back so later
                    #windows condition on this window's prediction
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]

                    #save forecast
                    #save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    #print('car no:', carno, 'completed_laps:', completed_laps)
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()

                    test_rec_cnt += 1

            #one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

    #train_set.extend(_train)
    #test_set.extend(_test)

    #print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    #train_ds = ListDataset(train_set, freq=freq)
    #test_ds = ListDataset(test_set, freq=freq)

    return forecasts_et
# In[12]:
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset):
    """
    Evaluate stint rank by laptime forecasting.

    Lap times are converted to elapsed time (cumulative sum plus the car's
    lap-0 offset) and per-lap ranks are computed across all cars; NaN laps
    sort to the end and are never read back.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3,: -> placeholder (filled here with true rank)
            4,: -> placeholder (filled here with pred rank)
        start_offset ; DataFrame with columns ['car_number','elapsed_time']
            holding each car's elapsed time at lap 0; any non-DataFrame
            value is treated as "no offset" (0) for every car
        prediction_length ; unused, kept for interface compatibility
    return:
        forecasts_et, mutated in place with rows 3 and 4 filled in
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}

    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan

    for carno in forecasts_et.keys():
        # bugfix: 'offset' was unbound (NameError) whenever start_offset was
        # not a DataFrame; default to zero offset in that case
        offset = 0
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]

        lapnum = len(forecasts_et[carno][1,:])
        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed

        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed

    #calculate rank, support nan (double argsort -> per-column ranks)
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)

    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)

    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]

    return forecasts_et
def eval_stint_direct(forecasts_et, prediction_length):
    """
    Evaluate rank by timediff forecasting: per-lap time differences are
    ranked directly across cars (smaller timediff -> better rank).

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3,: -> placeholder (filled here with true rank)
            4,: -> placeholder (filled here with pred rank)
        prediction_length ; unused, kept for interface compatibility
    return:
        forecasts_et, mutated in place with rows 3 and 4 filled in
    """
    cars = list(forecasts_et.keys())
    pos_of = {c: i for i, c in enumerate(cars)}

    #todo, Indy500 - > 200 max laps
    total_laps = 200
    diffs = np.full((2, len(cars), total_laps), np.nan)

    for car, mat in forecasts_et.items():
        n = len(mat[1, :])
        diffs[0, pos_of[car], :n] = mat[1, :]
        diffs[1, pos_of[car], :n] = mat[2, :]

    # double argsort turns sort order into per-column ranks (NaNs sort last)
    rank_true = np.argsort(np.argsort(diffs[0], axis=0), axis=0)
    rank_pred = np.argsort(np.argsort(diffs[1], axis=0), axis=0)

    # write the ranks back into rows 3 and 4
    for car, mat in forecasts_et.items():
        n = len(mat[1, :])
        mat[3, :] = rank_true[pos_of[car], :n]
        mat[4, :] = rank_pred[pos_of[car], :n]

    return forecasts_et
#calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset):
    """
    Evaluate rank by laptime forecasting (same computation as
    eval_stint_bylaptime, kept for interface compatibility).

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix (row 1 true laptime,
            row 2 predicted laptime; rows 3/4 are filled with ranks here)
        start_offset ; DataFrame ['car_number','elapsed_time'] of each car's
            elapsed time at lap 0; any non-DataFrame value is treated as
            "no offset" (0) for every car
        prediction_length ; unused, kept for interface compatibility
    return:
        forecasts_et, mutated in place with rows 3 (true rank) and
        4 (pred rank) filled in
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}

    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan

    for carno in forecasts_et.keys():
        # bugfix: 'offset' was unbound (NameError) whenever start_offset was
        # not a DataFrame; default to zero offset in that case
        offset = 0
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]

        lapnum = len(forecasts_et[carno][1,:])
        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed

        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed

    #calculate rank, support nan (double argsort -> per-column ranks)
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)

    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)

    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]

    return forecasts_et
# In[13]:
def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
           test_event='Indy500-2018', test_cars = [],
           datamode = MODE_ORACLE,model = 'oracle'):
    """
    Load one trained model and run the long-term rolling prediction on the
    configured test event.

    dependency: test_event, test on one event only
    NOTE(review): the test_event parameter is not used; longterm_predict
    reads the module-level _test_event instead.

    return:
        forecasts ; carno -> 5 x totallen matrix (see longterm_predict)
    """
    # retdf/pred_ret/ds_ret/rank_result are unused leftovers kept for
    # compatibility with earlier versions of this experiment driver
    retdf = []
    pred_ret = {}
    ds_ret = {}
    rank_result = {}
    predictor = {}

    #for model in models:
    print('exp:',inspect.stack()[0][3],'model:', model,
            'datamode:', get_modestr(datamode),'eval:', _exp_id )

    predictor[model] = load_model(prediction_length, model,
            trainid=trainid)

    ### create test dataset and run the rolling prediction
    forecasts = longterm_predict(predictor[model],
                    events_id[_test_event], prediction_length,freq,
                    oracle_mode=datamode,
                    run_ts = _run_ts,
                    test_cars=test_cars,
                    half_moving_win= half_moving_win,
                    train_ratio=train_ratio
                    )

    #forecasts = eval_stint_rank(forecasts_et, prediction_length,
    #                    global_start_offset[test_event])
    return forecasts
# In[14]:
def get_sign(diff):
    """Return the sign of *diff*: 1 if positive, -1 if negative, else 0."""
    if diff > 0:
        return 1
    if diff < 0:
        return -1
    return 0
def get_stint_acc(forecasts, trim=2, currank = False):
    """
    Evaluate the sign-of-rank-change for every stint of every car.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        currank ; when True evaluate the naive "rank stays the same" baseline
    output:
        DataFrame [carno, stintid, startrank, endrank, diff, sign,
                   pred_endrank, pred_diff, pred_sign]
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]

        # laps flagged as pit stops delimit the stints
        pitpos_list = np.where(forecasts[carno][0,:] == 1)[0]

        stintid = 0
        startrank = true_rank[0]
        for pitpos in pitpos_list:
            endrank = true_rank[pitpos-trim]
            diff = endrank - startrank
            sign = get_sign(diff)

            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                pred_endrank = pred_rank[pitpos-trim]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)

            rankret.append([carno, stintid, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                           ])

            stintid += 1
            startrank = true_rank[pitpos-trim]

        # final stint: from the last pit (or the race start) to the last lap
        # bugfix: a car with no pit stops at all made pitpos_list empty and
        # pitpos_list[-1] raised IndexError; treat the whole race as one stint
        if len(pitpos_list) == 0 or pitpos_list[-1] < lapnum - 1:
            endrank = true_rank[-1]
            diff = endrank - startrank
            sign = get_sign(diff)

            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                pred_endrank = pred_rank[-1]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)

            rankret.append([carno, stintid, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                           ])

    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'stintid', 'startrank',
                                         'endrank', 'diff', 'sign',
                                         'pred_endrank', 'pred_diff', 'pred_sign',
                                        ])
    return df
#
# configuration
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'   # dataset the models were trained on
_test_event = 'Indy500-2018'    # event held out for evaluation
#_test_event = 'Indy500-2019'
_train_len = 40
_feature_mode = FEATURE_STATUS  # which dynamic covariates are fed to the model
_context_ratio = 0.             # 0. -> context length derived from prediction_length

#_task_id = 'timediff' # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
#
#_task_id = 'lapstatus' # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus' #rank, laptime, laptim2rank, timediff2rank...

_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...

# In[16]:

# module-level state filled in by init()
global_start_offset = {}   # event -> DataFrame of each car's elapsed time at lap 0
global_carids = {}         # carno -> global car id (decoder index)
laptime_data = None        # per-event time-series data loaded from the pickle
freq = "1min"              # nominal pandas frequency of the series
decode_carids = {}         # inverse mapping of global_carids

years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}_v9'
def init(pitmodel = ''):
    """
    Load the per-event data, the pickled time-series dataset and set up the
    pit model; fills the module-level globals declared below.

    pitmodel: a non-str value selects PitModelSimple (0 -> top8 variant);
    a str is interpreted as a model file path for PitModelMLP.
    """
    global global_carids, laptime_data, global_start_offset, decode_carids,_pitmodel

    stagedata = {}
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)

        alldata, rankdata, acldata = stagedata[event]
        #offset: each car's elapsed time at lap 0 for this event
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]

    # start from here
    import pickle
    #with open('laptime_rank_timediff_fulltest-oracle-%s.pickle'%year, 'rb') as f:
    with open(f'laptime_rank_timediff_pit-oracle-{dbid}.pickle', 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')

    decode_carids={carid:carno for carno, carid in global_carids.items()}
    print(f'init: load dataset with {len(laptime_data)} races, {len(global_carids)} cars')

    if not isinstance(pitmodel, str):
        _pitmodel = PitModelSimple(top8=(True if pitmodel==0 else False))
        print(f'init pitmodel as PitModelSimple')
    else:
        _pitmodel = PitModelMLP(modelfile = pitmodel)
        print(f'init pitmodel as PitModelMLP(pitmodel)')
def runtest(modelname, model, datamode, naivemode, trainid= "2018"):
    """
    Run one evaluation configuration end to end and report stint metrics.

    modelname ; label of the configuration (for reporting only)
    model ; model id passed to load_model via run_exp
    datamode ; MODE_* bitmask controlling oracle vs predicted covariates
    naivemode ; when True force the "rank stays the same" baseline
    return:
        (acc, mae, rmse, r2) of the predicted stint rank change
    """
    forecast = run_exp(2,2, train_ratio =0.1 , trainid = trainid,
            datamode=datamode, model=model)

    # convert the forecast into per-lap ranks, per the configured experiment
    if _exp_id=='rank' or _exp_id=='timediff2rank':
        forecasts_et = eval_stint_direct(forecast, 2)
    elif _exp_id=='laptime2rank':
        forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
    else:
        print(f'Error, {_exp_id} evaluation not support yet')
        return 0,0, 0,0

    df = get_stint_acc(forecasts_et, currank = naivemode, trim= _trim)

    # sign accuracy plus error metrics of the rank-change magnitude
    correct = df[df['sign']==df['pred_sign']]
    acc = len(correct)/len(df)

    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values))/len(df)
    rmse = mean_squared_error(df['pred_diff'].values , df['diff'].values)
    mae = mean_absolute_error(df['pred_diff'].values , df['diff'].values)
    r2 = r2_score(df['pred_diff'].values , df['diff'].values)
    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}')

    return acc, mae, rmse, r2
def get_evalret(df):
    """
    Print and return accuracy/error metrics for stint rank-change prediction,
    together with the naive "no change" baseline for reference.

    df ; DataFrame with columns startrank, endrank, diff, sign,
         pred_diff, pred_sign
    return:
        (acc, mae, rmse, r2)
    """
    total = len(df)
    pred_diff = df['pred_diff'].values
    true_diff = df['diff'].values

    acc = len(df[df['sign'] == df['pred_sign']]) / total

    mae1 = np.sum(np.abs(pred_diff - true_diff)) / total
    rmse = mean_squared_error(pred_diff, true_diff)
    mae = mean_absolute_error(pred_diff, true_diff)
    r2 = r2_score(pred_diff, true_diff)

    # naive baseline: predict "no rank change" for every stint
    acc_naive = len(df[df['startrank'] == df['endrank']]) / total
    mae_naive = np.mean(np.abs(true_diff))

    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    return acc, mae, rmse, r2
def get_evalret_shortterm(df):
    """
    Print and return short-term (leader-prediction) metrics.

    acc is the precision of predicting the race leader (endrank == 0) among
    rows where the model predicted a leader; mae/rmse/r2 are computed over
    all end-rank predictions.

    NOTE(review): raises ZeroDivisionError when df contains no predicted
    leader (pred_endrank == 0) or no current leader (startrank == 0) rows
    — confirm callers guarantee at least one of each.
    """
    maxlap = np.max(df['startlap'].values)
    minlap = np.min(df['startlap'].values)

    top1 = df[df['endrank']==0]
    top1_pred = df[df['pred_endrank']==0]
    correct = top1_pred[top1_pred['pred_endrank']==top1_pred['endrank']]
    acc = len(correct)/len(top1_pred)

    rmse = mean_squared_error(df['pred_endrank'].values , df['endrank'].values)
    mae = mean_absolute_error(df['pred_endrank'].values , df['endrank'].values)
    r2 = r2_score(df['pred_endrank'].values , df['endrank'].values)

    # total absolute error normalized by the number of evaluated laps
    mae1 = np.sum(np.abs(df['pred_endrank'].values - df['endrank'].values))
    mae1 = mae1/ (maxlap -minlap +1)

    #naive result: the current leader is predicted to stay the leader
    top1_naive = df[df['startrank']==0]
    n_correct = top1_naive[top1_naive['startrank']==top1_naive['endrank']]
    acc_naive = len(n_correct)/len(top1_naive)
    mae_naive = np.mean(np.abs(df['diff'].values))
    mae_naive1 = np.sum(np.abs(df['diff'].values))
    mae_naive1 = mae_naive1 / (maxlap - minlap + 1)

    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}, {mae_naive1}')
    print(f'pred: acc={acc}, mae={mae}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    return acc, mae, rmse, r2
#
# In[20]:
def mytest():
    """
    Run the full battery of model/data-mode configurations and save the
    accuracy table to a csv file.

    return:
        the result DataFrame; when a previous run's output file already
        exists it is loaded from disk and returned instead of re-running.
    """
    savefile = f'stint-evluate-{_exp_id}-d{_dataset_id}-t{_test_event}-c{_context_ratio}_trim{_trim}.csv'
    if os.path.exists(savefile):
        print(f'{savefile} already exists, bye')
        retdf = pd.read_csv(savefile)
        # bugfix: the cached result used to be read and then discarded
        return retdf

    # configuration name -> [model id, MODE_* bitmask, naive-baseline flag]
    config = {'fulloracle':['oracle',MODE_ORACLE,False],
              'laponly':['oracle',MODE_ORACLE_LAPONLY,False],
              'notracklap':['oracle',MODE_NOTRACK + MODE_NOLAP,False],
              'fullpred':['oracle',MODE_PREDTRACK + MODE_PREDPIT,False],
              'curtrack':['oracle',MODE_TESTCURTRACK,False],
              'zerotrack':['oracle',MODE_TESTZERO,False],
              'predtrack':['oracle',MODE_PREDTRACK + MODE_ORACLE_TRACKONLY,False],
              'predpit':['oracle',MODE_PREDPIT + MODE_ORACLE_LAPONLY,False],
              'deepAR':['deepAR',MODE_ORACLE,False],
              'naive':['zero',MODE_ORACLE, True],
             }

    cols = ['runid','acc','mae', 'rmse', 'r2']
    result = []
    for modelname in config.keys():
        acc, mae, rmse, r2 = runtest(modelname, config[modelname][0],
                config[modelname][1],config[modelname][2])
        result.append([modelname, acc, mae, rmse, r2])

    retd = pd.DataFrame(result, columns=cols)
    # bugfix: results were previously written to a file name without the
    # _trim suffix, so the existence check above could never find a previous
    # run's output; write to the same path that is checked
    retd.to_csv(savefile, float_format='%.3f')
    return retd
# In[ ]:
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)

    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))

    # cmd argument parser
    usage = 'stint_predictor_fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    parser.add_option("--contextratio", dest="contextratio", default=0.)
    parser.add_option("--trim", dest="trim", type=int, default=2)

    opt, args = parser.parse_args()

    #set global parameters from the command line
    # NOTE(review): --contextratio is parsed but never applied; the module
    # default _context_ratio is used instead — confirm this is intended
    _dataset_id = opt.datasetid
    _test_event = opt.testevent
    _trim = opt.trim

    # the task id selects which series is forecast and how it is evaluated
    if opt.taskid == 'laptime':
        _task_id = 'laptime' # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
        _exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff' # rank,laptime, the trained model's task
        _run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
        _exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'rank':
        _task_id = 'rank' # rank,laptime, the trained model's task
        _run_ts = COL_RANK #COL_LAPTIME,COL_RANK
        _exp_id='rank' #rank, laptime, laptim2rank, timediff2rank...
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)

    if _dataset_id=='' or _test_event=='':
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)

    # datasets with 'pitage' in the name carry the pit-age covariate
    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE

    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
            _task_id)

    init()
    mytest()
| 141,581 | 36.465467 | 310 | py |
rankpredictor | rankpredictor-master/src/indycar/model/stint-predictor-fastrun.py | #!/usr/bin/env python
# coding: utf-8
# ## Stint-Predictor-Fastrun
#
# based on: LongTerm-Predictor
#
# long term predictor by continuously regressive forecasting
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# In[2]:
import os
random.seed()   # seed the RNG used by the pit/track disturbance models below
os.getcwd()     # notebook leftover: the return value is discarded
#GPUID = 1
# ### global constants
# In[3]:
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# Feature-row indices into each car's [features, lapnumber] data matrix.
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6

# Feature-set selector: status covariates only, or status plus pit age.
FEATURE_STATUS = 2
FEATURE_PITAGE = 4

# oracle mode (bit flags combined with + and checked with test_flag)
MODE_ORACLE = 1024  # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2

# oracle mode for training
MODE_NOLAP = 1
MODE_NOTRACK = 2

# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32

# disturbe analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256

# flag value -> human-readable name, used by get_modestr()
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
             MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
             MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
             MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
             MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
             MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
# In[ ]:
# In[4]:
def load_data(event, year=0):
    """Load one event's csv and derive the rank/completed-lap views.

    input:
        event ; event name, e.g. 'Indy500'
        year ; optional int; when >0 the file name is C_<event>-<year>.csv
    return:
        alldata ; raw per-lap records for every car
        rankdata ; records deduped per (car, lap), ordered by elapsed time
        acldata ; completed-laps summary built from all cars (make_cl_data)
    """
    #inputfile = '../data/final/C_'+ event +'-' + year + '-final.csv'
    if year>0:
        # bug fix: `year` is an int; concatenating it to a str raised
        # TypeError for every call with year>0
        inputfile = '../data/final/C_'+ event +'-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_'+ event +'.csv'

    dataset = pd.read_csv(inputfile)

    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1

    # get records for the cars that finish the race
    completed_car_numbers= dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)

    #make a copy
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # NOTE(review): cldata is computed but never returned or used here
    cldata = make_cl_data(dataset)
    acldata = make_cl_data(alldata)

    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the completed-laps view of an event.

    Dedupes records to one row per (car_number, completed_laps), orders them
    per car, and adds per-car first differences of rank and elapsed time
    (zeroed at each car's first lap).

    input:
        dataset ; raw per-lap records with at least car_number,
            completed_laps, rank, elapsed_time and the status columns
    return:
        DataFrame with columns [car_number, completed_laps, rank,
        elapsed_time, rank_diff, time_diff, current_status, track_status,
        lap_status]
    """
    # pick up data with valid rank: order by elapsed time (index breaks
    # ties) and keep the first record per car/lap
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
                          "last_pitted_lap","start_position","laps_led"], axis=1)

    # per-car diffs; a car's first row (car_number changes) gets 0.
    # bug fix: the original used chained assignment
    # (uni_ds['rank_diff'][mask] = 0) which triggers SettingWithCopyWarning
    # and silently does nothing under pandas copy-on-write; use .loc.
    new_car_mask = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[new_car_mask, 'rank_diff'] = 0

    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[new_car_mask, 'time_diff'] = 0

    df = uni_ds[['car_number','completed_laps','rank','elapsed_time',
                 'rank_diff','time_diff',"current_status", "track_status", "lap_status"]]
    return df
# In[5]:
def nan_helper(y):
    """Locate NaNs in a 1d array and provide an index converter.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, boolean mask of NaN positions
        - index, callable turning a boolean mask into integer indices

    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x= nan_helper(y)
        >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)
    to_indices = lambda logical: logical.nonzero()[0]
    return mask, to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
def get_modestr(a):
    """Render the names of all oracle-mode bits set in *a*, comma-terminated."""
    names = [name for bit, name in _mode_map.items() if test_flag(a, bit)]
    return ''.join('%s,' % name for name in names)
# endpos -> vector of prediction_length
# Module-level caches of the simulated track-status model:
#   _track_pred : split end position -> predicted track-status vector
#   _track_true : split end position -> ground-truth slice (kept for comparison)
_track_pred = {}
_track_true = {}
def init_track_model():
    # Reset both caches; call before building a new dataset split.
    global _track_pred,_track_true
    _track_pred = {}
    _track_true = {}
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """
    Return a predicted track-status vector for the forecast window.

    Counts how many yellow-flag laps immediately precede the split point,
    samples a total caution length from the empirical model, and marks the
    remaining caution laps at the start of the window.  Results are cached
    per endpos (so the same split always gets the same random draw) and the
    ground-truth slice is stored in _track_true for later comparison.

    input:
        track_rec ; 1d track-status vector (1 = yellow) up to endpos
        endpos ; split end position, used as the cache key
        prediction_length ; forecast horizon in laps
        context_len ; how far back to look for the current caution run
    return:
        int vector of length prediction_length (1 = predicted yellow lap)
    """
    global _track_pred,_track_true

    # this is the perfect track model for Indy500 2018
    # (empirical caution-run lengths; one is sampled per split)
    track_model = [6,4,4,5,6,6,4]

    if endpos in _track_pred:
        return _track_pred[endpos]
    else:
        #get yflag lap count from the start pred point
        yflaplen = 0
        for i in range(1, context_len):
            if track_rec[- prediction_length - i] == 1:
                yflaplen += 1
            else:
                break

        #laps remain, fill into the future
        trackpred = np.array([0 for x in range(prediction_length)])
        yflap_pred = random.choice(track_model)
        # only extend an already-running caution; a green split predicts green
        if yflaplen > 0 and yflap_pred > yflaplen:
            trackpred[:(yflap_pred - yflaplen)] = 1

        _track_pred[endpos] = trackpred
        _track_true[endpos] = track_rec[- prediction_length:].copy()

        return trackpred
# endpos -> vector of prediction_length
# Cache for the track-status disturbance model (see adjust_track_model).
_track_adjust = {}
def init_adjust_track_model():
    # Reset the adjustment cache; call before building a new dataset split.
    global _track_adjust
    _track_adjust = {}
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """
    Randomly perturb the end of a caution run for disturbance analysis.

    input:
        track_rec ; 1d track-status vector up to endpos
        endpos ; split end position, used as the cache key
        prediction_length ; forecast horizon in laps
        tailpos ; <0 end pos of 1 (negative index of the last yellow lap
            inside the forecast window)
    return the predicted track status (vector of length prediction_length)
    """
    global _track_adjust

    # this is the perfect track model for Indy500 2018
    # (shift applied to the caution end: shorten / keep / lengthen)
    track_model = [-1,0,1]

    if endpos in _track_adjust:
        return _track_adjust[endpos]
    else:
        yflap_adjust = random.choice(track_model)

        #laps remain, fill into the future
        trackadjust = track_rec[-prediction_length:].copy()
        if yflap_adjust == -1:
            trackadjust[tailpos] = 0
        elif yflap_adjust == 1:
            # NOTE(review): this branch zeroes tailpos before setting
            # tailpos+1, which shifts the caution end rather than extending
            # it — confirm this is the intended "+1" disturbance.
            trackadjust[tailpos] = 0
            if (tailpos + 1) <= -1:
                trackadjust[tailpos+1] = 1

        _track_adjust[endpos] = trackadjust

        return trackadjust
# carno -> lap_status
# _lap_adjust caches the disturbed lap status per car;
# _empirical_model accumulates how often each shift offset was applied.
_lap_adjust = {}
_empirical_model = {}
def init_adjust_pitmodel():
    """Reset the pit-lap disturbance caches before a new evaluation run."""
    # bug fix: _empirical_model was assigned without a `global` declaration,
    # so the reset only created a dead local and the module-level statistics
    # dict was never cleared between runs.
    global _lap_adjust, _empirical_model
    _lap_adjust = {}
    _empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """
    init the lapstatus for each car, save it for future reference

    Randomly shifts every pit lap by an offset drawn from _adjust_model and
    caches the result per car so that repeated calls for the same car (e.g.
    across rolling test windows) return the identical disturbed sequence.

    input:
        carno; car number, used as the cache key
        lapstatus ; the trueth (1 marks a pit lap)
        force ; when True, resample until the shifted position is in range;
            when False, a single out-of-range draw leaves the pit unmoved
    return:
        the cached, disturbed lap-status vector for this car
    """
    if carno not in _lap_adjust:
        #adjust it
        lapadjust = lapstatus.copy()
        for pos in range(0, len(lapstatus)):
            if lapadjust[pos] == 1:
                success = False
                while(not success):
                    # adjust this pit lap position
                    pos_adjust = get_random_choice(_adjust_model)
                    new_pos = pos + pos_adjust

                    if new_pos >= 0 and new_pos < len(lapstatus):
                        #valid
                        lapadjust[pos] = 0
                        lapadjust[new_pos] = 1
                        success = True

                        #add statistics
                        if pos_adjust not in _empirical_model:
                            _empirical_model[pos_adjust] = 1
                        else:
                            _empirical_model[pos_adjust] += 1

                    if force==False:
                        break

        _lap_adjust[carno] = lapadjust

    return _lap_adjust[carno]
def build_random_model(modeldict):
    """
    Convert a {value: probability-weight} dict into a sampling table.

    input:
        modeldict ; {val: probability}
    return:
        model ; [val, cdf] rows sorted by val, cdf normalized to end at 1
    """
    table = np.zeros((len(modeldict), 2))
    running = 0
    for row, key in enumerate(sorted(modeldict.keys())):
        running = running + modeldict[key]
        table[row, 0] = key
        table[row, 1] = running
    # normalize the cumulative column so the final entry is 1.0
    table[:, 1] = table[:, 1] / running
    return table
def print_model(model, iscdf=True):
    """
    Pretty-print a model table as 'value:probability' pairs.

    input:
        model ; [val, cdf] rows (or [val, weight] rows when iscdf=False)
    """
    ordered = model[np.argsort(model[:, 0])]
    total = 1. if iscdf else np.sum(ordered[:, 1])
    prev = 0
    pairs = []
    for value, weight in ordered:
        # cdf rows encode probability as the delta from the previous row
        pairs.append((value, (weight - prev) / total))
        if iscdf:
            prev = weight
    #output
    print(['%d:%.3f' % (v, p) for v, p in pairs])
def get_random_choice(model):
    """
    Sample one value from a [val, cdf] table.

    input:
        model ; [val, cdf] rows with cdf ascending to 1.0
    return:
        val according to its probability
    """
    u = np.random.rand()
    # number of cdf entries strictly below u == index of the chosen row
    pos = np.sum(model[:, 1] < u)
    return int(model[pos, 0])
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
# Shift-offset distribution for the pit-lap disturbance: offset -> weight
# (normalized by build_random_model).  The mass at 0 is deliberately small
# so that most pit laps get displaced.
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
_adjust_model = build_random_model(_modeldict)
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """
    Randomly shift each pit lap inside the forecast window.

    Offsets are drawn from _adjust_model; unlike get_adjust_lapstatus the
    result is NOT cached, so every call resamples.

    input:
        lap_rec ; lap-status vector; only the last prediction_length entries
            are considered
        prediction_length ; forecast horizon in laps
        force ; when True, resample until the shifted position lands inside
            the window; when False a single out-of-range draw gives up
    return the predicted lap status (vector of length prediction_length)
    """
    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            success = False
            while(not success):
                # adjust this pit lap position
                pos_adjust = get_random_choice(_adjust_model)
                new_pos = pos + pos_adjust

                if new_pos >= 0 and new_pos < prediction_length:
                    #valid
                    lapadjust[pos] = 0
                    lapadjust[new_pos] = 1
                    success = True

                if force==False:
                    break

    return lapadjust
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
    """
    Randomly shift each pit lap in the forecast window by -1/0/+1 laps.

    input:
        lap_rec ; lap-status vector; only the last prediction_length entries
            are considered
        endpos ; split end position (unused; kept for interface parity with
            adjust_track_model)
        prediction_length ; forecast horizon in laps
    return the predicted lap status (vector of length prediction_length)
    """
    adjust_model = [-1,0,1]
    # bug fix: a dead `lap_adjust = random.choice(adjust_model)` draw was
    # removed — it was never used and only consumed one RNG sample.

    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            # adjust this pit lap position; shifts that would leave the
            # window are dropped (the pit stays where it was)
            pos_adjust = random.choice(adjust_model)

            if pos_adjust == -1:
                if (pos - 1 >= 0):
                    lapadjust[pos] = 0
                    lapadjust[pos - 1] = 1
            elif pos_adjust == 1:
                if (pos + 1 < prediction_length):
                    lapadjust[pos] = 0
                    lapadjust[pos + 1] = 1

    return lapadjust
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
"""
return the predicted pit status
"""
# this is the perfect empirical pit model for Indy500 2018
pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
[45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
[46, 45, 43, 48, 46, 45, 45, 43]]
pit_model = pit_model_all
if cuation_laps_instint>10:
#use low model
pred_pit_laps = random.choice(pit_model[0])
else:
pred_pit_laps = random.choice(pit_model[1])
#laps remain, fill into the future
pitpred = np.array([0 for x in range(prediction_length)])
if (pred_pit_laps > laps_instint) and (pred_pit_laps <= laps_instint + prediction_length):
pitpred[pred_pit_laps - laps_instint - 1] = 1
return pitpred
def make_dataset_byevent(runs, prediction_length, freq,
                       useeid = False,
                       run_ts= COL_LAPTIME,
                       test_event = 'Indy500-2018',
                       test_cars = [],
                       use_global_dict = True,
                       oracle_mode = MODE_ORACLE,
                       half_moving_win = 0,
                       train_ratio=0.8,
                       log_transform = False,
                       context_ratio = 0.,
                       verbose = False
                ):
    """
    split the ts to train and test part by the ratio

    Builds GluonTS train/test ListDatasets from the global `laptime_data`,
    applying the oracle/test/disturbance modes to the track- and lap-status
    covariates of each rolling test window.

    input:
        runs ; index into laptime_data, or <0 to use all events
        prediction_length ; forecast horizon in laps
        freq ; pandas frequency string for the synthetic timestamps
        useeid ; when True, add the event id as a second static feature
        oracle_mode: false to simulate prediction in real by
                set the covariates of track and lap status as nan in the testset
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
        train_ratio ; fraction of the longest ts used as the train prefix
        context_ratio ; when nonzero, overrides the test context length
        verbose ; print per-ts diagnostics
    return:
        train_ds, test_ds ; GluonTS ListDataset objects
        train_set, test_set ; the underlying record lists

    NOTE(review): the run_ts/test_event parameters are immediately
    overridden by the module globals _run_ts/_test_event/_feature_mode
    below; `test_cars` (mutable default) is unused here.
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode

    init_track_model()
    init_adjust_track_model()

    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    train_set = []
    test_set = []

    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()

    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []

        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out (only the test event is processed by this function)
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)

        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)

            # adjust for disturbance analysis (per-car cached random shifts)
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)

            # selection of features (zero out the covariates this mode hides)
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                                     rec[COL_LAPSTATUS,:]]
                              }
                             )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len

                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1

                #bug fix, fixed the split point for all cars/ts
                for endpos in range(max_len, context_len+prediction_length,
                                    step):

                    #check if enough for this ts
                    if endpos > totallen:
                        continue

                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    # stint state at the last observed lap before the window
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0

                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                                                     laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0

                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec

                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec

                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))

                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec]
                                     }
                                    )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                     }
                                    )
                    test_rec_cnt += 1

            #add one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

        train_set.extend(_train)
        test_set.extend(_test)

    print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)

    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile,freq, prediction_length, cardinality, train_ds, test_ds):
    """Pickle the dataset bundle [freq, prediction_length, cardinality,
    train_ds, test_ds] to *datafile* using the highest pickle protocol."""
    bundle = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as fh:
        pickle.dump(bundle, fh, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[6]:
def predict(test_ds,predictor):
    """Run *predictor* over *test_ds* and materialize the results.

    return:
        tss ; list of ground-truth series (pandas)
        forecasts ; list of sample forecasts (100 sample paths each)
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds, predictor=predictor, num_samples=100)
    # consume forecasts first, then the truth iterator (gluonts pairs them)
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print('tss len={}, forecasts len={}'.format(len(tss), len(forecasts)))
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name,trainid):
    """Load the named predictor and forecast over *test_ds*.

    input:
        test_ds ; GluonTS test dataset
        prediction_length ; forecast horizon in laps
        model_name ; a serialized deepAR variant ('curtrack','zerotrack',
            'oracle','oracle-laponly','oracle-trackonly','deepAR') or a
            baseline ('naive','zero','arima')
        trainid ; id of the training run, selects the model directory
    return:
        [tss, forecasts] on success, [] for an unknown model name
    """
    # serialized deepAR model sub-directories, keyed by model_name
    # (paths kept byte-identical to the original per-branch code)
    deepar_dirs = {
        'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
        'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
        'oracle': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
        'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
        'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
        'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
    }

    with mx.Context(mx.gpu(7)):  # NOTE(review): GPU id is hard-coded
        pred_ret = []

        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'

        if model_name in deepar_dirs:
            # all serialized deepAR variants share the same load/predict flow
            modeldir = rootdir + deepar_dirs[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]

        # naive
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]

        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]

        # arima
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                                           prediction_length = prediction_length,trunc_length=60)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]

        else:
            print(f'error: model {model_name} not support yet!')

        return pred_ret
def load_model(prediction_length, model_name,trainid):
    """Load a predictor by name without running it.

    Same model catalog as run_prediction_ex; see that function for the
    supported names.

    return:
        the predictor instance, or None for an unknown model name
        (bug fix: previously an unknown name raised NameError on return)
    """
    # serialized deepAR model sub-directories, keyed by model_name
    # (paths kept byte-identical to the original per-branch code)
    deepar_dirs = {
        'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
        'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
        'oracle': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
        'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
        'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
        'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
    }

    with mx.Context(mx.gpu(7)):  # NOTE(review): GPU id is hard-coded
        predictor = None

        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'

        if model_name in deepar_dirs:
            # all serialized deepAR variants share the same deserialize flow
            modeldir = rootdir + deepar_dirs[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')

        # naive
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)

        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)

        # arima
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                                           prediction_length = prediction_length,trunc_length=60)

        else:
            print(f'error: model {model_name} not support yet!')

        return predictor
# In[7]:
#calc rank
def eval_rank_bytimediff(test_ds,tss,forecasts,prediction_length):
"""
timediff models
works for one event only
"""
carlist = []
# carno-lap# -> elapsed_time[] array
forecasts_et = dict()
ds_iter = iter(test_ds)
for idx in range(len(test_ds)):
test_rec = next(ds_iter)
#global carid
carno = decode_carids[test_rec['feat_static_cat'][0]]
#print('car no:', carno)
if carno not in carlist:
carlist.append(carno)
# calc elapsed time
prediction_len = forecasts[idx].samples.shape[1]
if prediction_length != prediction_len:
print('error: prediction_len does not match, {prediction_length}:{prediction_len}')
return []
#forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
timediff_array = tss[idx].values.copy()
#save the prediction
completed_laps = len(tss[idx]) - prediction_len + 1
#print('car no:', carno, 'completed_laps:', completed_laps)
#key = '%s-%s'%(carno, completed_laps)
#forecasts_et[key] = elapsed_time[-prediction_len:].copy()
if completed_laps not in forecasts_et:
forecasts_et[completed_laps] = {}
forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
forecast_laptime_mean.copy()]
# calc rank
rank_ret = []
for lap in forecasts_et.keys():
#get car list for this lap
carlist = list(forecasts_et[lap].keys())
#print('carlist:', carlist)
caridmap={key:idx for idx, key in enumerate(carlist)}
#fill in data
time_diff = np.zeros((2, len(carlist), prediction_len))
for carno in carlist:
carid = caridmap[carno]
time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
#calculate rank
idx = np.argsort(time_diff[0], axis=0)
true_rank = np.argsort(idx, axis=0)
idx = np.argsort(time_diff[1], axis=0)
pred_rank = np.argsort(idx, axis=0)
rank_ret.append([lap, time_diff, true_rank, pred_rank])
return rank_ret,forecasts_et
#calc rank
def eval_laptime(test_ds,tss,forecasts,prediction_length, start_offset):
"""
evaluate rank by laptime forecasting
input:
test_ds ; must be test set for a single event, because test_ds itself does not
contain features to identify the eventid
start_offset[]; elapsed time for lap0, for one specific event
tss,forecasts ; forecast result
prediction_length ;
return:
rank_ret ; [lap, elapsed_time, true_rank, pred_rank]
forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
"""
carlist = []
# carno-lap# -> elapsed_time[] array
forecasts_et = dict()
ds_iter = iter(test_ds)
for idx in range(len(test_ds)):
test_rec = next(ds_iter)
#global carid
carno = decode_carids[test_rec['feat_static_cat'][0]]
#print('car no:', carno)
if carno not in carlist:
carlist.append(carno)
#start_offset is global var
#offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
#print('start_offset:', offset)
# calc elapsed time
prediction_len = forecasts[idx].samples.shape[1]
if prediction_length != prediction_len:
print('error: prediction_len does not match, {prediction_length}:{prediction_len}')
return []
forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
#forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
laptime_array = tss[idx].values.copy()
#elapsed_time = np.cumsum(laptime_array) + offset
laptime_array_hat = tss[idx].values.copy()
laptime_array_hat[-prediction_len:] = forecast_laptime_mean
#elapsed_time_hat = np.cumsum(laptime_array) + offset
#save the prediction
completed_laps = len(tss[idx]) - prediction_len + 1
#print('car no:', carno, 'completed_laps:', completed_laps)
#key = '%s-%s'%(carno, completed_laps)
#forecasts_et[key] = elapsed_time[-prediction_len:].copy()
if completed_laps not in forecasts_et:
forecasts_et[completed_laps] = {}
#forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len-1:].copy(),
# elapsed_time_hat[-prediction_len-1:].copy()]
forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
laptime_array_hat[-prediction_len:].copy()]
# calc rank
rank_ret = []
for lap in forecasts_et.keys():
#get car list for this lap
carlist = list(forecasts_et[lap].keys())
#print('carlist:', carlist)
caridmap={key:idx for idx, key in enumerate(carlist)}
#fill in data
#elapsed_time = np.zeros((2, len(carlist), prediction_len+1))
lap_time = np.zeros((2, len(carlist), prediction_len))
for carno in carlist:
carid = caridmap[carno]
lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
#calculate rank
#idx = np.argsort(elapsed_time[0], axis=0)
#true_rank = np.argsort(idx, axis=0)
true_laptime = lap_time[0]
#idx = np.argsort(elapsed_time[1], axis=0)
#pred_rank = np.argsort(idx, axis=0)
pred_laptime = lap_time[1]
rank_ret.append([lap, lap_time, true_laptime, pred_laptime])
return rank_ret,forecasts_et
#calc rank
def eval_rank(test_ds, tss, forecasts, prediction_length, start_offset):
    """
    evaluate rank by laptime (or direct rank/timediff) forecasting

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
            contain features to identify the eventid
        start_offset[]; DataFrame of elapsed time for lap0 of this event; when a
            DataFrame is passed, laptimes are converted to elapsed time first
            (laptime2rank mode), otherwise the target series is ranked directly
        tss, forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
    """
    carlist = []
    # carno-lap# -> elapsed_time[] array
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        # elapsed time of lap0 for this car (only needed in laptime2rank mode)
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]

        # sanity check that the forecast window matches the requested length
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: this message was missing the f-string prefix
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []

        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))

        if isinstance(start_offset, pd.core.frame.DataFrame):
            # laptime2rank: accumulate laptimes into elapsed time
            laptime_array = tss[idx].values.copy()
            elapsed_time = np.cumsum(laptime_array) + offset

            laptime_array = tss[idx].values.copy()
            laptime_array[-prediction_len:] = forecast_laptime_mean
            elapsed_time_hat = np.cumsum(laptime_array) + offset
        else:
            # rank-direct: the target series itself is the ranking key
            elapsed_time = tss[idx].values.copy()
            elapsed_time_hat = tss[idx].values.copy()
            elapsed_time_hat[-prediction_len:] = forecast_laptime_mean

        # the lap at which this forecast window starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]

    # rank the cars lap by lap
    rank_ret = []
    for lap in forecasts_et.keys():
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}

        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        # double argsort turns sort order into per-car rank
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])

    return rank_ret, forecasts_et
def get_acc(rank_ret, prediction_length, verbose = False):
    """
    Accumulate rank-accuracy metrics over a list of rank records.

    input:
        rank_ret: [lap, elapsed_time, true_rank, pred_rank], only columns [2][3] are used
    return:
        ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
         (record count backing each metric ...))
        the counts allow micro/macro aggregation by the caller
    """
    # raw accumulators over all records
    top1_hits, top1_far_hits = 0, 0
    top5_hits, top5_far_hits = 0, 0
    tau = 0
    rmse = 0.
    mae = 0.

    for rec in rank_ret:
        gt = rec[2]
        pred = rec[3]

        # top1: car ranked first in both truth and prediction
        top1_hits += np.sum((gt == 0) & (pred == 0))
        top1_far_hits += np.sum((gt[:, -1] == 0) & (pred[:, -1] == 0))

        # top5 overlap between true and predicted leaders
        top5_hits += np.sum((gt < 5) & (pred < 5))
        top5_far_hits += np.sum((gt[:, -1] < 5) & (pred[:, -1] < 5))

        # correlation and error metrics
        tao, _ = stats.kendalltau(gt, pred)
        tau += tao
        rmse += mean_squared_error(pred, gt)
        mae += np.sum(np.abs(pred - gt))

    recnt = len(rank_ret)
    if recnt > 0:
        # normalize into averages / accuracy ratios
        top1acc = top1_hits * 1.0 / (recnt * prediction_length)
        top1acc_farmost = top1_far_hits * 1.0 / recnt
        top5acc = top5_hits * 1.0 / (5 * recnt * prediction_length)
        top5acc_farmost = top5_far_hits * 1.0 / (5 * recnt)
        tau = tau / recnt
        rmse = rmse / recnt
        mae = mae / recnt
    else:
        # no records: report the raw (zero) accumulators unchanged
        top1acc, top1acc_farmost = top1_hits, top1_far_hits
        top5acc, top5acc_farmost = top5_hits, top5_far_hits

    # debug only: when forecasting lap status, report mae in the tau slot
    if _run_ts == COL_LAPSTATUS:
        tau = mae

    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', top1acc,
              'top1acc_farmost=', top1acc_farmost,
              'top5acc=', top5acc,
              'top5acc_farmost=', top5acc_farmost,
              )
        print('tau = ', tau,
              'rmse = ', rmse,
              'mae = ', mae)

    return ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
            (recnt*prediction_length, recnt, 5*recnt*prediction_length, 5*recnt, recnt, recnt))
# In[ ]:
# In[8]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc, datamode='', models=[]):
    """
    Run an experiment grid (halfmode x prediction_length x trainid) `runs`
    times and average the resulting metrics.

    input:
        runs ; number of repetitions to average over
        plens=[2,5,10]
        half=[False]
        #trainids = ["indy500-r0.2","indy500-r0.4","indy500"]
        trainids = ["r0.5"]
        train_ratio=0.5
        testfunc ; a callable such as run_exp_predpit, run_exp_predtrack, run_exp ...
            or a string, in which case run_exp is invoked with `datamode`/`models`
    return:
        dfret ; average result of multiple runs
            dataframe['model' , 'prediction_length', 'halfmode','datamode','trainid',
            'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse',
            'top1acc_std','top1acc_farmost_std','top5acc_std','top5acc_farmost_std','tau_std','rmse_std']
        alldata_ret ; for debug
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
            pred_ret[model] -> [tss, forecasts]
            test_ds[model] -> test_ds
            rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
            forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    if plens == [] or half == [] or trainids == []:
        print("error with empty settings")
        return
    #testfunc or (datamode & models): the string form requires both extras
    if isinstance(testfunc,str) and (datamode == '' or models == []):
        print("error with testfunc")
        return
    allret = []
    alldata_ret = []
    for runid in range(runs):
        exp_data = []
        exp_result = []
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('='*10)
                    # a callable testfunc takes precedence over the string form
                    if not isinstance(testfunc,str):
                        pred_ret, test_ds, rank_ret, metric_ret = testfunc(plen, halfmode,
                                                                   train_ratio=train_ratio,
                                                                   trainid=trainid)
                    else:
                        pred_ret, test_ds, rank_ret, metric_ret = run_exp(plen, halfmode,
                                                                  train_ratio=train_ratio,
                                                                  trainid=trainid,
                                                                  datamode=datamode,
                                                                  models=models)
                    #save raw artifacts for later inspection
                    exp_data.append((pred_ret, test_ds, rank_ret))
                    exp_result.extend(metric_ret)
        #save result of this run as a dataframe
        result = pd.DataFrame(exp_result, columns = ['model' , 'prediction_length', 'halfmode',
                                                     'datamode','trainid',
                                                     'top1acc','top1acc_farmost','top5acc',
                                                     'top5acc_farmost','tau','rmse'])
        #result['runid'] = [runid for x in range(len(result))]
        allret.append(result)
        alldata_ret.append(exp_data)
    #final: stack the 6 metric columns of every run into one tensor
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :,:] = ret[['top1acc','top1acc_farmost','top5acc',
                                   'top5acc_farmost','tau','rmse']].values
    #average (and std) over the runs axis
    averagemat = np.mean(metrics[:,:,:], axis=0)
    stdmat = np.std(metrics[:,:,:], axis=0)
    dfhead = allret[0][['model' , 'prediction_length', 'halfmode', 'datamode','trainid']]
    dfaverage = pd.DataFrame(averagemat, columns = ['top1acc','top1acc_farmost','top5acc',
                                                    'top5acc_farmost','tau','rmse'])
    dfstd = pd.DataFrame(stdmat, columns = ['top1acc_std','top1acc_farmost_std','top5acc_std',
                                            'top5acc_farmost_std','tau_std','rmse_std'])
    dfret = pd.concat([dfhead, dfaverage, dfstd], axis=1)
    #if exp_id != '':
    #    dfret.to_csv(f'laptime2rank-evaluate-indy500-{exp_id}-result.csv', float_format='%.3f')
    return dfret, alldata_ret
def checkret_status(dataret, runid = 0, idx = 0,model='oracle'):
    """
    Print how many yellow-flag laps and pit laps fall inside the prediction
    window across all records of one test dataset.

    dataret layout (see run_test):
        [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
        pred_ret[model] -> [tss, forecasts]
        test_ds[model]  -> test_ds
    """
    # prediction window length comes from the first forecast's sample matrix
    _, plen = dataret[runid][idx][0][model][1][0].samples.shape
    ds = dataret[runid][idx][1][model]

    yellow_total = 0
    pit_total = 0
    for rec in iter(ds):
        # decode kept to mirror the original record walk
        carno = decode_carids[rec['feat_static_cat'][0]]
        track_status, lap_status = rec['feat_dynamic_real']
        yellow_total += np.sum(track_status[-plen:])
        pit_total += np.sum(lap_status[-plen:])
    print('yfcnt:', yellow_total, 'pitcnt:', pit_total)
def get_ref_oracle_testds(plens, halfs, train_ratio=0.8, test_cars = []):
    """
    Build oracle-mode reference test datasets for every
    (prediction_length, half_moving_win) combination.

    return: {'<plen>-<halfwin>': test_ds}
    """
    testset = {}
    for plen in plens:
        for halfwin in halfs:
            # only the test split is kept; the train split is discarded
            _, ds, _, _ = make_dataset_byevent(events_id[_test_event], plen, freq,
                                               oracle_mode=MODE_ORACLE,
                                               run_ts = _run_ts,
                                               test_cars=test_cars,
                                               half_moving_win= halfwin,
                                               train_ratio=train_ratio)
            testset['%d-%d'%(plen, halfwin)] = ds
    return testset
def checkret_confusionmat(dataret, ref_testset, runid= 0, testid = '', model='oracle'):
    """
    output the 4x4 confusion matrix split by track and lap status

    For each prediction-length setting in dataret, classify every forecast
    start lap by whether the window contains yellow-flag laps ('1x') and/or
    pit laps ('x1') -- types '00','10','01','11' -- then report get_acc
    metrics per type plus an 'aa' row over all records.

    input:
        dataret ; output of run_test (see its docstring for the layout)
        ref_testset ; '<plen>-0' -> oracle test ds, from get_ref_oracle_testds
    return:
        DataFrame['testid','plen','type','reccnt', get_acc metric columns...]
    """
    plen_length = len(dataret[runid])
    dflist = []
    for idx in range(plen_length):
        # prediction window length from the forecast sample matrix
        _, plen = dataret[runid][idx][0][model][1][0].samples.shape
        test_ds = dataret[runid][idx][1][model]
        rank_ret = dataret[runid][idx][2][model][0]
        key = '%d-%d'%(plen,0)
        if key not in ref_testset:
            print(f'error, {key} not found in ref_testset')
            continue
        ref_oracle_testds = ref_testset[key]
        if len(ref_oracle_testds) != len(test_ds):
            print('error, size of testds mismatch', len(ref_oracle_testds), len(test_ds))
            continue
        # confusion matrix for <trackstatus, lapstatus> type: 00,01,10,11
        # lap(start lap of prediction) -> (yellow-flag count, pit count),
        # accumulated over all cars starting a window at that lap
        lapmap = {}
        ds_iter = iter(ref_oracle_testds)
        for recid in range(len(ref_oracle_testds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            track_rec,lap_rec = test_rec['feat_dynamic_real']
            yfcnt = np.sum(track_rec[-plen:])
            pitcnt = np.sum(lap_rec[-plen:])
            #laptype = ('0' if yfcnt==0 else '1') + ('0' if pitcnt==0 else '1')
            lap = len(track_rec) - plen + 1
            if lap not in lapmap:
                #lapmap[lap] = laptype
                lapmap[lap] = (yfcnt, pitcnt)
            else:
                oldtype = lapmap[lap]
                lapmap[lap] = (yfcnt + oldtype[0], pitcnt + oldtype[1])
        #split the rank_ret by laptype and score each bucket separately
        types=['00','10','01','11']
        acc_ret = []
        for laptype in types:
            check_ret = []
            for item in rank_ret:
                typecnt = lapmap[item[0]]
                thetype = ('0' if typecnt[0]==0 else '1') + ('0' if typecnt[1]==0 else '1')
                if thetype == laptype:
                    check_ret.append(item)
            # get acc for this status-type bucket
            metrics = get_acc(check_ret,plen)
            recret = [testid, plen, laptype, len(check_ret)]
            recret.extend(metrics[0])
            acc_ret.append(recret)
        #add all test ('aa' row = metrics over every record)
        metrics = get_acc(rank_ret,plen)
        recret = [testid, plen, 'aa', len(rank_ret)]
        recret.extend(metrics[0])
        acc_ret.append(recret)
        _dfacc = pd.DataFrame(acc_ret, columns = ['testid','plen',
                            'type','reccnt','top1acc','top1acc_farmost','top5acc',
                            'top5acc_farmost','tau','rmse'])
        dflist.append(_dfacc)
    dfacc = pd.concat(dflist, axis=0)
    return dfacc
# In[9]:
def check_testds(datamode, test_cars=[]):
    """
    report mae, etc

    Builds a (train_ds, test_ds) pair for every (prediction_length,
    half_moving_win) combination taken from the module globals plens/half.
    NOTE(review): the returned datasets are discarded -- presumably
    make_dataset_byevent itself prints the statistics of interest; confirm.
    """
    for prediction_length in plens:
        for half_moving_win in half:
            train_ds, test_ds,_,_ = make_dataset_byevent(events_id[_test_event], prediction_length,freq,
                                     oracle_mode=datamode,
                                     run_ts = _run_ts,
                                     test_cars=test_cars,
                                     half_moving_win= half_moving_win,
                                     train_ratio=train_ratio)
def dotest(config):
    """
    Run the configured experiments and collect both the averaged metrics and
    the confusion-matrix accuracy tables.

    config: {model: {testfunc_name: datamode}}; relies on the module globals
    runs/plens/half/trainids/train_ratio/ref_testset.
    return: (metrics dataframe, accuracy dataframe)
    """
    metric_frames = []
    acc_frames = []
    for model, conf in config.items():
        for teststr, datamode in conf.items():
            df, dataret = run_test(runs, plens, half, trainids,
                                   train_ratio, teststr,
                                   datamode=datamode, models=[model])
            # per-status-type accuracy breakdown for this run
            acc = checkret_confusionmat(dataret, ref_testset,
                                        testid=teststr, model=model)
            metric_frames.append(df)
            acc_frames.append(acc)
    return pd.concat(metric_frames, axis=0), pd.concat(acc_frames, axis=0)
# In[11]:
def longterm_predict(predictor, runs, prediction_length, freq,
                    useeid = False,
                    run_ts= COL_LAPTIME,
                    test_event = 'Indy500-2018',
                    test_cars = [],
                    use_global_dict = True,
                    oracle_mode = MODE_ORACLE,
                    half_moving_win = 0,
                    train_ratio=0.8,
                    log_transform = False,
                    verbose = False
                ):
    """
    Roll the predictor through the test event one window at a time, writing
    the predicted status covariates (and pitage) back into the record so that
    later windows see them, and collect the per-window mean forecasts.

    split the ts to train and test part by the ratio
    input:
        predictor ; a loaded predictor (used via make_evaluation_predictions)
        runs ; index into laptime_data selecting one event, or <0 for all
        oracle_mode: false to simulate prediction in real by
            set the covariates of track and lap status as nan in the testset
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
    NOTE(review): run_ts/test_event/feature_mode/context_ratio are overwritten
        from the module globals _run_ts/_test_event/_feature_mode/_context_ratio
        immediately below, so the corresponding parameters are effectively
        ignored.
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
    """
    # module-level experiment configuration wins over the arguments
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    init_track_model()
    init_adjust_track_model()
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []
    forecasts_et = {}
    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus: accumulated |adjusted - raw| over all windows
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out: only the test event is processed by this function
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10
        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process for each ts (one ts per car)
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            # keep pristine copies of the status rows; they are restored at
            # the start of every prediction window below
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()
            # output matrix <5, totallen>: status, truth, pred, 2 placeholders
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                        'start': start,
                        'feat_static_cat': static_cat,
                        'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                               rec[COL_LAPSTATUS,:]]
                       }
                      )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = int(prediction_length/2)
                elif half_moving_win == 2:
                    step = prediction_length
                else:
                    step = 1
                #bug fix, fixed the split point for all cars/ts
                #for endpos in range(max_len, context_len+prediction_length,step):
                for endpos in range(context_len+prediction_length, max_len, step):
                    #check if enough for this ts
                    if endpos > totallen:
                        break
                    # RUN Prediction for single record
                    _test = []
                    # check pitstop(stint) in the last prediction
                    # use ground truth of target before the last pitstop
                    if np.sum(lap_status[endpos-2*prediction_length:endpos-prediction_length]) > 0:
                        # pit found
                        # adjust endpos so the window starts right after the pit
                        pitpos = np.where(lap_status[endpos-2*prediction_length:endpos-prediction_length] == 1)
                        endpos = endpos-2*prediction_length + pitpos[0][0] + prediction_length + 1
                        #print('endpos:',endpos,pitpos)
                        #check if enough for this ts
                        if endpos > totallen:
                            break
                    #reset target, status to the pristine per-car copies
                    target_val = rec[run_ts,:].copy().astype(np.float32)
                    rec[COL_LAPSTATUS, :] = lap_status
                    rec[COL_TRACKSTATUS, :] = track_status
                    rec[COL_LAPS_INSTINT, :] = pitage_status
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    # stint state at the last observed lap before the window
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                                                     laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit resets the stint age
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec
                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec
                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                               }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                               }
                              )
                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)
                    #get prediction result
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
                    #update target_val and write the (possibly predicted)
                    #covariates back into rec for the following windows
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
                    #save forecast
                    #save the prediction into row 2 of the output matrix
                    completed_laps = len(tss[0]) - prediction_length + 1
                    #print('car no:', carno, 'completed_laps:', completed_laps)
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
                    test_rec_cnt += 1
            #one ts done
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
    #train_set.extend(_train)
    #test_set.extend(_test)
    #print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    #train_ds = ListDataset(train_set, freq=freq)
    #test_ds = ListDataset(test_set, freq=freq)
    return forecasts_et
# In[12]:
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset):
    """
    evaluate stint rank by laptime forecasting

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3,: -> true rank (filled in here)
            4,: -> pred rank (filled in here)
        start_offset ; DataFrame of elapsed time at lap0 for this event; when it
            is not a DataFrame an offset of 0 is used for every car
            (bugfix: previously `offset` was unbound in that case -> NameError)
        prediction_length ; unused, kept for a uniform signature
    return:
        forecasts_et with rows 3/4 filled with true/pred rank
    """
    # get car list and a stable car -> row index mapping
    carlist = list(forecasts_et.keys())
    caridmap = {key: idx for idx, key in enumerate(carlist)}

    # convert laptime to elapsed time; todo, Indy500 -> 200 max laps
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:, :] = np.nan

    for carno in forecasts_et.keys():
        # elapsed time of lap0; fall back to 0 when no offset table is given
        offset = 0
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]

        lapnum = len(forecasts_et[carno][1,:])
        # row 0: truth, row 1: prediction
        elapsed_time[0, caridmap[carno], :lapnum] = np.cumsum(forecasts_et[carno][1,:]) + offset
        elapsed_time[1, caridmap[carno], :lapnum] = np.cumsum(forecasts_et[carno][2,:]) + offset

    # double argsort turns sort order into per-car rank (nan sorts last)
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)

    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno], :lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno], :lapnum]
    return forecasts_et
def eval_stint_direct(forecasts_et, prediction_length):
    """
    Rank cars directly from the forecast target (e.g. timediff), without
    converting laptimes to elapsed time first.

    input:
        forecasts_et ; carno -> 5 x totallen matrix
            0,: lapstatus   1,: true target   2,: pred target
            3,: true rank (output)   4,: pred rank (output)
        prediction_length ; unused, kept for a uniform signature
    return:
        forecasts_et with rows 3/4 filled
    """
    cars = list(forecasts_et.keys())
    car2idx = {c: i for i, c in enumerate(cars)}

    # todo, Indy500 -> at most 200 laps; unused laps stay nan
    maxlap = 200
    diff_time = np.full((2, len(cars), maxlap), np.nan)

    for carno, mat in forecasts_et.items():
        laps = len(mat[1, :])
        diff_time[0, car2idx[carno], :laps] = mat[1, :]
        diff_time[1, car2idx[carno], :laps] = mat[2, :]

    # double argsort converts sort order into per-car rank (nan sorts last)
    true_rank = np.argsort(np.argsort(diff_time[0], axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(diff_time[1], axis=0), axis=0)

    # save the rank back
    for carno, mat in forecasts_et.items():
        laps = len(mat[1, :])
        mat[3, :] = true_rank[car2idx[carno], :laps]
        mat[4, :] = pred_rank[car2idx[carno], :laps]
    return forecasts_et
#calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    NOTE: duplicates eval_stint_bylaptime; kept because both names are used.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            (0: lapstatus, 1: true target, 2: pred target, 3/4: rank outputs)
        start_offset ; DataFrame of elapsed time at lap0 for this event; when it
            is not a DataFrame an offset of 0 is used for every car
            (bugfix: previously `offset` was unbound in that case -> NameError)
        prediction_length ; unused, kept for a uniform signature
    return:
        forecasts_et with rows 3/4 filled with true/pred rank
    """
    # get car list and a stable car -> row index mapping
    carlist = list(forecasts_et.keys())
    caridmap = {key: idx for idx, key in enumerate(carlist)}

    # convert laptime to elapsed time; todo, Indy500 -> 200 max laps
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:, :] = np.nan

    for carno in forecasts_et.keys():
        # elapsed time of lap0; fall back to 0 when no offset table is given
        offset = 0
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]

        lapnum = len(forecasts_et[carno][1,:])
        elapsed_time[0, caridmap[carno], :lapnum] = np.cumsum(forecasts_et[carno][1,:]) + offset
        elapsed_time[1, caridmap[carno], :lapnum] = np.cumsum(forecasts_et[carno][2,:]) + offset

    # double argsort turns sort order into per-car rank (nan sorts last)
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)

    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno], :lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno], :lapnum]
    return forecasts_et
# In[13]:
def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
           test_event='Indy500-2018', test_cars = [],
            datamode = MODE_ORACLE,model = 'oracle'):
    """
    Load one trained model and run the long-term rolling prediction on the
    configured test event.

    dependency: test_event, test on one event only
    NOTE(review): longterm_predict overrides its test_event argument with the
        module global _test_event, so the test_event parameter here has no
        effect; only prediction_length, half_moving_win, train_ratio, trainid,
        test_cars, datamode and model are actually used.
    return:
        forecasts ; carno -> 5 x totallen matrix (see longterm_predict)
    """
    # placeholders kept from an earlier multi-model version of this function
    retdf = []
    pred_ret = {}
    ds_ret = {}
    rank_result = {}
    predictor = {}
    #for model in models:
    print('exp:',inspect.stack()[0][3],'model:', model,
         'datamode:', get_modestr(datamode),'eval:', _exp_id )
    predictor[model] = load_model(prediction_length, model,
                trainid=trainid)
    ### create test dataset and run the rolling prediction
    forecasts = longterm_predict(predictor[model],
                    events_id[_test_event], prediction_length,freq,
                    oracle_mode=datamode,
                    run_ts = _run_ts,
                    test_cars=test_cars,
                    half_moving_win= half_moving_win,
                    train_ratio=train_ratio
                   )
    #forecasts = eval_stint_rank(forecasts_et, prediction_length,
    #                       global_start_offset[test_event])
    return forecasts
# In[14]:
def get_sign(diff):
    """Map *diff* to its sign: 1 for positive, -1 for negative, 0 otherwise."""
    if diff > 0:
        return 1
    if diff < 0:
        return -1
    return 0
def get_stint_acc(forecasts, trim=2, currank = False):
    """
    Build a per-stint table of true vs predicted rank changes.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        currank ; when True, emulate a "current rank" baseline that always
            predicts the end rank to equal the start rank
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
    output:
        DataFrame with columns carno, stintid, startrank, endrank, diff, sign,
        pred_endrank, pred_diff, pred_sign
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]

        # laps with a pit stop split the race into stints
        pitpos_list = np.where(forecasts[carno][0,:] == 1)[0]

        stintid = 0
        startrank = true_rank[0]
        for pitpos in pitpos_list:
            endrank = true_rank[pitpos-trim]
            diff = endrank - startrank
            sign = get_sign(diff)

            if currank:
                # current-rank baseline: predict no change across the stint
                pred_endrank = startrank
            else:
                pred_endrank = pred_rank[pitpos-trim]
            pred_diff = pred_endrank - startrank
            pred_sign = get_sign(pred_diff)

            rankret.append([carno, stintid, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                           ])
            stintid += 1
            startrank = true_rank[pitpos-trim]

        # last stint runs to the final lap; bugfix: a car with no pit stops
        # (empty pitpos_list) previously raised IndexError on pitpos_list[-1]
        if len(pitpos_list) == 0 or pitpos_list[-1] < lapnum - 1:
            endrank = true_rank[-1]
            diff = endrank - startrank
            sign = get_sign(diff)

            if currank:
                pred_endrank = startrank
            else:
                pred_endrank = pred_rank[-1]
            pred_diff = pred_endrank - startrank
            pred_sign = get_sign(pred_diff)

            rankret.append([carno, stintid, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                           ])

    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'stintid', 'startrank',
                                         'endrank', 'diff', 'sign',
                                         'pred_endrank', 'pred_diff', 'pred_sign',
                                        ])
    return df
def get_stint_acc_old(forecasts, trim=2):
    """
    Legacy variant of get_stint_acc without the `currank` baseline option;
    kept for reference.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
    output:
        carno, stintid, startrank, endrank, diff, sign
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]
        # laps with a pit stop split the race into stints
        pitpos_list = np.where(forecasts[carno][0,:] == 1)[0]
        stintid = 0
        startrank = true_rank[0]
        for pitpos in pitpos_list:
            endrank = true_rank[pitpos-trim]
            diff = endrank - startrank
            sign = get_sign(diff)
            pred_endrank = pred_rank[pitpos-trim]
            pred_diff = pred_endrank - startrank
            pred_sign = get_sign(pred_diff)
            rankret.append([carno, stintid, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                           ])
            stintid += 1
            startrank = true_rank[pitpos-trim]
        #end: record the final stint that runs to the last lap
        # NOTE(review): raises IndexError when pitpos_list is empty (a car
        # that never pits) -- confirm every car pits in this dataset.
        if pitpos_list[-1] < lapnum - 1:
            endrank = true_rank[-1]
            diff = endrank - startrank
            sign = get_sign(diff)
            pred_endrank = pred_rank[-1]
            pred_diff = pred_endrank - startrank
            pred_sign = get_sign(pred_diff)
            rankret.append([carno, stintid, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                           ])
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'stintid', 'startrank',
                                         'endrank', 'diff', 'sign',
                                         'pred_endrank', 'pred_diff', 'pred_sign',
                                        ])
    return df
# ### init
# In[15]:
#
# configurataion
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
#
# Default run configuration; the __main__ block below overwrites most of
# these from the command line.  The commented alternatives document the
# other supported task/experiment combinations.
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'
_test_event = 'Indy500-2018'
#_test_event = 'Indy500-2019'
_feature_mode = FEATURE_STATUS
_context_ratio = 0.
#_task_id = 'timediff' # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
#
#_task_id = 'lapstatus' # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus' #rank, laptime, laptim2rank, timediff2rank...
_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
# In[16]:
# Module-level state populated by init().
global_start_offset = {}  # event -> DataFrame(car_number, elapsed_time) at lap 0
global_carids = {}        # car_number -> encoded car id (from the pickle)
laptime_data = None       # per-event time-series arrays (from the pickle)
freq = "1min"
decode_carids = {}        # inverse of global_carids: car id -> car_number
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}'
def init():
    """Load per-event csv data plus the pre-built laptime pickle.

    Populates the module globals:
      global_start_offset : event -> DataFrame(car_number, elapsed_time) at lap 0
      global_carids       : car_number -> encoded car id
      laptime_data        : per-event time-series arrays from the pickle
      decode_carids       : inverse mapping of global_carids
    """
    global global_carids, laptime_data, global_start_offset, decode_carids

    stagedata = {}
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)

        alldata, rankdata, acldata = stagedata[event]

        #offset
        # per-car elapsed time at lap 0 = the race start offset of each car
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]

    # start from here
    import pickle
    #with open('laptime_rank_timediff_fulltest-oracle-%s.pickle'%year, 'rb') as f:
    with open(f'laptime_rank_timediff_pit-oracle-{dbid}.pickle', 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')

    decode_carids={carid:carno for carno, carid in global_carids.items()}
def runtest(modelname, model, datamode, naivemode, trainid= "2018"):
    """
    Run one long-term stint forecasting experiment and score it.

    modelname : label for the run (logging only)
    model     : model id passed to run_exp (e.g. 'oracle', 'deepAR', 'zero')
    datamode  : MODE_* bit combination controlling track/lap status inputs
    naivemode : True -> force the naive 'currank' evaluation in get_stint_acc
    trainid   : training split identifier

    returns (acc, mae, rmse, r2) of the predicted per-stint rank changes;
    (0, 0, 0, 0) when _exp_id is not supported.
    """
    forecast = run_exp(2, 2, train_ratio=0.1, trainid=trainid,
                       datamode=datamode, model=model)

    if _exp_id == 'rank' or _exp_id == 'timediff2rank':
        forecasts_et = eval_stint_direct(forecast, 2)
    elif _exp_id == 'laptime2rank':
        forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
    else:
        print(f'Error, {_exp_id} evaluation not support yet')
        return 0,0, 0,0

    df = get_stint_acc(forecasts_et, currank=naivemode, trim=_trim)

    # sign accuracy: did we predict the direction of the rank change?
    correct = df[df['sign'] == df['pred_sign']]
    acc = len(correct) / len(df)

    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values)) / len(df)
    # sklearn metrics take (y_true, y_pred); ground truth is 'diff'.
    # (mse/mae are symmetric, so their values are unchanged; note 'rmse'
    # is actually the MSE — mean_squared_error is not square-rooted here.)
    rmse = mean_squared_error(df['diff'].values, df['pred_diff'].values)
    mae = mean_absolute_error(df['diff'].values, df['pred_diff'].values)
    # BUGFIX: r2_score is asymmetric; the original passed the prediction as
    # y_true, which biases the reported R^2.
    r2 = r2_score(df['diff'].values, df['pred_diff'].values)

    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}')
    return acc, mae, rmse, r2
# In[20]:
def mytest():
    """
    Run the full suite of evaluation configurations and persist the results.

    Results are cached in a per-configuration csv; if the file already
    exists its contents are returned without re-running the experiments.

    returns a DataFrame with columns ['runid','acc','mae','rmse','r2'].
    """
    savefile = f'stint-evluate-{_exp_id}-d{_dataset_id}-t{_test_event}-c{_context_ratio}_trim{_trim}.csv'
    if os.path.exists(savefile):
        print(f'{savefile} already exists, bye')
        # return the cached result instead of discarding it
        retdf = pd.read_csv(savefile)
        return retdf

    # runid -> [model, datamode, naivemode]
    config = {'fulloracle':['oracle',MODE_ORACLE,False],
            'laponly':['oracle',MODE_ORACLE_LAPONLY,False],
            'notracklap':['oracle',MODE_NOTRACK + MODE_NOLAP,False],
            'fullpred':['oracle',MODE_PREDTRACK + MODE_PREDPIT,False],
            'curtrack':['oracle',MODE_TESTCURTRACK,False],
            'zerotrack':['oracle',MODE_TESTZERO,False],
            'predtrack':['oracle',MODE_PREDTRACK + MODE_ORACLE_TRACKONLY,False],
            'predpit':['oracle',MODE_PREDPIT + MODE_ORACLE_LAPONLY,False],
            'deepAR':['deepAR',MODE_ORACLE,False],
            'naive':['zero',MODE_ORACLE, True],
            }

    cols = ['runid','acc','mae', 'rmse', 'r2']
    result = []
    for modelname in config.keys():
        acc, mae, rmse, r2 = runtest(modelname, config[modelname][0],
                                     config[modelname][1], config[modelname][2])
        result.append([modelname, acc, mae, rmse, r2])

    retdf = pd.DataFrame(result, columns=cols)
    # BUGFIX: save under `savefile` (which includes the _trim suffix); the
    # original wrote to a different filename, so the existence check above
    # never matched and the cache was never reused.
    retdf.to_csv(savefile, float_format='%.3f')
    return retdf
# In[ ]:
if __name__ == '__main__':
    # Script entry point: parse options, set the module-level run
    # configuration, then load data and run the evaluation suite.
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)

    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))

    # cmd argument parser
    usage = 'stint_predictor_fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    parser.add_option("--contextratio", dest="contextratio", default=0.)
    parser.add_option("--trim", dest="trim", type=int, default=2)

    opt, args = parser.parse_args()

    #set global parameters
    # NOTE(review): --contextratio is parsed but never copied into
    # _context_ratio, so the option currently has no effect — confirm intent.
    _dataset_id = opt.datasetid
    _test_event = opt.testevent
    _trim = opt.trim
    # map the task id onto (trained-model task, ts row, experiment id)
    if opt.taskid == 'laptime':
        _task_id = 'laptime' # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
        _exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff' # rank,laptime, the trained model's task
        _run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
        _exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'rank':
        _task_id = 'rank' # rank,laptime, the trained model's task
        _run_ts = COL_RANK #COL_LAPTIME,COL_RANK
        _exp_id='rank' #rank, laptime, laptim2rank, timediff2rank...
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)

    if _dataset_id=='' or _test_event=='':
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)

    # datasets with 'pitage' in their id use the pit-age feature set.
    # NOTE(review): find() > 0 misses a 'pitage' prefix at position 0;
    # probably intended `>= 0` — confirm against dataset naming.
    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE

    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
            _task_id)

    init()
    mytest()
| 95,368 | 36.650612 | 310 | py |
rankpredictor | rankpredictor-master/src/indycar/model/gluonts_models_gpuonly.py | #!/usr/bin/env python
# coding: utf-8
"""
Gluonts Models on the Indy dataset
dataset:
freq, prediction_length, cardinality,train_ds, test_ds
models:
1. classical models
naive,
arima, ets, prophet
2. deep models
deepAR, deepstate, deepFactor
deepAR-Oracle
"""
# # DeepAR on simulation indy500 laptime dataset
#
# laptime dataset
# <eventid, carids, laptime (totalcars x totallaps)>
# Third-party imports
import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import logging
import os,sys
from optparse import OptionParser
import pickle
from pathlib import Path
from gluonts.dataset.common import ListDataset
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWEstimator
logger = logging.getLogger(__name__)

#global variables
# Defaults; freq/prediction_length/cardinality are overwritten by
# load_dataset() from the dataset pickle, context_length by the CLI.
prediction_length = 50
context_length = 100
freq = "1H"
events = ['Phoenix','Indy500','Texas','Iowa','Pocono','Gateway']
events_id={key:idx for idx, key in enumerate(events)}
cardinality = [0]  # [number of distinct cars]; filled by load_dataset()
TS_LAPTIME=2  # row index of the laptime series in the raw data
TS_RANK=3     # row index of the rank series in the raw data
def load_dataset(inputfile):
    """Load a pickled gluonts dataset and refresh the module settings.

    The pickle holds (freq, prediction_length, cardinality, train_ds,
    test_ds); the first three overwrite the module globals of the same
    name.  Returns (train_ds, test_ds).
    """
    global freq, prediction_length, cardinality

    with open(inputfile, 'rb') as pkl:
        payload = pickle.load(pkl, encoding='latin1')
    freq, prediction_length, cardinality, train_ds, test_ds = payload

    logger.info(f"number of cars: {cardinality}")
    return train_ds, test_ds
def plot_prob_forecasts(ts_entry, forecast_entry, outputfile):
    """
    Plot each observed series against its probabilistic forecast and save
    one PDF per entry as '<outputfile>-<idx>.pdf'.

    ts_entry       : list of pandas DataFrames (observed, possibly
                     multivariate series; only dimension 0 is plotted)
    forecast_entry : list of gluonts Forecast objects aligned with ts_entry
    outputfile     : path prefix for the generated figures
    """
    plot_length = context_length
    prediction_intervals = (50.0, 90.0)
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]

    figcnt = len(ts_entry)
    for idx in range(figcnt):
        fig, axs = plt.subplots(1, 1, figsize=(10, 7))
        # plot only dimension 0 of the (possibly multivariate) series
        ts_entry[idx].iloc[-plot_length:, 0].plot(ax=axs)
        forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='g')
        plt.grid(which="both")
        plt.legend(legend, loc="upper left")
        plt.savefig(outputfile + '-%d.pdf' % idx)
        # BUGFIX: release the figure; matplotlib otherwise keeps every
        # figure alive, leaking memory and warning after many plots.
        plt.close(fig)
def evaluate_model_old(estimator, train_ds, test_ds, outputfile):
    """Train `estimator`, serialize the predictor to `outputfile`, plot
    three sample cars, and log aggregate metrics (legacy variant kept for
    reference; superseded by evaluate_model)."""
    predictor = estimator.train(train_ds)
    # persist the trained predictor under the output directory
    if not os.path.exists(outputfile):
        os.mkdir(outputfile)
    predictor.serialize(Path(outputfile))
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    logger.info(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    # car12@rank1, car1@rank16, car7@rank33, the index is 7,0,4 accordingly
    # Indy500 Car 12 WillPower
    #offset = 52-7
    offset = 0
    ts_entry = [tss[7+offset],tss[0+offset],tss[4+offset]]
    forecast_entry = [forecasts[7+offset],forecasts[0+offset],forecasts[4+offset]]
    plot_prob_forecasts(ts_entry, forecast_entry, outputfile)
    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
    # aggregate accuracy metrics, logged as JSON
    logger.info(json.dumps(agg_metrics, indent=4))
def evaluate_model_uni(predictor, evaluator, test_ds, outputfile):
    """Backtest `predictor` on a univariate test set and log the aggregate
    accuracy metrics as JSON (sample-car plotting is currently disabled)."""
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,
        predictor=predictor,
        num_samples=100,  # sample paths drawn per series
    )

    forecasts = list(forecast_it)
    tss = list(ts_it)
    logger.info(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    # sample cars of interest (Indy500: car12@rank1, car1@rank16,
    # car7@rank33 live at indices 7, 0 and 4)
    offset = 0
    picks = [7 + offset, 0 + offset, 4 + offset]
    ts_entry = [tss[i] for i in picks]
    forecast_entry = [forecasts[i] for i in picks]
    #plot_prob_forecasts(ts_entry, forecast_entry, outputfile)

    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts),
                                          num_series=len(test_ds))
    logger.info(json.dumps(agg_metrics, indent=4))
def evaluate_model(predictor, evaluator, test_ds, outputfile):
    """Backtest `predictor` on `test_ds`, plot three sample cars, and log
    the aggregate accuracy metrics as JSON.

    tss entries have shape <ts_len, #feature>; forecast samples have shape
    <100, prediction_length, #feature>.
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,
        predictor=predictor,
        num_samples=100,  # sample paths drawn per series
    )

    forecasts = list(forecast_it)
    tss = list(ts_it)
    logger.info(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    # sample cars of interest (Indy500: car12@rank1, car1@rank16,
    # car7@rank33 live at indices 7, 0 and 4)
    offset = 0
    picks = [7 + offset, 0 + offset, 4 + offset]
    ts_entry = [tss[i] for i in picks]
    forecast_entry = [forecasts[i] for i in picks]

    plot_prob_forecasts(ts_entry, forecast_entry, outputfile)

    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts),
                                          num_series=len(test_ds))
    logger.info(json.dumps(agg_metrics, indent=4))
def init_estimator(model, gpuid, epochs=100, batch_size = 32,
        target_dim = 3, distr_output = None, use_feat_static = True):
    """
    Build the estimator/predictor named by `model`.

    model           : one of deepAR, deepARW, deepAR-Oracle, deepARW-Oracle,
                      deepAR-nocarid, deepAR-multi, simpleFF, deepFactor,
                      deepState, ets, prophet, arima, naive
    gpuid           : GPU device index used for training the deep models
    epochs          : training epochs for the deep models
    batch_size      : training batch size for the deep models
    target_dim      : output dimension (used by deepAR-multi only)
    distr_output    : gluonts output distribution for the deepAR variants
    use_feat_static : include the static car-id feature (and cardinality)

    Classical models (ets/prophet/arima/naive) need no training and are
    returned directly as predictors.  Exits the process on unknown models.

    The Trainer configuration was previously copy-pasted into every deep
    branch; it is now built once by the local factory below.
    """
    def _trainer(**overrides):
        # Shared trainer configuration for all deep models; `overrides`
        # lets single branches adjust it (e.g. simpleFF's hybridize=False).
        return Trainer(ctx="gpu(%s)" % gpuid,
                       batch_size=batch_size,
                       epochs=epochs,
                       learning_rate=1e-3,
                       num_batches_per_epoch=100,
                       **overrides)

    if model == 'deepAR':
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            distr_output=distr_output,
            freq=freq,
            trainer=_trainer(),
        )
    elif model == 'deepARW':
        estimator = DeepARWEstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            distr_output=distr_output,
            freq=freq,
            trainer=_trainer(),
        )
    elif model in ('deepAR-Oracle', 'deepARW-Oracle'):
        # Oracle variants additionally consume the dynamic real features
        # (track/lap status); cardinality is only passed together with the
        # static cat feature, matching the original per-branch behavior.
        kwargs = dict(
            prediction_length=prediction_length,
            context_length=context_length,
            use_feat_static_cat=use_feat_static,
            use_feat_dynamic_real=True,
            distr_output=distr_output,
            freq=freq,
            trainer=_trainer(),
        )
        if use_feat_static:
            kwargs['cardinality'] = cardinality
        cls = DeepAREstimator if model == 'deepAR-Oracle' else DeepARWEstimator
        estimator = cls(**kwargs)
    elif model == 'deepAR-nocarid':
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            use_feat_static_cat=use_feat_static,
            cardinality=cardinality,
            use_feat_dynamic_real=True,
            distr_output=distr_output,
            freq=freq,
            trainer=_trainer(),
        )
    elif model == 'deepAR-multi':
        # multivariate target: ignore `distr_output` and use a
        # MultivariateGaussian of the target dimension instead
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            use_feat_static_cat=use_feat_static,
            cardinality=cardinality,
            freq=freq,
            trainer=_trainer(),
            distr_output=MultivariateGaussianOutput(dim=target_dim),
        )
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length=context_length,
            freq=freq,
            trainer=_trainer(hybridize=False),
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            freq=freq,
            trainer=_trainer(),
        )
    elif model == 'deepState':
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=_trainer(),
        )
    elif model == 'ets':
        estimator = RForecastPredictor(method_name='ets', freq=freq, prediction_length=prediction_length)
    elif model == 'prophet':
        estimator = ProphetPredictor(freq=freq, prediction_length=prediction_length)
    elif model == 'arima':
        estimator = RForecastPredictor(method_name='arima', freq=freq, prediction_length=prediction_length, trunc_length=200)
    elif model == 'naive':
        estimator = NaivePredictor(freq=freq, prediction_length=prediction_length)
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)

    return estimator
if __name__ == '__main__':
    # Script entry point: parse options, build/load a predictor, and
    # evaluate it on the pickled dataset.
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)

    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))

    # cmd argument parser
    usage = 'deepar_simindy500.py --epochs epochs --input inputpicklefile --output outputfile'
    parser = OptionParser(usage)
    parser.add_option("--input", dest="inputfile", default='sim-indy500-laptime-2018.pickle')
    parser.add_option("--output", dest="outputfile")
    # NOTE(review): --epochs/--batch_size arrive as strings when given on
    # the command line (no type=); confirm downstream coercion.
    parser.add_option("--epochs", dest="epochs", default=100)
    parser.add_option("--model", dest="model", default="deepAR")
    parser.add_option("--gpuid", dest="gpuid", default=0)
    parser.add_option("--contextlen", dest="contextlen", default=100)
    parser.add_option("--batch_size", dest="batch_size", default=32)
    #parser.add_option("--predictionlen", dest="predictionlen", default=50)
    #parser.add_option("--testlen", dest="testlen", default=50)
    parser.add_option("--nosave", dest="nosave", action="store_true", default=False)
    parser.add_option("--evalmode", dest="evalmode", action="store_true", default=False)
    parser.add_option("--distr_output", dest="distr_output", default='student')
    parser.add_option("--nocarid", dest="nocarid", action="store_true", default=False)
    #obsolete
    parser.add_option("--mode", dest="mode", default='train')

    opt, args = parser.parse_args()

    #set the global length (freq/prediction_length come from the pickle)
    context_length = int(opt.contextlen)

    train_ds, test_ds = load_dataset(opt.inputfile)

    #get target dim (multivariate targets have shape (dim, len))
    entry = next(iter(train_ds))
    target_dim = entry['target'].shape
    target_dim = target_dim[0] if len(target_dim) > 1 else 1
    logger.info('target_dim:%s', target_dim)

    runid = f'-i{opt.outputfile}-e{opt.epochs}-m{opt.model}-p{prediction_length}-c{opt.contextlen}-f{freq}-dim{target_dim}-dstr{opt.distr_output}'
    logger.info("runid=%s", runid)

    # train
    classical_models = ['ets', 'arima', 'prophet', 'naive']
    distr_outputs ={'student':StudentTOutput(),
            'negbin':NegativeBinomialOutput()
            }
    if opt.distr_output in distr_outputs:
        distr_output = distr_outputs[opt.distr_output]
    else:
        logger.error('output distr no found:%s', opt.distr_output)
        exit(-1)

    use_feat_static = True
    if opt.nocarid:
        use_feat_static = False

    estimator = init_estimator(opt.model, opt.gpuid,
            opt.epochs, opt.batch_size,target_dim, distr_output = distr_output,use_feat_static = use_feat_static)

    if opt.evalmode == False:
        # classical models need no training; deep models are trained here
        if opt.model in classical_models:
            predictor = estimator
        else:
            predictor = estimator.train(train_ds)
            if not opt.nosave:
                if not os.path.exists(opt.outputfile):
                    os.mkdir(opt.outputfile)
                logger.info('Start to save the model to %s', opt.outputfile)
                predictor.serialize(Path(opt.outputfile))
                logger.info('End of saving the model.')
    else:
        if not os.path.exists(opt.outputfile):
            # BUGFIX: the original referenced an undefined name `outputfile`
            # here, raising NameError instead of logging the intended error.
            logger.error(f'error:{opt.outputfile} not exists')
            exit(-1)
        logger.info('Start to load the model from %s', opt.outputfile)
        predictor = Predictor.deserialize(Path(opt.outputfile))
        logger.info('End of loading the model.')

    # evaluate
    #if opt.multi!=0:
    if target_dim > 1:
        logger.info('Start MultivariateEvaluator')
        evaluator = MultivariateEvaluator(quantiles=[0.1, 0.5, 0.9])
    else:
        logger.info('Start Evaluator')
        evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])

    evaluate_model(predictor, evaluator, test_ds, opt.outputfile)
    #evaluate_model_uni(predictor, evaluator, test_ds, opt.outputfile)
| 18,983 | 34.886578 | 146 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepmodels_indy.py | #!/usr/bin/env python
# coding: utf-8
"""
Deep Models on the Indy dataset
dataset:
laptime&rank dataset <eventid, carids, laptime (totalcars x totallaps), rank (totalcars x totallaps)>; filled with NaN
deep models:
deepAR, deepstate, deepFactor
"""
# # DeepAR on simulation indy500 laptime dataset
#
# laptime dataset
# <eventid, carids, laptime (totalcars x totallaps)>
# Third-party imports
import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import logging
import os,sys
from optparse import OptionParser
import pickle
from pathlib import Path
from gluonts.dataset.common import ListDataset
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator
logger = logging.getLogger(__name__)

#global variables
# Defaults; overwritten from the command line in the __main__ block.
prediction_length = 50
context_length = 100
test_length = 50  # laps held out at the end of each series for testing
freq = "1H"
events = ['Phoenix','Indy500','Texas','Iowa','Pocono','Gateway']
events_id={key:idx for idx, key in enumerate(events)}
global_carids = {}  # car_number -> encoded id; filled by load_dataset()
cardinality = [0]   # [number of distinct cars]; filled by load_dataset()
TS_LAPTIME=2  # row index of the laptime series in laptime_data entries
TS_RANK=3     # row index of the rank series in laptime_data entries
def load_dataset(inputfile, run_ts = TS_LAPTIME):
    """Build gluonts train/test ListDatasets from the pickled laptime data.

    The train split drops the trailing `test_length` laps of every series;
    the test split keeps the full series.  `run_ts` selects which row of
    each event matrix becomes the target (TS_LAPTIME or TS_RANK).
    Side effect: refreshes the module globals global_carids/cardinality.
    """
    global global_carids, cardinality

    with open(inputfile, 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')

    cardinality = [len(global_carids)]
    logger.info(f"number of cars: {cardinality}")
    logger.info(f"number of runs: {len(laptime_data)}")

    # one common (arbitrary) start timestamp shared by every series
    start = pd.Timestamp("01-01-2019", freq=freq)

    def _records(event_data, end):
        # event_data: (eventid, carids, laptime array, ...); slicing with
        # end=None keeps the full series, end=-test_length trims the tail.
        series = event_data[run_ts]
        return [{'target': series[rowid, :end].astype(np.float32),
                 'start': start,
                 'feat_static_cat': global_carids[event_data[1][rowid]]}
                for rowid in range(series.shape[0])]

    train_set = []
    test_set = []
    for _data in laptime_data:
        train_set.extend(_records(_data, -test_length))
        test_set.extend(_records(_data, None))

    # train dataset: cut the last window of length "test_length"
    train_ds = ListDataset(train_set, freq=freq)
    # test dataset: use the whole dataset
    test_ds = ListDataset(test_set, freq=freq)

    return train_ds, test_ds
def plot_prob_forecasts(ts_entry, forecast_entry, outputfile):
    """
    Plot each observed series against its probabilistic forecast and save
    one PDF per entry as '<outputfile>-<idx>.pdf'.

    ts_entry       : list of pandas Series (observed univariate series)
    forecast_entry : list of gluonts Forecast objects aligned with ts_entry
    outputfile     : path prefix for the generated figures
    """
    plot_length = 150
    prediction_intervals = (50.0, 90.0)
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]

    figcnt = len(ts_entry)
    for idx in range(figcnt):
        fig, axs = plt.subplots(1, 1, figsize=(10, 7))
        ts_entry[idx][-plot_length:].plot(ax=axs)  # plot the time series
        forecast_entry[idx].plot(prediction_intervals=prediction_intervals, color='g')
        plt.grid(which="both")
        plt.legend(legend, loc="upper left")
        plt.savefig(outputfile + '-%d.pdf' % idx)
        # BUGFIX: release the figure; matplotlib otherwise keeps every
        # figure alive, leaking memory and warning after many plots.
        plt.close(fig)
def evaluate_model(estimator, train_ds, test_ds, outputfile):
    """Train `estimator`, backtest it on `test_ds`, plot three sample cars,
    and log the aggregate accuracy metrics as JSON."""
    predictor = estimator.train(train_ds)
    #if not os.path.exists(outputfile):
    #    os.mkdir(outputfile)
    #predictor.serialize(Path(outputfile))

    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,
        predictor=predictor,
        num_samples=100,  # sample paths drawn per series
    )

    forecasts = list(forecast_it)
    tss = list(ts_it)
    logger.info(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    # Indy500 sample cars (car12@rank1 WillPower, car1@rank16, car7@rank33)
    # at in-event indices 7, 0, 4; 52-7 presumably skips the series of the
    # earlier events in the combined test set — confirm against dataset order.
    offset = 52 - 7
    picks = [7 + offset, 0 + offset, 4 + offset]
    ts_entry = [tss[i] for i in picks]
    forecast_entry = [forecasts[i] for i in picks]
    plot_prob_forecasts(ts_entry, forecast_entry, outputfile)

    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts),
                                          num_series=len(test_ds))
    logger.info(json.dumps(agg_metrics, indent=4))
def init_estimator(model, gpuid, epochs=100):
    """
    Build the estimator named by `model`.

    model  : one of deepAR, simpleFF, deepFactor, deepState
    gpuid  : GPU device index used for training
    epochs : number of training epochs

    Exits the process for unknown model names.

    The Trainer configuration was previously copy-pasted into every
    branch; it is now built once by the local factory below.
    """
    def _trainer(**overrides):
        # Shared trainer configuration for all models; `overrides` lets
        # single branches adjust it (e.g. simpleFF's hybridize=False).
        return Trainer(ctx="gpu(%s)" % gpuid,
                       epochs=epochs,
                       learning_rate=1e-3,
                       num_batches_per_epoch=100,
                       **overrides)

    if model == 'deepAR':
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=_trainer(),
        )
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length=context_length,
            freq=freq,
            trainer=_trainer(hybridize=False),
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            freq=freq,
            trainer=_trainer(),
        )
    elif model == 'deepState':
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=_trainer(),
        )
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)

    return estimator
if __name__ == '__main__':
    # Script entry point: parse options, set global sequence lengths, then
    # train and evaluate the selected model on the pickled dataset.
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)

    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))

    # cmd argument parser
    usage = 'deepar_simindy500.py --epochs epochs --input inputpicklefile --output outputfile'
    parser = OptionParser(usage)
    parser.add_option("--input", dest="inputfile", default='sim-indy500-laptime-2018.pickle')
    parser.add_option("--output", dest="outputfile")
    parser.add_option("--epochs", dest="epochs", default=100)
    parser.add_option("--model", dest="model", default="deepAR")
    parser.add_option("--gpuid", dest="gpuid", default=0)
    parser.add_option("--contextlen", dest="contextlen", default=100)
    parser.add_option("--predictionlen", dest="predictionlen", default=50)
    parser.add_option("--testlen", dest="testlen", default=50)
    parser.add_option("--ts", dest="ts_type", default=2)

    opt, args = parser.parse_args()

    #set the global length
    prediction_length = int(opt.predictionlen)
    context_length = int(opt.contextlen)
    test_length = int(opt.testlen)
    ts_type = int(opt.ts_type)

    runid = f'-i{opt.outputfile}-e{opt.epochs}-m{opt.model}-p{opt.predictionlen}-c{opt.contextlen}-t{opt.testlen}-ts{opt.ts_type}'
    logger.info("runid=%s", runid)

    train_ds, test_ds = load_dataset(opt.inputfile, ts_type)
    # NOTE(review): opt.epochs is a string when given on the CLI (no type=
    # and no int() here, unlike the lengths above) — confirm downstream
    # coercion in Trainer.
    estimator = init_estimator(opt.model, opt.gpuid, opt.epochs)
    evaluate_model(estimator, train_ds, test_ds, opt.outputfile)
| 8,731 | 32.328244 | 130 | py |
rankpredictor | rankpredictor-master/src/indycar/model/stint_simulator_strategy.py | #!/usr/bin/env python
# coding: utf-8
# ### stint simulator
# based on: Stint-Predictor-Fastrun
#
#
# long term predictor by continuously regressive forecasting at each pitstop
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# In[2]:
import os
random.seed()
# NOTE(review): os.getcwd() return value is discarded — notebook leftover.
os.getcwd()
#GPUID = 1
# ### global constants
# In[3]:
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
#
# Row indices into each event's time-series matrix in laptime_data:
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8
# share the memory
#COL_LAPSTATUS_PRED = 8 # for dynamic lapstatus predictions
#LAPSTATUS SAVED in forecast_et
# Row indices into the forecast_et save area:
COL_LAPSTATUS_SAVE = 0 #laptime no use
COL_CAUTION_LAPS_INSTINT_SAVE=7
COL_LAPS_INSTINT_SAVE= 8
# feature-mode selectors
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
# oracle mode (bit flags; combined with '+')
MODE_ORACLE = 1024 # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2
# oracle mode for training
MODE_NOLAP = 1
MODE_NOTRACK = 2
# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32
# disturbe analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256
# human-readable mode names for logging
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
             MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
             MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
             MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
             MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
             MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
# In[ ]:
# In[4]:
def load_data(event, year=0):
    """
    Load the final lap-by-lap csv for one event.

    event : str, e.g. 'Indy500-2018'
    year  : optional year tag (int or str); when truthy the input file is
            '../data/final/C_<event>-<year>.csv', otherwise
            '../data/final/C_<event>.csv'

    returns (alldata, rankdata, acldata)
      alldata  : raw csv as a DataFrame
      rankdata : one record per (car, lap), ordered by elapsed time
      acldata  : completed-laps view of alldata (see make_cl_data)
    """
    # BUGFIX: the original built the path with "'...' + year + '...'",
    # which raises TypeError for an int year (str + int), and the guard
    # `year > 0` raises for a str year (str > int comparison).  An
    # f-string plus truthiness test accepts both and keeps year=0 behavior.
    if year:
        inputfile = f'../data/final/C_{event}-{year}.csv'
    else:
        inputfile = f'../data/final/C_{event}.csv'

    #outputprefix = year +'-' + event + '-'
    dataset = pd.read_csv(inputfile)
    #dataset.info(verbose=True)

    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1

    # get records for the cars that finish the race
    completed_car_numbers= dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)

    #print('count of completed cars:', completed_car_count)
    #print('completed cars:', completed_car_numbers)

    #make a copy
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    # rank view: one record per (car, lap), ordered by elapsed time
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # NOTE(review): cldata is computed but never used or returned —
    # candidate for removal; kept to avoid dropping a possibly intended
    # side path.
    cldata = make_cl_data(dataset)
    acldata = make_cl_data(alldata)

    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build a per-(car, lap) summary table with rank/time deltas.

    Parameters
    ----------
    dataset : pandas.DataFrame
        Raw timing rows; must contain at least the columns referenced below.

    Returns
    -------
    pandas.DataFrame
        One row per (car_number, completed_laps), sorted by car then lap,
        with columns: car_number, completed_laps, rank, elapsed_time,
        rank_diff, time_diff, current_status, track_status, lap_status.
        rank_diff/time_diff are per-car lap-over-lap differences (0 on each
        car's first row).
    """
    # keep only the first record per (car, lap), ordered by elapsed time
    # (the original row index breaks ties deterministically)
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # resort by car_number, lap so diff() runs along each car's laps
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade", "laps_behind_prec", "overall_rank", "pit_stop_count",
                          "last_pitted_lap", "start_position", "laps_led"], axis=1)

    # a car's first row has no predecessor: zero the cross-car diff there.
    # Fixes: the original used chained assignment (uni_ds['col'][mask] = 0),
    # which is unreliable under pandas copy-on-write; .loc writes in place.
    new_car = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[new_car, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[new_car, 'time_diff'] = 0

    df = uni_ds[['car_number', 'completed_laps', 'rank', 'elapsed_time',
                 'rank_diff', 'time_diff', "current_status", "track_status", "lap_status"]]
    return df
# In[5]:
def nan_helper(y):
    """Locate NaNs in a 1-d array and provide an index converter.

    Parameters
    ----------
    y : numpy.ndarray
        1-d array that may contain NaNs.

    Returns
    -------
    (mask, to_indices)
        mask       : boolean array, True where y is NaN
        to_indices : callable mapping a boolean mask to integer positions,
                     e.g. for interpolation:
                     nans, ix = nan_helper(y)
                     y[nans] = np.interp(ix(nans), ix(~nans), y[~nans])
    """
    mask = np.isnan(y)

    def to_indices(logical):
        return logical.nonzero()[0]

    return mask, to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
def get_modestr(a):
    """Render the oracle-mode flag word `a` as a comma-terminated name list."""
    active = [_mode_map[flag] for flag in _mode_map if test_flag(a, flag)]
    return ''.join('%s,' % name for name in active)
# caches keyed by endpos -> track-status vector of length prediction_length
_track_pred = {}
_track_true = {}

def init_track_model():
    """Reset the cached track-status predictions and ground truth."""
    global _track_pred, _track_true
    _track_pred, _track_true = {}, {}
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """Return (and cache per `endpos`) a predicted track-status vector.

    Samples a caution-phase length from an empirical model and, if a yellow
    flag is already running just before the forecast window, extends it into
    the window for the remaining sampled laps. Also records the true trailing
    window in `_track_true` for later comparison.
    """
    global _track_pred, _track_true
    # empirical caution-phase lengths (perfect model for Indy500 2018)
    caution_len_model = [6, 4, 4, 5, 6, 6, 4]

    if endpos in _track_pred:
        return _track_pred[endpos]

    # count consecutive yellow-flag laps immediately before the forecast window
    yellow_run = 0
    for back in range(1, context_len):
        if track_rec[-prediction_length - back] != 1:
            break
        yellow_run += 1

    prediction = np.zeros(prediction_length, dtype=int)
    sampled_len = random.choice(caution_len_model)
    if yellow_run > 0 and sampled_len > yellow_run:
        # fill the not-yet-elapsed part of the sampled caution phase
        prediction[:(sampled_len - yellow_run)] = 1

    _track_pred[endpos] = prediction
    _track_true[endpos] = track_rec[-prediction_length:].copy()
    return prediction
# cache keyed by endpos -> adjusted track-status vector of length prediction_length
_track_adjust = {}

def init_adjust_track_model():
    """Drop all cached track-status adjustments."""
    global _track_adjust
    _track_adjust = {}
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """Randomly perturb the end of a caution phase by -1/0/+1 laps.

    input:
        tailpos ; negative index (< 0) of the last caution lap in the window
    Returns the adjusted trailing window, cached per `endpos`.
    """
    global _track_adjust

    if endpos in _track_adjust:
        return _track_adjust[endpos]

    shift = random.choice([-1, 0, 1])
    adjusted = track_rec[-prediction_length:].copy()
    if shift == -1:
        # shorten: clear the last caution lap
        adjusted[tailpos] = 0
    elif shift == 1:
        # shift the caution tail one lap later (when it stays in the window)
        adjusted[tailpos] = 0
        if (tailpos + 1) <= -1:
            adjusted[tailpos + 1] = 1

    _track_adjust[endpos] = adjusted
    return adjusted
# carno -> adjusted lap_status vector (see get_adjust_lapstatus)
_lap_adjust = {}
# pos_adjust value -> number of times it was applied (statistics)
_empirical_model = {}

def init_adjust_pitmodel():
    """Reset the per-car adjusted lap-status cache and the empirical stats.

    Bug fix: `_empirical_model` was missing from the `global` statement, so
    the assignment created a function-local dict and the module-level
    `_empirical_model` was never actually reset.
    """
    global _lap_adjust, _empirical_model
    _lap_adjust = {}
    _empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """
    Return a randomly perturbed copy of `lapstatus` for car `carno`,
    computing it once and caching it in `_lap_adjust` for later calls.

    input:
        carno ; car number used as the cache key
        lapstatus ; ground-truth 0/1 pit-lap vector
        force ; when True, keep redrawing until each pit lands at a valid
                in-range position; when False, a single failed draw leaves
                the pit where it was
    return:
        the cached adjusted lap-status vector for this car

    Each pit lap is shifted by an offset drawn from `_adjust_model`
    (via get_random_choice); applied offsets are tallied in
    `_empirical_model`.

    NOTE(review): a pit moved forward (pos_adjust > 0) lands at an index the
    outer loop has not reached yet and can therefore be re-drawn and moved
    again — confirm whether this cascading behavior is intended.
    """
    if carno not in _lap_adjust:
        #adjust it
        lapadjust = lapstatus.copy()
        for pos in range(0, len(lapstatus)):
            if lapadjust[pos] == 1:
                success = False
                while(not success):
                    # adjust this pit lap position
                    pos_adjust = get_random_choice(_adjust_model)

                    new_pos = pos + pos_adjust
                    if new_pos >= 0 and new_pos < len(lapstatus):
                        #valid
                        lapadjust[pos] = 0
                        lapadjust[new_pos] = 1
                        success = True

                        #add statistics
                        if pos_adjust not in _empirical_model:
                            _empirical_model[pos_adjust] = 1
                        else:
                            _empirical_model[pos_adjust] += 1

                    if force==False:
                        break

        _lap_adjust[carno] = lapadjust

    return _lap_adjust[carno]
def build_random_model(modeldict):
    """
    input:
        modeldict ; {val: probability (or unnormalized weight)}
    return:
        model ; array of [val, cdf] rows, sorted by val, cdf normalized to 1
    """
    keys = sorted(modeldict.keys())
    model = np.zeros((len(keys), 2))
    if not keys:
        return model

    weights = np.array([modeldict[k] for k in keys], dtype=float)
    cumulative = np.cumsum(weights)

    model[:, 0] = keys
    # normalize so the final cdf entry is exactly 1
    model[:, 1] = cumulative / cumulative[-1]
    return model
def print_model(model, iscdf=True):
    """
    Pretty-print a random model as ['val:prob', ...].

    input:
        model ; array of [val, weight] rows; weights are a cdf when
                iscdf=True, otherwise raw (unnormalized) probabilities
    """
    ordered = model[np.argsort(model[:, 0])]

    total = 1.
    if not iscdf:
        total = np.sum(ordered[:, 1])

    pairs = []
    prev_cdf = 0
    for val, weight in ordered:
        # for a cdf, successive differences recover the probabilities
        pairs.append((val, (weight - prev_cdf) / total))
        if iscdf:
            prev_cdf = weight

    #output
    print(['%d:%.3f'%(x[0],x[1]) for x in pairs])
def get_random_choice(model):
    """
    Draw a value from the model according to its probability.

    input:
        model ; array of [val, cdf] rows (cdf normalized to 1)
    return:
        the sampled value as an int
    """
    u = np.random.rand()
    # number of cdf entries strictly below u == index of the sampled row
    row = int(np.sum(model[:, 1] < u))
    return int(model[row, 0])
# pit-lap shift distribution: offset -> probability weight
# (the 0-offset weight was lowered from 0.4 to 0.05 — see the alternative below)
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
# sampled via get_random_choice() in the pit-adjustment helpers
_adjust_model = build_random_model(_modeldict)
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """
    Randomly shift each pit lap inside the forecast window.

    input:
        lap_rec ; 0/1 pit-lap vector; only its trailing `prediction_length`
                  entries are copied and perturbed
        force ; when True, keep redrawing until the shifted pit lands inside
                the window; when False, one failed draw leaves it in place
    return:
        the adjusted trailing window (length `prediction_length`)

    Shifts are drawn from the module-level `_adjust_model` distribution.

    NOTE(review): a pit moved forward (pos_adjust > 0) is revisited by the
    outer loop at its new index and can be moved again — confirm whether this
    cascading is intended.
    """
    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            success = False
            while(not success):
                # adjust this pit lap position
                pos_adjust = get_random_choice(_adjust_model)

                new_pos = pos + pos_adjust
                if new_pos >= 0 and new_pos < prediction_length:
                    #valid
                    lapadjust[pos] = 0
                    lapadjust[new_pos] = 1
                    success = True

                if force==False:
                    break

    return lapadjust
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
    """Randomly shift each pit lap in the forecast window by -1/0/+1 laps.

    input:
        lap_rec ; 0/1 pit-lap vector; only the trailing `prediction_length`
                  entries are copied and perturbed
        endpos ; unused, kept for interface compatibility with callers
    return:
        the adjusted trailing window (length `prediction_length`)

    Shifts that would leave the window are dropped (the pit stays put).

    Bug fix: removed the unused `lap_adjust = random.choice(...)` draw — it
    was never read and only consumed an RNG value.

    NOTE(review): a pit moved forward (+1) is revisited by the loop at its
    new index and can be moved again — confirm whether this is intended.
    """
    adjust_model = [-1, 0, 1]

    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            # adjust this pit lap position
            pos_adjust = random.choice(adjust_model)

            if pos_adjust == -1:
                if (pos - 1 >= 0):
                    lapadjust[pos] = 0
                    lapadjust[pos - 1] = 1
            elif pos_adjust == 1:
                if (pos + 1 < prediction_length):
                    lapadjust[pos] = 0
                    lapadjust[pos + 1] = 1

    return lapadjust
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
    """
    Predict the pit status for the next `prediction_length` laps.

    input:
        cuation_laps_instint ; caution laps seen in the current stint
        laps_instint ; laps already driven in the current stint
    return:
        int vector of length `prediction_length`; at most one entry is 1,
        at the lap where the sampled stint length predicts the next pit

    Samples a total stint length from the empirical Indy500-2018 model and
    places the pit when it falls inside the forecast window.

    Cleanup: removed the dead locals `pit_model_top8` (never referenced) and
    the `pit_model = pit_model_all` alias.
    """
    # empirical stint lengths from Indy500 2018 (all finishers):
    # row 0 for caution-heavy stints, row 1 otherwise
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
                     [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]

    if cuation_laps_instint > 10:
        #use low model
        pred_pit_laps = random.choice(pit_model_all[0])
    else:
        pred_pit_laps = random.choice(pit_model_all[1])

    #laps remain, fill into the future
    pitpred = np.array([0 for x in range(prediction_length)])
    if (pred_pit_laps > laps_instint) and (pred_pit_laps <= laps_instint + prediction_length):
        pitpred[pred_pit_laps - laps_instint - 1] = 1

    return pitpred
def make_dataset_byevent(runs, prediction_length, freq,
                         useeid = False,
                         run_ts= COL_LAPTIME,
                         test_event = 'Indy500-2018',
                         test_cars = [],
                         use_global_dict = True,
                         oracle_mode = MODE_ORACLE,
                         half_moving_win = 0,
                         train_ratio=0.8,
                         log_transform = False,
                         context_ratio = 0.,
                         verbose = False
                         ):
    """
    Build gluonts train/test ListDatasets from the global `laptime_data`.

    split the ts to train and test part by the ratio

    input:
        runs ; index into laptime_data, or < 0 for all events
        oracle_mode: bit flags (MODE_*); false to simulate prediction in real by
                set the covariates of track and lap status as nan in the testset
        half_moving_win ; rolling-window step: 0 -> -1, 1 -> -plen/2, 2 -> -plen
        train_ratio ; min ts length is train_len + prediction_length
    return:
        (train_ds, test_ds, train_set, test_set)

    NOTE(review): the `run_ts` and `test_event` parameters are immediately
    overwritten from the module globals `_run_ts` / `_test_event`, and
    `feature_mode` comes from `_feature_mode` — the parameters are dead.
    NOTE(review): events other than the test event are skipped by `continue`
    before any record is appended, so `train_set` always ends up empty here.
    `test_cars` is accepted but never used.
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode

    # reset the cached track-status prediction/adjustment models
    init_track_model()
    init_adjust_track_model()

    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series

    train_set = []
    test_set = []

    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()

    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []

        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)

        # context_len: how much history precedes the first forecast window
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)

            # adjust for disturbance analysis: perturb the pit laps once per car
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)

            # selection of features: zero out covariates excluded by the mode
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                                     rec[COL_LAPSTATUS,:]]
                               }
                              )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len

                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1

                #bug fix, fixed the split point for all cars/ts
                for endpos in range(max_len, context_len+prediction_length,
                                    step):

                    #check if enough for this ts
                    if endpos > totallen:
                        continue

                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    # stint state at the last observed lap (just before the window)
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0

                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0

                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0

                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                                                     laps_instint,prediction_length)

                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0

                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec

                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec

                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]

                    # accumulate the covariate error introduced by the adjustments
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))

                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec]
                                      }
                                     )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                      }
                                     )
                    test_rec_cnt += 1

            #add one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

        train_set.extend(_train)
        test_set.extend(_test)

    print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')

    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)

    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile, freq, prediction_length, cardinality, train_ds, test_ds):
    """Pickle the dataset bundle [freq, prediction_length, cardinality,
    train_ds, test_ds] to `datafile` using the highest protocol."""
    payload = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as fh:
        pickle.dump(payload, fh, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[6]:
def predict(test_ds, predictor):
    """Run rolling evaluation of `predictor` over `test_ds`.

    Returns (tss, forecasts): the ground-truth series and the corresponding
    sample forecasts (100 sample paths each), both as lists.
    """
    forecast_iter, ts_iter = make_evaluation_predictions(
        dataset=test_ds,        # test dataset
        predictor=predictor,    # predictor
        num_samples=100,        # number of sample paths we want for evaluation
    )
    forecasts = [f for f in forecast_iter]
    tss = [t for t in ts_iter]
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name, trainid):
    """Load the requested predictor and run it on `test_ds`.

    input:
        model_name ; one of the serialized deepAR variants ('curtrack',
            'zerotrack', 'oracle', 'oracle-laponly', 'oracle-trackonly',
            'deepAR') or a baseline ('naive', 'zero', 'arima')
        trainid ; training-run id used to locate the model directory
    return:
        [tss, forecasts] on success; [] for an unknown model name

    Refactor: the six copy-pasted deepAR deserialize/predict branches are
    collapsed into one table-driven branch; behavior (prints, model paths,
    return values) is unchanged.
    """
    # model-name -> serialized-model directory template (deepAR variants)
    deepar_model_dirs = {
        'curtrack': 'deepAR-Oracle-{tid}-curtrack-indy-f1min-t{plen}-e1000-r1_curtrack_t{plen}',
        'zerotrack': 'deepAR-Oracle-{tid}-nolap-zerotrack-indy-f1min-t{plen}-e1000-r1_zerotrack_t{plen}',
        'oracle': 'deepAR-Oracle-{tid}-all-indy-f1min-t{plen}-e1000-r1_oracle_t{plen}',
        'oracle-laponly': 'deepAR-Oracle-{tid}-all-indy-f1min-t{plen}-e1000-r1_oracle-laponly_t{plen}',
        'oracle-trackonly': 'deepAR-Oracle-{tid}-all-indy-f1min-t{plen}-e1000-r1_oracle-trackonly_t{plen}',
        'deepAR': 'deepAR-{tid}-all-indy-f1min-t{plen}-e1000-r1_deepar_t{plen}',
    }

    with mx.Context(mx.gpu(7)):
        pred_ret = []

        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'

        if model_name in deepar_model_dirs:
            # serialized deepAR (oracle or plain) model
            model = deepar_model_dirs[model_name].format(tid=_task_id, plen=prediction_length)
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]

        # naive
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]

        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]

        # arima
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq= freq,
                                           prediction_length = prediction_length, trunc_length=60)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]

        else:
            print(f'error: model {model_name} not support yet!')

        return pred_ret
def load_model(prediction_length, model_name, trainid):
    """Load (but do not run) the predictor for `model_name`.

    input:
        model_name ; one of the serialized deepAR variants ('curtrack',
            'zerotrack', 'oracle', 'oracle-laponly', 'oracle-trackonly',
            'deepAR') or a baseline ('naive', 'zero', 'arima')
        trainid ; training-run id used to locate the model directory
    return:
        the predictor, or None for an unknown model name

    Bug fix: for an unknown model name the original fell through to
    `return predictor` with `predictor` never assigned, raising
    UnboundLocalError; it now returns None after printing the error.
    Refactor: the six copy-pasted deepAR branches are table-driven.
    """
    # model-name -> serialized-model directory template (deepAR variants)
    deepar_model_dirs = {
        'curtrack': 'deepAR-Oracle-{tid}-curtrack-indy-f1min-t{plen}-e1000-r1_curtrack_t{plen}',
        'zerotrack': 'deepAR-Oracle-{tid}-nolap-zerotrack-indy-f1min-t{plen}-e1000-r1_zerotrack_t{plen}',
        'oracle': 'deepAR-Oracle-{tid}-all-indy-f1min-t{plen}-e1000-r1_oracle_t{plen}',
        'oracle-laponly': 'deepAR-Oracle-{tid}-all-indy-f1min-t{plen}-e1000-r1_oracle-laponly_t{plen}',
        'oracle-trackonly': 'deepAR-Oracle-{tid}-all-indy-f1min-t{plen}-e1000-r1_oracle-trackonly_t{plen}',
        'deepAR': 'deepAR-{tid}-all-indy-f1min-t{plen}-e1000-r1_deepar_t{plen}',
    }

    with mx.Context(mx.gpu(7)):
        predictor = None

        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'

        if model_name in deepar_model_dirs:
            # serialized deepAR (oracle or plain) model
            model = deepar_model_dirs[model_name].format(tid=_task_id, plen=prediction_length)
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')

        # naive
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)

        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)

        # arima
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq= freq,
                                           prediction_length = prediction_length, trunc_length=60)

        else:
            print(f'error: model {model_name} not support yet!')

        return predictor
# In[7]:
#calc rank
#calc rank
def eval_rank_bytimediff(test_ds, tss, forecasts, prediction_length):
    """
    Evaluate rank from time-diff forecasts (timediff models).
    works for one event only

    input:
        test_ds ; test set for a single event (its feat_static_cat ids are
                  decoded to car numbers via the module-level decode_carids)
        tss, forecasts ; forecast results from predict()
        prediction_length ; expected forecast horizon
    return:
        rank_ret ; list of [lap, time_diff, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [true, predicted]}}
    """
    carlist = []

    # completed_laps -> {carno: [true timediff window, predicted window]}
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # decode the static category id back to a car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: missing f-prefix printed the literal '{prediction_length}...'
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []

        # median over the sample paths as the point forecast
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))

        timediff_array = tss[idx].values.copy()

        # lap count at the start of the forecast window identifies the split
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                                               forecast_laptime_mean.copy()]

    # calc rank: smaller time diff to the leader -> better rank
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}

        #fill in data: [0]=true, [1]=pred
        time_diff = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        # double argsort converts values into 0-based ranks per lap column
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, time_diff, true_rank, pred_rank])

    return rank_ret, forecasts_et
#calc rank
def eval_laptime(test_ds,tss,forecasts,prediction_length, start_offset):
"""
evaluate rank by laptime forecasting
input:
test_ds ; must be test set for a single event, because test_ds itself does not
contain features to identify the eventid
start_offset[]; elapsed time for lap0, for one specific event
tss,forecasts ; forecast result
prediction_length ;
return:
rank_ret ; [lap, elapsed_time, true_rank, pred_rank]
forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
"""
carlist = []
# carno-lap# -> elapsed_time[] array
forecasts_et = dict()
ds_iter = iter(test_ds)
for idx in range(len(test_ds)):
test_rec = next(ds_iter)
#global carid
carno = decode_carids[test_rec['feat_static_cat'][0]]
#print('car no:', carno)
if carno not in carlist:
carlist.append(carno)
#start_offset is global var
#offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
#print('start_offset:', offset)
# calc elapsed time
prediction_len = forecasts[idx].samples.shape[1]
if prediction_length != prediction_len:
print('error: prediction_len does not match, {prediction_length}:{prediction_len}')
return []
forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
#forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
laptime_array = tss[idx].values.copy()
#elapsed_time = np.cumsum(laptime_array) + offset
laptime_array_hat = tss[idx].values.copy()
laptime_array_hat[-prediction_len:] = forecast_laptime_mean
#elapsed_time_hat = np.cumsum(laptime_array) + offset
#save the prediction
completed_laps = len(tss[idx]) - prediction_len + 1
#print('car no:', carno, 'completed_laps:', completed_laps)
#key = '%s-%s'%(carno, completed_laps)
#forecasts_et[key] = elapsed_time[-prediction_len:].copy()
if completed_laps not in forecasts_et:
forecasts_et[completed_laps] = {}
#forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len-1:].copy(),
# elapsed_time_hat[-prediction_len-1:].copy()]
forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
laptime_array_hat[-prediction_len:].copy()]
# calc rank
rank_ret = []
for lap in forecasts_et.keys():
#get car list for this lap
carlist = list(forecasts_et[lap].keys())
#print('carlist:', carlist)
caridmap={key:idx for idx, key in enumerate(carlist)}
#fill in data
#elapsed_time = np.zeros((2, len(carlist), prediction_len+1))
lap_time = np.zeros((2, len(carlist), prediction_len))
for carno in carlist:
carid = caridmap[carno]
lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
#calculate rank
#idx = np.argsort(elapsed_time[0], axis=0)
#true_rank = np.argsort(idx, axis=0)
true_laptime = lap_time[0]
#idx = np.argsort(elapsed_time[1], axis=0)
#pred_rank = np.argsort(idx, axis=0)
pred_laptime = lap_time[1]
rank_ret.append([lap, lap_time, true_laptime, pred_laptime])
return rank_ret,forecasts_et
#calc rank
def eval_rank(test_ds, tss, forecasts, prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event; when it
                is a DataFrame the targets are laptimes and are cumsum-ed into
                elapsed time, otherwise the targets are ranked directly
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; list of [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [elapsed_time, elapsed_time_pred]}}
    """
    carlist = []

    # completed_laps -> {carno: [true window, predicted window]}
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # decode the static category id back to a car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        # per-car lap-0 elapsed time anchors the cumulative sum
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number'] == carno)].elapsed_time.values[0]

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: missing f-prefix printed the literal '{prediction_length}...'
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []

        # mean over the sample paths as the point forecast
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))

        if isinstance(start_offset, pd.core.frame.DataFrame):
            # laptime targets -> cumulative elapsed time from the lap-0 offset
            laptime_array = tss[idx].values.copy()
            elapsed_time = np.cumsum(laptime_array) + offset

            laptime_array = tss[idx].values.copy()
            laptime_array[-prediction_len:] = forecast_laptime_mean
            elapsed_time_hat = np.cumsum(laptime_array) + offset
        else:
            # targets are already rank/elapsed-time; use them directly
            elapsed_time = tss[idx].values.copy()
            elapsed_time_hat = tss[idx].values.copy()
            elapsed_time_hat[-prediction_len:] = forecast_laptime_mean

        # lap count at the start of the forecast window identifies the split
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]

    # calc rank: smaller elapsed time -> better rank
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}

        #fill in data: [0]=true, [1]=pred
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        # double argsort converts values into 0-based ranks per lap column
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])

    return rank_ret, forecasts_et
def get_acc(rank_ret,prediction_length, verbose = False):
    """
    Aggregate ranking-accuracy metrics over a set of prediction records.

    input:
        rank_ret          ; list of [lap, elapsed_time, true_rank, pred_rank]
                            records; only columns [2] and [3] are used here
        prediction_length ; horizon length, used to normalize the counters
        verbose           ; when True, also print a human-readable summary
    return:
        ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
         (count, count, count, count, count, count))
        metric values together with the counts they were normalized by, so
        the caller can recompute micro/macro averages.
    """
    # accumulators over all records
    hit_top1 = 0
    hit_top1_far = 0
    hit_top5 = 0
    hit_top5_far = 0
    tau_sum = 0
    rmse_sum = 0.
    mae_sum = 0.

    for rec in rank_ret:
        true_rank, pred_rank = rec[2], rec[3]

        # top1: leader (rank index 0) predicted correctly
        hit_top1 += np.sum((true_rank == 0) & (pred_rank == 0))
        hit_top1_far += np.sum((true_rank[:, -1] == 0) & (pred_rank[:, -1] == 0))

        # top5: membership in the leading five predicted correctly
        hit_top5 += np.sum((true_rank < 5) & (pred_rank < 5))
        hit_top5_far += np.sum((true_rank[:, -1] < 5) & (pred_rank[:, -1] < 5))

        # Kendall's tau rank correlation
        tao, _ = stats.kendalltau(true_rank, pred_rank)
        tau_sum += tao

        # squared / absolute error on the rank values themselves
        rmse_sum += mean_squared_error(pred_rank, true_rank)
        mae_sum += np.sum(np.abs(pred_rank - true_rank))

    recnt = len(rank_ret)
    if recnt > 0:
        hit_top1 = hit_top1 * 1.0 / (recnt * prediction_length)
        hit_top1_far = hit_top1_far * 1.0 / recnt
        hit_top5 = hit_top5 * 1.0 / (5 * recnt * prediction_length)
        hit_top5_far = hit_top5_far * 1.0 / (5 * recnt)
        tau_sum = tau_sum / recnt
        rmse_sum = rmse_sum / recnt
        mae_sum = mae_sum / recnt

    # debug only: when the target is the lap status, report MAE in the tau slot
    if _run_ts == COL_LAPSTATUS:
        tau_sum = mae_sum

    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', hit_top1,
             'top1acc_farmost=', hit_top1_far,
             'top5acc=', hit_top5,
             'top5acc_farmost=', hit_top5_far,
             )
        print('tau = ', tau_sum,
             'rmse = ', rmse_sum,
             'mae = ', mae_sum)

    return ((hit_top1, hit_top1_far, hit_top5, hit_top5_far, tau_sum, rmse_sum),
            (recnt*prediction_length, recnt, 5*recnt*prediction_length, 5*recnt, recnt, recnt))
# In[ ]:
# In[8]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc, datamode='', models=[]):
    """
    Run the full test matrix (prediction length x half mode x trainid) `runs`
    times and average the resulting metrics.

    input:
        runs        ; number of repetitions to average over
        plens       ; e.g. [2,5,10], prediction lengths to test
        half        ; half-moving-window modes, e.g. [False]
        trainids    ; e.g. ["r0.5"] or ["indy500-r0.2","indy500-r0.4","indy500"]
        train_ratio ; train/test split ratio
        testfunc    ; callable (run_exp_predpit, run_exp_predtrack, run_exp ...)
                      OR a plain string; in the string case run_exp is used
                      with `datamode` and `models`
        datamode    ; forwarded to run_exp (string testfunc only)
        models      ; forwarded to run_exp (string testfunc only)
    return:
        dfret ; average result of multiple runs
            dataframe['model' , 'prediction_length', 'halfmode','datamode','trainid',
                'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse',
                'top1acc_std','top1acc_farmost_std','top5acc_std','top5acc_farmost_std','tau_std','rmse_std']
        alldata_ret ; for debug
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
                pred_ret[model] -> [tss, forecasts]
                test_ds[model] -> test_ds
                rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
                    forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)

    NOTE(review): the two error paths below `return` None (a single value)
    while the success path returns a 2-tuple; callers that unpack the result
    will fail with a TypeError on bad settings.
    """
    # argument sanity checks
    if plens == [] or half == [] or trainids == []:
        print("error with empty settings")
        return
    #testfunc or (datamode & models)
    if isinstance(testfunc,str) and (datamode == '' or models == []):
        print("error with testfunc")
        return

    allret = []        # one metrics DataFrame per run
    alldata_ret = []   # one raw-data list per run (for debugging)
    for runid in range(runs):
        exp_data = []
        exp_result = []
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('='*10)
                    # callable testfunc takes precedence; otherwise fall back
                    # to run_exp driven by datamode/models
                    if not isinstance(testfunc,str):
                        pred_ret, test_ds, rank_ret, metric_ret = testfunc(plen, halfmode,
                                                        train_ratio=train_ratio,
                                                        trainid=trainid)
                    else:
                        pred_ret, test_ds, rank_ret, metric_ret = run_exp(plen, halfmode,
                                                        train_ratio=train_ratio,
                                                        trainid=trainid,
                                                        datamode=datamode,
                                                        models=models)
                    #save
                    exp_data.append((pred_ret, test_ds, rank_ret))
                    exp_result.extend(metric_ret)
        #save result of this run
        result = pd.DataFrame(exp_result, columns = ['model' , 'prediction_length', 'halfmode',
                                                     'datamode','trainid',
                                                     'top1acc','top1acc_farmost','top5acc',
                                                     'top5acc_farmost','tau','rmse'])
        #result['runid'] = [runid for x in range(len(result))]
        allret.append(result)
        alldata_ret.append(exp_data)

    #final: stack the 6 numeric metric columns of every run into a 3-d array
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :,:] = ret[['top1acc','top1acc_farmost','top5acc',
                                   'top5acc_farmost','tau','rmse']].values
    #average and std over the run axis
    averagemat = np.mean(metrics[:,:,:], axis=0)
    stdmat = np.std(metrics[:,:,:], axis=0)
    # identifier columns are identical across runs; take them from run 0
    dfhead = allret[0][['model' , 'prediction_length', 'halfmode', 'datamode','trainid']]
    dfaverage = pd.DataFrame(averagemat, columns = ['top1acc','top1acc_farmost','top5acc',
                                                    'top5acc_farmost','tau','rmse'])
    dfstd = pd.DataFrame(stdmat, columns = ['top1acc_std','top1acc_farmost_std','top5acc_std',
                                            'top5acc_farmost_std','tau_std','rmse_std'])
    dfret = pd.concat([dfhead, dfaverage, dfstd], axis=1)
    #if exp_id != '':
    #    dfret.to_csv(f'laptime2rank-evaluate-indy500-{exp_id}-result.csv', float_format='%.3f')
    return dfret, alldata_ret
def checkret_status(dataret, runid = 0, idx = 0,model='oracle'):
    """
    check the test_ds track and lap status

    Sums the yellow-flag laps (track status) and pit laps (lap status) that
    fall inside the prediction window, over all records of one test dataset,
    and prints the totals.

    input (layout of dataret, the `alldata_ret` of run_test):
        [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
            pred_ret[model] -> [tss, forecasts]
            test_ds[model] -> test_ds
            rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
                forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    # infer the prediction length from the first forecast's sample matrix
    _, plen = dataret[runid][idx][0][model][1][0].samples.shape
    test_ds = dataret[runid][idx][1][model]
    ds_iter = iter(test_ds)
    yfcnt = 0    # yellow-flag laps inside the prediction windows
    pitcnt = 0   # pit laps inside the prediction windows
    for recid in range(len(test_ds)):
        test_rec = next(ds_iter)
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        # NOTE(review): assumes exactly two dynamic features (FEATURE_STATUS
        # mode); a FEATURE_PITAGE dataset carries three and would fail here
        track_rec,lap_rec = test_rec['feat_dynamic_real']
        yfcnt += np.sum(track_rec[-plen:])
        pitcnt += np.sum(lap_rec[-plen:])
    print('yfcnt:', yfcnt, 'pitcnt:',pitcnt)
def get_ref_oracle_testds(plens, halfs, train_ratio=0.8, test_cars = []):
    """
    Build oracle-mode test datasets for every (prediction_length,
    half_moving_win) combination on the test event, indexed by the same
    '%d-%d' key string that checkret_confusionmat uses for lookup.
    """
    ref_sets = {}
    for plen in plens:
        for halfwin in halfs:
            _, ds, _, _ = make_dataset_byevent(events_id[_test_event], plen, freq,
                                               oracle_mode=MODE_ORACLE,
                                               run_ts=_run_ts,
                                               test_cars=test_cars,
                                               half_moving_win=halfwin,
                                               train_ratio=train_ratio)
            # keep only the test split; the train split is not needed here
            ref_sets['%d-%d' % (plen, halfwin)] = ds
    return ref_sets
def checkret_confusionmat(dataret, ref_testset, runid= 0, testid = '', model='oracle'):
    """
    Output the accuracy metrics split by the <trackstatus, lapstatus>
    confusion type of each prediction window: '00' (green, no pit),
    '10' (yellow, no pit), '01' (green, pit), '11' (yellow, pit), plus an
    'aa' row aggregating all records.

    input:
        dataret     ; `alldata_ret` from run_test:
                      [runid][idx] -> (pred_ret, test_ds, rank_ret)
        ref_testset ; output of get_ref_oracle_testds; oracle test datasets
                      keyed by '%d-%d' % (plen, half_moving_win)
        runid       ; which run of dataret to analyze
        testid      ; label copied into the output rows
        model       ; which model's results to analyze
    return:
        dfacc ; DataFrame['testid','plen','type','reccnt',
                'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse']
    """
    plen_length = len(dataret[runid])
    dflist = []
    for idx in range(plen_length):
        # prediction length from the forecast samples of this configuration
        _, plen = dataret[runid][idx][0][model][1][0].samples.shape
        test_ds = dataret[runid][idx][1][model]
        rank_ret = dataret[runid][idx][2][model][0]

        # the reference set is always the half_moving_win==0 variant
        key = '%d-%d'%(plen,0)
        if key not in ref_testset:
            print(f'error, {key} not found in ref_testset')
            continue
        ref_oracle_testds = ref_testset[key]
        if len(ref_oracle_testds) != len(test_ds):
            print('error, size of testds mismatch', len(ref_oracle_testds), len(test_ds))
            continue

        # confusion matrix for <trackstatus, lapstatus> type: 00,01,10,11
        # lap(start lap of prediction) -> (yellow-flag count, pit count),
        # accumulated over all cars whose window starts at that lap
        lapmap = {}
        ds_iter = iter(ref_oracle_testds)
        for recid in range(len(ref_oracle_testds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            track_rec,lap_rec = test_rec['feat_dynamic_real']
            yfcnt = np.sum(track_rec[-plen:])
            pitcnt = np.sum(lap_rec[-plen:])
            #laptype = ('0' if yfcnt==0 else '1') + ('0' if pitcnt==0 else '1')
            lap = len(track_rec) - plen + 1
            if lap not in lapmap:
                #lapmap[lap] = laptype
                lapmap[lap] = (yfcnt, pitcnt)
            else:
                oldtype = lapmap[lap]
                lapmap[lap] = (yfcnt + oldtype[0], pitcnt + oldtype[1])

        #split the rank_ret by laptype and evaluate each subset separately
        types=['00','10','01','11']
        acc_ret = []
        for laptype in types:
            check_ret = []
            for item in rank_ret:
                typecnt = lapmap[item[0]]
                thetype = ('0' if typecnt[0]==0 else '1') + ('0' if typecnt[1]==0 else '1')
                if thetype == laptype:
                    check_ret.append(item)
            # get acc for this confusion type only
            metrics = get_acc(check_ret,plen)
            recret = [testid, plen, laptype, len(check_ret)]
            recret.extend(metrics[0])
            acc_ret.append(recret)
        #add a row over all records ('aa')
        metrics = get_acc(rank_ret,plen)
        recret = [testid, plen, 'aa', len(rank_ret)]
        recret.extend(metrics[0])
        acc_ret.append(recret)

        _dfacc = pd.DataFrame(acc_ret, columns = ['testid','plen',
                'type','reccnt','top1acc','top1acc_farmost','top5acc',
                'top5acc_farmost','tau','rmse'])
        dflist.append(_dfacc)
    dfacc = pd.concat(dflist, axis=0)
    return dfacc
# In[9]:
def check_testds(datamode, test_cars=[]):
    """
    Build (and immediately discard) the datasets for every configured
    prediction length and half-window mode; presumably the reporting
    (mae etc.) happens inside make_dataset_byevent itself.
    """
    for plen in plens:
        for halfwin in half:
            _, _, _, _ = make_dataset_byevent(events_id[_test_event], plen, freq,
                                              oracle_mode=datamode,
                                              run_ts=_run_ts,
                                              test_cars=test_cars,
                                              half_moving_win=halfwin,
                                              train_ratio=train_ratio)
def dotest(config):
    """
    Run the configured test matrix and collect both the averaged metrics
    and the per-lap-type confusion breakdown.

    input:
        config ; {model_name: {testfunc_name: datamode}}
    return:
        (dfret, dfacc) ; concatenated run_test averages and
                         checkret_confusionmat breakdowns
    """
    acc_frames = []
    metric_frames = []
    for model_name, conf in config.items():
        for test_name, mode in conf.items():
            df, dataret = run_test(runs, plens, half, trainids,
                                   train_ratio, test_name,
                                   datamode=mode, models=[model_name])
            # per-<track,lap>-type accuracy for the same run
            acc = checkret_confusionmat(dataret, ref_testset,
                                        testid=test_name, model=model_name)
            metric_frames.append(df)
            acc_frames.append(acc)
    dfret = pd.concat(metric_frames, axis=0)
    dfacc = pd.concat(acc_frames, axis=0)
    return dfret, dfacc
#
# simulator
#
def get_pitlaps(verbose = True, prediction_length=2):
    """
    collect pitlaps info from COL_LAPSTATUS

    input (globals):
        laptime_data  ;
        _test_event   ;
        events
        _train_len    ; minimum laps for a ts (otherwise the ts is discarded)
        global_carids ; carno -> carid mapping
        _run_ts       ; column used to locate the valid (non-nan) laps
    input (parameters):
        verbose           ; print a message for each discarded short ts
        prediction_length ; only used in the minimum-length check
    return:
        ret_pitlaps ; sorted, de-duplicated laps at which at least one car pitted
        all_pitlaps ; {carno: [pit laps]} per car
        max_lap     ; longest ts length in the test event
    """
    run_ts = _run_ts
    #all_pitlaps = [] # carno -> pitstops
    all_pitlaps = {} # carno -> pitstops
    max_lap = 0
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < _train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            #print(f'carno:{carno}, totallen={totallen}')
            #first, get target a copy
            # target can be COL_XXSTATUS
            lap_status = rec[COL_LAPSTATUS, :]
            # pit laps are the indices where the lap-status flag is set
            pitstops = np.where(lap_status == 1)[0]
            #all_pitlaps.append(list(pitstops))
            all_pitlaps[carno] = list(pitstops)
    #return: union of all cars' pit laps, sorted ascending
    allset = []
    for l in all_pitlaps.keys():
        allset.extend(all_pitlaps[l])
    ret_pitlaps = sorted(list(set(allset)))
    return ret_pitlaps, all_pitlaps, max_lap
def get_nextpit(pitlaps, startlap):
    """
    Find, for each car, the first pit stop strictly after startlap.

    input:
        pitlaps  ; {carno: [pit lap numbers]} for all the cars
        startlap ; reference lap
    return:
        nextpit_map ; {carno: next pit lap}; cars without a later pit are omitted
        maxpit      ; largest next pit lap over all cars, np.nan if no car
                      has a pit after startlap (or pitlaps is empty)

    Bug fix: the previous implementation returned max() over a list mixing
    lap numbers and np.nan placeholders. Because every comparison with NaN
    is False, the result depended on dict iteration order and could silently
    be NaN even when valid laps existed; an empty input raised ValueError.
    NaN placeholders are now excluded before taking the max.
    """
    nextpit_map = {}
    for carno, laps in pitlaps.items():
        # first entry strictly after startlap, in the stored order
        # (laps are produced sorted by get_pitlaps via np.where)
        for lap in laps:
            if lap > startlap:
                nextpit_map[carno] = lap
                break
    maxpit = max(nextpit_map.values()) if nextpit_map else np.nan
    return nextpit_map, maxpit
def sim_init():
    """
    Snapshot the pit-model related rows (lap status, caution laps in stint,
    laps in stint) of every ts of the test event into their *_SAVE rows, so
    a simulation run can later restore the ground truth.
    """
    ts_col = _run_ts
    for event_data in laptime_data:
        if events[event_data[0]] != _test_event:
            continue
        # ts-length statistics, kept for parity with the other sim_* helpers
        lens = [series.shape[1] for series in event_data[2]]
        _ = int(np.max(lens))
        # copy the live rows into the save rows, per ts
        for row in range(event_data[2].shape[0]):
            ts = event_data[2][row]
            ts[COL_LAPSTATUS_SAVE, :] = ts[COL_LAPSTATUS, :]
            ts[COL_CAUTION_LAPS_INSTINT_SAVE, :] = ts[COL_CAUTION_LAPS_INSTINT, :]
            ts[COL_LAPS_INSTINT_SAVE, :] = ts[COL_LAPS_INSTINT, :]
def update_lapstatus(startlap):
    """
    Re-generate the predicted lap status from startlap onwards for every
    car of the test event, by running update_onets on each time series.
    """
    for event_data in laptime_data:
        if events[event_data[0]] != _test_event:
            continue
        # ts-length statistics, kept for parity with the other sim_* helpers
        lens = [series.shape[1] for series in event_data[2]]
        _ = int(np.max(lens))
        for row in range(event_data[2].shape[0]):
            series = event_data[2][row]
            car_number = event_data[1][row]
            update_onets(series, startlap, car_number)
# difference test on pit strategy
# car number singled out in update_onets for a deterministic pit strategy;
# all other cars sample their stint length from the empirical pit model
_pitstrategy_testcar = 12
# True  -> the test car always takes the shortest stint the model allows (min)
# False -> always the longest (max)
_pitstrategy_lowmode = True
def update_onets(rec, startlap, carno):
    """
    update lapstatus after startlap based on rec by the pit prediction model

    input:
        rec      ; a ts with multiple features COL_XXX (modified in place)
        startlap ; laps up to and including startlap keep their saved ground
                   truth; everything after is re-generated
        carno    ; car number; _pitstrategy_testcar gets a deterministic
                   strategy, all others sample from the empirical model
    return:
        None ; rec is updated in place for COL_LAPSTATUS,
               COL_CAUTION_LAPS_INSTINT, COL_LAPS_INSTINT
    """
    # this is the perfect empirical pit model for Indy500 2018
    # [0]: stint lengths when the stint had <=10 caution laps
    # [1]: stint lengths when the stint had >10 caution laps
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
            [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
            [46, 45, 43, 48, 46, 45, 45, 43]]
    #pit_model = pit_model_all
    pit_model = pit_model_top8

    # determine the valid (non-nan) length of this ts
    nans, x= nan_helper(rec[_run_ts,:])
    nan_count = np.sum(nans)
    recx = rec[:, ~np.isnan(rec[_run_ts,:])]
    # remove short ts
    totallen = recx.shape[1]
    if startlap >= totallen:
        return

    #totallen = tsrec.shape[1]
    #ipdb.set_trace()
    #reset status: restore ground truth up to startlap (inclusive) from the
    #*_SAVE rows, clear the lap status everywhere after
    endpos = startlap + 1
    rec[COL_LAPSTATUS,:] = 0
    rec[COL_LAPSTATUS,:endpos] = rec[COL_LAPSTATUS_SAVE, :endpos]
    rec[COL_CAUTION_LAPS_INSTINT,:endpos] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :endpos]
    rec[COL_LAPS_INSTINT, :endpos] = rec[COL_LAPS_INSTINT_SAVE, :endpos]
    #rec[COL_LAPSTATUS,:] = rec[COL_LAPSTATUS_SAVE, :]
    #rec[COL_CAUTION_LAPS_INSTINT,:] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :]
    #rec[COL_LAPS_INSTINT, :] = rec[COL_LAPS_INSTINT_SAVE, :]

    debug_report('start update_onets', rec[COL_LAPSTATUS], startlap, carno)

    #loop: predict the next pit position until the end of the ts is reached
    curpos = startlap
    while True:
        caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, curpos])
        laps_instint = int(rec[COL_LAPS_INSTINT, curpos])

        if carno == _pitstrategy_testcar:
            # deterministic strategy for the test car:
            # shortest stint in low mode, longest otherwise
            if _pitstrategy_lowmode:
                if caution_laps_instint <= 10:
                    #use low model
                    pred_pit_laps = min(pit_model[0])
                else:
                    pred_pit_laps = min(pit_model[1])
            else:
                if caution_laps_instint <= 10:
                    #use low model
                    pred_pit_laps = max(pit_model[0])
                else:
                    pred_pit_laps = max(pit_model[1])
        else:
            # sample a stint length; retry until it exceeds the laps already
            # run in this stint, give up after 10 tries and pit next lap
            retry = 0
            while retry < 10:
                if caution_laps_instint <= 10:
                    #use low model
                    pred_pit_laps = random.choice(pit_model[0])
                else:
                    pred_pit_laps = random.choice(pit_model[1])
                if pred_pit_laps <= laps_instint:
                    retry += 1
                    if retry == 10:
                        pred_pit_laps = laps_instint + 1
                    continue
                else:
                    break

        nextpos = curpos + pred_pit_laps - laps_instint
        if nextpos >= totallen:
            # predicted pit falls beyond the end of the ts: extend the
            # current stint counters to the end and stop
            nextpos = totallen - 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos+1] = caution_laps_instint
            for _pos in range(curpos+1, nextpos+1):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            break
        else:
            #if (pred_pit_laps > laps_instint) and (nextpos < totallen):
            # a valid pit: mark it and reset the stint counters at nextpos
            rec[COL_LAPSTATUS, nextpos] = 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos] = caution_laps_instint
            rec[COL_CAUTION_LAPS_INSTINT, nextpos] = 0
            for _pos in range(curpos+1, nextpos):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            rec[COL_LAPS_INSTINT, nextpos] = 0
            #go forward
            curpos = nextpos

    debug_report('after update_onets', rec[COL_LAPSTATUS], startlap, carno)
    return
#debug tracking status
#status matrix : laps x ( endCol x 5 features)
#features: target, lapstatus, lap_instint, caution_instint, trackstatus
# NOTE(review): nothing in this part of the file reads or writes
# _status_mat -- it looks like an unfinished debugging hook
_status_mat = {} # stepid -> status matrix
def debug_report_mat(startlap, maxnext):
    """
    output the status of the simulation

    NOTE(review): this looks like an unfinished stub -- fixedWidth/endCol
    and the parameters are never used, and the loop only binds `rec`
    without printing or recording anything.
    """
    fixedWidth = 5
    endCol = 4
    run_ts = _run_ts
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        #header carno | lap#...
        #fixed width
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
# car numbers whose simulation traces the debug_* helpers below should
# print; keep empty to silence all debug output
_debug_carlist = []
#_debug_carlist = [12]
def debug_report_ts(msg, rec, startlap, carno, col= COL_LAPSTATUS):
    """Dump one feature row of a ts for watched cars, split at startlap."""
    if carno in _debug_carlist:
        seen = rec[col, : startlap + 1]
        future = rec[col, startlap + 1:]
        print(f'--------- {msg}: {startlap} ----------')
        print(seen)
        print('='*10)
        print(future)
def debug_report(msg, rec, startlap, carno):
    """Dump a 1-d array for watched cars, split at startlap."""
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[: startlap + 1])
        print('='*10)
        print(rec[startlap + 1:])
def debug_print(msg):
    """Print msg only when at least one car is being debugged."""
    if _debug_carlist:
        print(msg)
# works on predicted lap status
def sim_onestep_pred(predictor, prediction_length, freq,
                     startlap, endlap,
                     oracle_mode = MODE_ORACLE,
                     verbose = False
                     ):
    """
    Simulate the race from startlap to endlap in steps of prediction_length,
    feeding each step's forecast back into the target so later steps predict
    on top of earlier predictions (regressive forecasting). Works on the
    lap status currently stored in COL_LAPSTATUS (the ground truth is kept
    in COL_LAPSTATUS_SAVE, which is what row 0 of the result holds).

    input:
        predictor         ; trained predictor used by make_evaluation_predictions
        prediction_length ; step size of the simulation
        freq              ; pandas frequency string for the dummy timestamps
        startlap, endlap  ; simulated lap range
        oracle_mode       ; MODE_* flags controlling feature selection
        verbose           ; print skipped short ts
    return:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus (saved ground truth, COL_LAPSTATUS_SAVE)
            1,: -> true target
            2,: -> pred target (true prefix, forecasts appended step by step)
            3,  -> placeholder
            4,  -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len

    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()

    # endpos is the exclusive end of the current window; the window's last
    # prediction_length laps are the part being predicted
    endpos = startlap + prediction_length + 1
    while(endpos <= endlap + prediction_length + 1):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            #ipdb.set_trace()
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # this car's ts ends before the current window: skip it
                if endpos > totallen:
                    continue

                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]

                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()

                # lazily create the per-car result matrix on first sight
                # <5, totallen>
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS_SAVE, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)

                # forecasts_et row 2 is updated with forecasts each step, so
                # the target below already contains earlier predictions
                target_val = forecasts_et[carno][2,:]

                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0

                test_rec_cnt = 0
                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                # stint counters at the last observed lap of this window
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                # test mode: overwrite the unobserved feature tail
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #                            laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit resets the pit age
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage

                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec]
                                  }
                                 )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                  }
                                 )
                test_rec_cnt += 1
                #debug
                #debug_report('simu_onestep', rec, startlap, carno, col= _run_ts)
                debug_report(f'simu_onestep: {startlap}-{endpos}', target_val[:endpos], startlap, carno)
        # end of for each ts

        # RUN Prediction here, one batch for the whole grid
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=100,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)

        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            #update the forecasts, ready to use in the next prediction (regressive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()

        #go forward
        endpos += prediction_length
    return forecasts_et
# works on lapstatus ground truth
def sim_onestep_ex(predictor, prediction_length, freq,
                   startlap, endlap,
                   oracle_mode = MODE_ORACLE,
                   verbose = False
                   ):
    """
    Simulate the race from startlap to endlap in steps of prediction_length
    with regressive forecasting, like sim_onestep_pred, but working on the
    lap status *ground truth* (row 0 of the result is COL_LAPSTATUS itself,
    not the saved copy) and clearing the prediction row beyond endlap at
    the end.

    input:
        predictor         ; trained predictor used by make_evaluation_predictions
        prediction_length ; step size of the simulation
        freq              ; pandas frequency string for the dummy timestamps
        startlap, endlap  ; simulated lap range
        oracle_mode       ; MODE_* flags controlling feature selection
        verbose           ; print skipped short ts
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target (NaN beyond endlap)
            3,  -> placeholder
            4,  -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len

    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()

    # endpos is the exclusive end of the current window
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + 1):
    while(endpos <= endlap + prediction_length + 1):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # this car's ts ends before the current window: skip it
                if endpos > totallen:
                    continue

                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]

                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()

                # lazily create the per-car result matrix on first sight
                # <5, totallen>
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                    #forecasts_et[carno][2,:endpos] = rec[run_ts,:endpos].copy().astype(np.float32)

                # forecasts_et row 2 is updated with forecasts each step, so
                # the target below already contains earlier predictions
                target_val = forecasts_et[carno][2,:]

                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0

                test_rec_cnt = 0
                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                # stint counters at the last observed lap of this window
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                # test mode: overwrite the unobserved feature tail
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #                            laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit resets the pit age
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage

                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec]
                                  }
                                 )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                  }
                                 )
                test_rec_cnt += 1
        # end of for each ts

        # RUN Prediction here, one batch for the whole grid
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=100,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)

        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            #update the forecasts, ready to use in the next prediction (regressive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()

        #go forward
        endpos += prediction_length

    #clear the unpredicted tail of the prediction row
    for carno in forecasts_et.keys():
        forecasts_et[carno][2, endlap+1:] = np.nan
    return forecasts_et
def sim_onestep(predictor, prediction_length, freq,
startlap, endlap,
oracle_mode = MODE_ORACLE,
verbose = False
):
"""
input:
parameters ; same as longterm_predict, make_dataset_byevent
startlap
endlap
return:
forecast ; {}, carno -> 5 x totallen matrix
0,: -> lapstatus
1,: -> true target
2,: -> pred target
3, -> placeholder
4, -> placeholder
"""
run_ts= _run_ts
test_event = _test_event
feature_mode = _feature_mode
context_ratio = _context_ratio
train_len = _train_len
start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
test_set = []
forecasts_et = {}
_laptime_data = laptime_data.copy()
#add statistics for adjust test
# trackstatus, lapstatus
mae = [0,0]
#_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
for _data in _laptime_data:
_test = []
if events[_data[0]] != test_event:
#jump out
continue
#statistics on the ts length
ts_len = [ _entry.shape[1] for _entry in _data[2]]
max_len = int(np.max(ts_len))
# process for each ts
for rowid in range(_data[2].shape[0]):
# rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
rec = _data[2][rowid].copy()
rec_raw = _data[2][rowid].copy()
#remove nan(only tails)
nans, x= nan_helper(rec[run_ts,:])
nan_count = np.sum(nans)
rec = rec[:, ~np.isnan(rec[run_ts,:])]
# remove short ts
totallen = rec.shape[1]
if ( totallen < train_len + prediction_length):
if verbose:
print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
continue
carno = _data[1][rowid]
carid = global_carids[_data[1][rowid]]
static_cat = [carid]
#first, get target a copy
# target can be COL_XXSTATUS
target_val = rec[run_ts,:].copy().astype(np.float32)
lap_status = rec[COL_LAPSTATUS, :].copy()
track_status = rec[COL_TRACKSTATUS, :].copy()
pitage_status = rec[COL_LAPS_INSTINT,:].copy()
# <3, totallen>
forecasts_et[carno] = np.zeros((5, totallen))
forecasts_et[carno][:,:] = np.nan
forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
# selection of features
if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
rec[COL_TRACKSTATUS, :] = 0
if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
rec[COL_LAPSTATUS, :] = 0
test_rec_cnt = 0
if True:
#bug fix, fixed the split point for all cars/ts
#for endpos in range(max_len, context_len+prediction_length,step):
#step = prediction_length
#for endpos in range(startlap + prediction_length, endlap, step):
endpos = startlap + prediction_length
while(endpos < endlap and endpos < totallen):
# RUN Prediction for single record
_test = []
track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
# test mode
if test_flag(oracle_mode, MODE_TESTCURTRACK):
# since nan does not work, use cur-val instead
track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
#track_rec[-prediction_length:] = random.randint(0,1)
#lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
lap_rec[-prediction_length:] = 0
#for pitage, just assume there is no pit
start_pitage = pitage_rec[-prediction_length - 1]
pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
elif test_flag(oracle_mode, MODE_TESTZERO):
#set prediction part as nan
#track_rec[-prediction_length:] = np.nan
#lap_rec[-prediction_length:] = np.nan
track_rec[-prediction_length:] = 0
lap_rec[-prediction_length:] = 0
#for pitage, just assume there is no pit
start_pitage = pitage_rec[-prediction_length - 1]
pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
if test_flag(oracle_mode, MODE_PREDPIT):
#todo
#lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
# laps_instint,prediction_length)
#for pitage, use the predicted lap info to update pitage
start_pitage = pitage_rec[-prediction_length - 1]
for pos in range(prediction_length):
if lap_rec[-prediction_length + pos]==0:
pitage_rec[-prediction_length + pos] = start_pitage+1
else:
#new pit
start_pitage = 0
pitage_rec[-prediction_length + pos] = start_pitage
if feature_mode == FEATURE_STATUS:
_test.append({'target': target_val[:endpos].astype(np.float32),
'start': start,
'feat_static_cat': static_cat,
'feat_dynamic_real': [track_rec,lap_rec]
}
)
elif feature_mode == FEATURE_PITAGE:
_test.append({'target': target_val[:endpos].astype(np.float32),
'start': start,
'feat_static_cat': static_cat,
'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
}
)
# RUN Prediction here, for single record
test_ds = ListDataset(_test, freq=freq)
forecast_it, ts_it = make_evaluation_predictions(
dataset=test_ds, # test dataset
predictor=predictor, # predictor
num_samples=100, # number of sample paths we want for evaluation
)
forecasts = list(forecast_it)
tss = list(ts_it)
#get prediction result
forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
#update target_val
target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
#save forecast
#save the prediction
completed_laps = len(tss[0]) - prediction_length + 1
#print('car no:', carno, 'completed_laps:', completed_laps)
forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
test_rec_cnt += 1
#go forward
endpos += prediction_length
#one ts
if verbose:
print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
return forecasts_et
# pred pit differs to true pit
def get_acc_onestint_pred(forecasts, startlap, nextpit, nextpit_pred, trim=2, currank = False):
    """
    Score the rank change of one stint when the next pit lap is predicted.

    input:
        forecasts ; dict carno -> [5, totallen] matrix, where row 0 holds the
            lap status, row 3 the true rank and row 4 the predicted rank
        startlap ; evaluate only the stint that begins at this (pit) lap
        nextpit ; dict carno -> true next pit lap
        nextpit_pred ; dict carno -> predicted next pit lap
        trim ; laps backed off from the pit to read a steady-state rank
        currank ; True -> naive model that predicts "rank does not change"
    output:
        list of rows [carno, startlap, startrank, endrank, diff, sign,
                      pred_endrank, pred_diff, pred_sign]
    """
    rows = []
    for carno, mat in forecasts.items():
        total_laps = len(mat[1, :])
        rank_true = mat[3, :]
        rank_pred = mat[4, :]

        # only cars that actually pit at startlap belong to this stint
        if startlap >= total_laps or mat[0, startlap] != 1:
            continue

        rank_at_start = rank_true[startlap - trim]

        # both the true and the predicted next pit must be known
        if carno not in nextpit:
            continue
        pit_true = nextpit[carno]
        if np.isnan(pit_true):
            continue
        if carno not in nextpit_pred:
            continue
        pit_pred = nextpit_pred[carno]
        if np.isnan(pit_pred):
            continue

        rank_at_end = rank_true[pit_true - trim]
        delta = rank_at_end - rank_at_start
        delta_sign = get_sign(delta)

        if currank:
            # naive model: the rank is frozen for the whole stint
            rank_at_end_pred = rank_at_start
        else:
            # read the predicted rank at the *predicted* pit lap
            rank_at_end_pred = rank_pred[pit_pred - trim]
        delta_pred = rank_at_end_pred - rank_at_start
        delta_pred_sign = get_sign(delta_pred)

        rows.append([carno, startlap, rank_at_start,
                     rank_at_end, delta, delta_sign,
                     rank_at_end_pred, delta_pred, delta_pred_sign])
    return rows
# works when pred pitstop == true pitstop
def get_acc_onestint(forecasts, startlap, nextpit, trim=2, currank = False):
    """
    Score the rank change of one stint when the predicted pit lap equals
    the true pit lap.

    input:
        forecasts ; dict carno -> [5, totallen] matrix, where row 0 holds the
            lap status, row 3 the true rank and row 4 the predicted rank
        startlap ; evaluate only the stint that begins at this (pit) lap
        nextpit ; dict carno -> true next pit lap
        trim ; laps backed off from the pit to read a steady-state rank
        currank ; True -> naive model that predicts "rank does not change"
    output:
        list of rows [carno, startlap, startrank, endrank, diff, sign,
                      pred_endrank, pred_diff, pred_sign]
    """
    rows = []
    for carno, mat in forecasts.items():
        total_laps = len(mat[1, :])
        rank_true = mat[3, :]
        rank_pred = mat[4, :]

        # only cars that actually pit at startlap belong to this stint
        if startlap >= total_laps or mat[0, startlap] != 1:
            continue

        rank_at_start = rank_true[startlap - trim]

        if carno not in nextpit:
            continue
        pit_lap = nextpit[carno]
        if np.isnan(pit_lap):
            continue

        rank_at_end = rank_true[pit_lap - trim]
        delta = rank_at_end - rank_at_start
        delta_sign = get_sign(delta)

        if currank:
            # naive model: the rank is frozen for the whole stint
            rank_at_end_pred = rank_at_start
        else:
            rank_at_end_pred = rank_pred[pit_lap - trim]
        delta_pred = rank_at_end_pred - rank_at_start
        delta_pred_sign = get_sign(delta_pred)

        rows.append([carno, startlap, rank_at_start,
                     rank_at_end, delta, delta_sign,
                     rank_at_end_pred, delta_pred, delta_pred_sign])
    return rows
# pred sim
def run_simulation_pred(predictor, prediction_length, freq,
        datamode = MODE_ORACLE):
    """
    Simulation driven by the *predicted* pit-stop model.

    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation
            2. eval stint performance

    NOTE(review): relies on module-level simulation state mutated by
    sim_init() / update_lapstatus(); the call order below is significant.
    Returns a DataFrame with one row per (car, stint).
    """
    rankret = []
    # the ground truth pit laps for the whole event
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    for pitlap in allpits:
        #1. update lap status
        debug_print(f'start pitlap: {pitlap}')
        update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        #2. get maxnext
        # presumably get_pitlaps() now reflects the predicted lap status
        # written by update_lapstatus() -- TODO confirm
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        #debug
        #_debug_carlist
        # car 12 is used as a fixed debug probe
        if 12 in nextpit and 12 in nextpit_pred:
            #print('nextpit:', nextpit[12], nextpit_pred[12], 'maxnext:', maxnext, maxnext_pred)
            debugstr = f'nextpit: {nextpit[12]}, {nextpit_pred[12]}, maxnext: {maxnext}, {maxnext_pred}'
            debug_print(debugstr)
        #run one step sim from pitlap to maxnext
        forecast = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, maxnext_pred,
                oracle_mode = datamode
                )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            # unsupported experiment id: abort the simulation loop
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred)
        rankret.extend(ret)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
            'endrank', 'diff', 'sign',
            'pred_endrank', 'pred_diff', 'pred_sign',
            ])
    return df
# oracle sim
def run_simulation(predictor, prediction_length, freq,
        datamode = MODE_ORACLE):
    """
    Oracle simulation: for every true pit lap, run a one-step simulation up
    to the furthest next pit and score each stint.

    step:
        1. collect the true pit laps
        2. for each pit lap: one-step simulation, then stint evaluation
    """
    results = []
    allpits, pitmat, maxlap = get_pitlaps()
    for pitlap in allpits:
        print(f'start pitlap: {pitlap}')
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        # simulate from this pit lap up to the latest next pit
        forecast = sim_onestep_ex(predictor, prediction_length, freq,
                                  pitlap, maxnext,
                                  oracle_mode = datamode
                                  )
        print(f'simulation done: {len(forecast)}')
        # turn the raw forecast into per-lap ranks
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # score the stint that starts at this pit lap
        results.extend(get_acc_onestint(forecasts_et, pitlap, nextpit))
    # collect every stint row into a single DataFrame
    return pd.DataFrame(results, columns =['carno', 'startlap', 'startrank',
                                           'endrank', 'diff', 'sign',
                                           'pred_endrank', 'pred_diff', 'pred_sign',
                                           ])
#
# ------------
#
def longterm_predict(predictor, runs, prediction_length, freq,
                useeid = False,
                run_ts= COL_LAPTIME,
                test_event = 'Indy500-2018',
                test_cars = [],
                use_global_dict = True,
                oracle_mode = MODE_ORACLE,
                half_moving_win = 0,
                train_ratio=0.8,
                log_transform = False,
                verbose = False
        ):
    """
    split the ts to train and test part by the ratio

    Rolling long-horizon prediction on the test event: forecasts are written
    back into the target series, so later windows are conditioned on earlier
    predictions (an autoregressive roll-out).

    input:
        predictor ; a trained gluonts predictor
        runs ; index into laptime_data (>=0 selects one event, <0 takes all)
        oracle_mode: false to simulate prediction in real by
            set the covariates of track and lap status as nan in the testset
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder

    NOTE(review): run_ts, test_event, feature_mode and context_ratio are
    overwritten from module globals right below, so the corresponding
    parameters are effectively ignored.
    """
    # parameters shadowed by module-level experiment configuration
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    init_track_model()
    init_adjust_track_model()
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []
    forecasts_et = {}
    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        # enforce a minimal context window
        if context_len < 10:
            context_len = 10
        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            # pristine copy kept to measure covariate-adjustment error below
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            # snapshots of the covariates; restored before every window
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()
            # <3, totallen>
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            # row 2 starts as a copy of the truth; forecast windows overwrite it
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                        'start': start,
                        'feat_static_cat': static_cat,
                        'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                            rec[COL_LAPSTATUS,:]]
                        }
                        )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = int(prediction_length/2)
                elif half_moving_win == 2:
                    step = prediction_length
                else:
                    step = 1
                #bug fix, fixed the split point for all cars/ts
                #for endpos in range(max_len, context_len+prediction_length,step):
                for endpos in range(context_len+prediction_length, max_len, step):
                    #check if enough for this ts
                    if endpos > totallen:
                        break
                    # RUN Prediction for single record
                    _test = []
                    # check pitstop(stint) in the last prediction
                    # use ground truth of target before the last pitstop
                    if np.sum(lap_status[endpos-2*prediction_length:endpos-prediction_length]) > 0:
                        # pit found
                        # adjust endpos
                        pitpos = np.where(lap_status[endpos-2*prediction_length:endpos-prediction_length] == 1)
                        endpos = endpos-2*prediction_length + pitpos[0][0] + prediction_length + 1
                        #print('endpos:',endpos,pitpos)
                        #check if enough for this ts
                        if endpos > totallen:
                            break
                    #reset target, status
                    # undo the write-backs of the previous window
                    target_val = rec[run_ts,:].copy().astype(np.float32)
                    rec[COL_LAPSTATUS, :] = lap_status
                    rec[COL_TRACKSTATUS, :] = track_status
                    rec[COL_LAPS_INSTINT, :] = pitage_status
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec
                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec
                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    # accumulate the covariate-adjustment error for reporting
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                }
                                )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                }
                                )
                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)
                    #get prediction result
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
                    #update target_val
                    # write the forecast back so the next window is
                    # conditioned on it (autoregressive roll-out)
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
                    #save forecast
                    #save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    #print('car no:', carno, 'completed_laps:', completed_laps)
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
                    test_rec_cnt += 1
            #one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
    #train_set.extend(_train)
    #test_set.extend(_test)
    #print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    #train_ds = ListDataset(train_set, freq=freq)
    #test_ds = ListDataset(test_set, freq=freq)
    return forecasts_et
# In[12]:
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset):
    """
    evaluate stint rank by laptime forecasting

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target (lap time)
            2,: -> pred target (lap time)
            3,: -> filled here with the true per-lap rank
            4,: -> filled here with the predicted per-lap rank
        start_offset ; DataFrame with columns 'car_number', 'elapsed_time'
            holding each car's elapsed time at lap 0; any non-DataFrame
            value is treated as "no offset"
        prediction_length ; unused, kept for interface compatibility
    return:
        forecasts_et (mutated in place: rows 3 and 4 are filled)
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan
    for carno in forecasts_et.keys():
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        else:
            # bug fix: `offset` used to be undefined (NameError) whenever
            # start_offset was not a DataFrame; default to no offset
            offset = 0
        lapnum = len(forecasts_et[carno][1,:])
        # true lap times -> elapsed time series
        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed
        # predicted lap times -> elapsed time series
        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed
    #calculate rank, support nan
    # double argsort turns per-lap orderings into ranks; nan sorts last
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]
    return forecasts_et
def eval_stint_direct(forecasts_et, prediction_length):
    """
    Rank cars lap by lap directly from a time-difference target.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3,: -> filled here with the true per-lap rank
            4,: -> filled here with the predicted per-lap rank
        prediction_length ; unused, kept for interface compatibility
    return:
        forecasts_et (mutated in place: rows 3 and 4 are filled)
    """
    cars = list(forecasts_et.keys())
    car_index = {c: i for i, c in enumerate(cars)}
    # todo: Indy500 -> at most 200 laps
    max_laps = 200
    timediff = np.full((2, len(cars), max_laps), np.nan)
    # gather true (layer 0) and predicted (layer 1) targets per car
    for carno, mat in forecasts_et.items():
        laps = len(mat[1, :])
        timediff[0, car_index[carno], :laps] = mat[1, :]
        timediff[1, car_index[carno], :laps] = mat[2, :]
    # double argsort turns per-lap orderings into ranks; nan sorts last
    order_true = np.argsort(timediff[0], axis=0)
    rank_true = np.argsort(order_true, axis=0)
    order_pred = np.argsort(timediff[1], axis=0)
    rank_pred = np.argsort(order_pred, axis=0)
    # write the ranks back into the per-car matrices
    for carno, mat in forecasts_et.items():
        laps = len(mat[1, :])
        mat[3, :] = rank_true[car_index[carno], :laps]
        mat[4, :] = rank_pred[car_index[carno], :laps]
    return forecasts_et
#calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    NOTE(review): near-duplicate of eval_stint_bylaptime; consider merging.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix (row 1 true laptime,
            row 2 predicted laptime; rows 3/4 are filled here with ranks)
        start_offset[]; elapsed time for lap0, for one specific event
        prediction_length ; unused, kept for interface compatibility
    return:
        forecasts_et (mutated in place: rows 3 and 4 are filled)
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan
    for carno in forecasts_et.keys():
        #start_offset is global var
        # NOTE(review): if start_offset is not a DataFrame, `offset` below is
        # never assigned and this raises NameError -- confirm callers always
        # pass a DataFrame
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        lapnum = len(forecasts_et[carno][1,:])
        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed
        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed
    #maxlap = max(maxlap, len(forecasts_et[carno][1,:]))
    #calculate rank, support nan
    # double argsort turns per-lap orderings into ranks; nan sorts last
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]
    return forecasts_et
# In[13]:
def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
        test_event='Indy500-2018', test_cars = [],
        datamode = MODE_ORACLE,model = 'oracle'):
    """
    Load the requested model and run a long-term prediction on the test event.

    dependency: test_event, test on one event only

    NOTE(review): test_event here is shadowed by the module global
    _test_event inside longterm_predict; also `test_cars = []` is a mutable
    default argument (harmless while it is only read, but fragile).
    Returns the forecasts dict produced by longterm_predict.
    """
    retdf = []
    pred_ret = {}
    ds_ret = {}
    rank_result = {}
    predictor = {}
    #for model in models:
    print('exp:',inspect.stack()[0][3],'model:', model,
            'datamode:', get_modestr(datamode),'eval:', _exp_id )
    # load the trained predictor for this (prediction_length, model, trainid)
    predictor[model] = load_model(prediction_length, model,
            trainid=trainid)
    ### create test dataset
    forecasts = longterm_predict(predictor[model],
            events_id[_test_event], prediction_length,freq,
            oracle_mode=datamode,
            run_ts = _run_ts,
            test_cars=test_cars,
            half_moving_win= half_moving_win,
            train_ratio=train_ratio
            )
    #forecasts = eval_stint_rank(forecasts_et, prediction_length,
    #        global_start_offset[test_event])
    return forecasts
# In[14]:
def get_sign(diff):
    """Return the sign of *diff*: 1 if positive, -1 if negative, else 0."""
    if diff == 0:
        return 0
    return 1 if diff > 0 else -1
def get_stint_acc(forecasts, trim=2, currank = False):
    """
    Evaluate per-stint rank change and its predicted sign.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        currank ; True -> naive model that predicts "rank unchanged"
    output:
        DataFrame with one row per stint:
        carno, stintid, startrank, endrank, diff, sign,
        pred_endrank, pred_diff, pred_sign
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]

        # laps where this car pitted
        pitpos_list = np.where(forecasts[carno][0,:] == 1)[0]

        stintid = 0
        startrank = true_rank[0]
        for pitpos in pitpos_list:
            endrank = true_rank[pitpos-trim]
            pred_endrank = startrank if currank else pred_rank[pitpos-trim]
            rankret.append(_stint_row(carno, stintid, startrank, endrank, pred_endrank))
            stintid += 1
            # next stint starts from the rank held going into this pit
            startrank = true_rank[pitpos-trim]
        # final stint: from the last pit (or the race start when the car
        # never pitted) to the end of the race.
        # bug fix: previously `pitpos_list[-1]` raised IndexError for a car
        # with no pit stops, losing its single start-to-finish stint.
        if len(pitpos_list) == 0 or pitpos_list[-1] < lapnum - 1:
            endrank = true_rank[-1]
            pred_endrank = startrank if currank else pred_rank[-1]
            rankret.append(_stint_row(carno, stintid, startrank, endrank, pred_endrank))
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'stintid', 'startrank',
            'endrank', 'diff', 'sign',
            'pred_endrank', 'pred_diff', 'pred_sign',
            ])
    return df

def _stint_row(carno, stintid, startrank, endrank, pred_endrank):
    """Build one result row; the sign columns come from get_sign."""
    diff = endrank - startrank
    pred_diff = pred_endrank - startrank
    return [carno, stintid, startrank,
            endrank, diff, get_sign(diff),
            pred_endrank, pred_diff, get_sign(pred_diff)]
#
# configuration
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
# NOTE: most of these defaults are overridden from the command line in the
# __main__ block below.
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'
_test_event = 'Indy500-2018'
#_test_event = 'Indy500-2019'
_train_len = 40
_feature_mode = FEATURE_STATUS
_context_ratio = 0.
#_task_id = 'timediff' # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
#
#_task_id = 'lapstatus' # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus' #rank, laptime, laptim2rank, timediff2rank...
_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
# In[16]:
# module-level state filled in by init()
global_start_offset = {}   # event -> DataFrame of per-car elapsed time at lap 0
global_carids = {}         # carno -> carid (loaded from the pickle)
laptime_data = None        # per-event lap data (loaded from the pickle)
freq = "1min"              # gluonts time-series frequency
decode_carids = {}         # carid -> carno (inverse of global_carids)
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}_v9'
def init():
    """
    Load event data and the precomputed laptime pickle into module globals.

    Fills global_start_offset (per-event lap-0 elapsed time), global_carids,
    laptime_data and decode_carids. Requires the pickle file
    'laptime_rank_timediff_pit-oracle-<dbid>.pickle' in the working directory.
    """
    global global_carids, laptime_data, global_start_offset, decode_carids
    stagedata = {}
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata = stagedata[event]
        #offset
        # elapsed time at lap 0 is used later to convert lap times to ranks
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]
    # start from here
    import pickle
    #with open('laptime_rank_timediff_fulltest-oracle-%s.pickle'%year, 'rb') as f:
    with open(f'laptime_rank_timediff_pit-oracle-{dbid}.pickle', 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    decode_carids={carid:carno for carno, carid in global_carids.items()}
    print(f'init: load dataset with {len(laptime_data)} races, {len(global_carids)} cars')
def runtest(modelname, model, datamode, naivemode, trainid= "2018"):
    """
    Run one model/data-mode configuration and report sign-prediction metrics.

    modelname is only a label for the caller (unused here); naivemode makes
    get_stint_acc fall back to the "rank unchanged" baseline.
    Returns (acc, mae, rmse, r2); (0, 0, 0, 0) when _exp_id is unsupported.
    NOTE(review): uses the module global _trim, which is only set in the
    __main__ block -- confirm before importing this module elsewhere.
    """
    forecast = run_exp(2,2, train_ratio =0.1 , trainid = trainid,
            datamode=datamode, model=model)
    # convert the raw forecast into per-lap ranks according to the task
    if _exp_id=='rank' or _exp_id=='timediff2rank':
        forecasts_et = eval_stint_direct(forecast, 2)
    elif _exp_id=='laptime2rank':
        forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
    else:
        print(f'Error, {_exp_id} evaluation not support yet')
        return 0,0, 0,0
    df = get_stint_acc(forecasts_et, currank = naivemode, trim= _trim)
    # sign accuracy plus regression metrics on the rank change
    correct = df[df['sign']==df['pred_sign']]
    acc = len(correct)/len(df)
    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values))/len(df)
    rmse = mean_squared_error(df['pred_diff'].values , df['diff'].values)
    mae = mean_absolute_error(df['pred_diff'].values , df['diff'].values)
    r2 = r2_score(df['pred_diff'].values , df['diff'].values)
    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}')
    return acc, mae, rmse, r2
def get_evalret(df):
    """
    Print and return sign-prediction metrics for a stint-result DataFrame.

    Also prints a naive "rank unchanged" baseline for comparison.
    Returns (acc, mae, rmse, r2).
    """
    hits = df[df['sign']==df['pred_sign']]
    acc = len(hits)/len(df)
    errors = df['pred_diff'].values - df['diff'].values
    mae1 = np.sum(np.abs(errors))/len(df)
    rmse = mean_squared_error(df['pred_diff'].values , df['diff'].values)
    mae = mean_absolute_error(df['pred_diff'].values , df['diff'].values)
    r2 = r2_score(df['pred_diff'].values , df['diff'].values)
    # naive baseline: predict that the rank does not change over the stint
    naive_hits = df[df['startrank']==df['endrank']]
    acc_naive = len(naive_hits)/len(df)
    mae_naive = np.mean(np.abs(df['diff'].values))
    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    return acc, mae, rmse, r2
# In[20]:
def mytest():
    """
    Run the full evaluation matrix (one row per model/data-mode config)
    and cache the result as a csv keyed by the experiment settings.

    Returns the result DataFrame (freshly computed or read from the cache).
    """
    savefile = f'stint-evluate-{_exp_id}-d{_dataset_id}-t{_test_event}-c{_context_ratio}_trim{_trim}.csv'
    if os.path.exists(savefile):
        print(f'{savefile} already exists, bye')
        retdf = pd.read_csv(savefile)
        # bug fix: return the cached result instead of falling through to None
        return retdf
    # config: name -> [model, datamode, naivemode]
    config = {'fulloracle':['oracle',MODE_ORACLE,False],
              'laponly':['oracle',MODE_ORACLE_LAPONLY,False],
              'notracklap':['oracle',MODE_NOTRACK + MODE_NOLAP,False],
              'fullpred':['oracle',MODE_PREDTRACK + MODE_PREDPIT,False],
              'curtrack':['oracle',MODE_TESTCURTRACK,False],
              'zerotrack':['oracle',MODE_TESTZERO,False],
              'predtrack':['oracle',MODE_PREDTRACK + MODE_ORACLE_TRACKONLY,False],
              'predpit':['oracle',MODE_PREDPIT + MODE_ORACLE_LAPONLY,False],
              'deepAR':['deepAR',MODE_ORACLE,False],
              'naive':['zero',MODE_ORACLE, True],
              }
    cols = ['runid','acc','mae', 'rmse', 'r2']
    result = []
    for modelname in config.keys():
        acc, mae, rmse, r2 = runtest(modelname, config[modelname][0],
                config[modelname][1],config[modelname][2])
        result.append([modelname, acc, mae, rmse, r2])
    retd = pd.DataFrame(result,columns=cols)
    # bug fix: previously wrote to a path without the _trim suffix, so the
    # cache check above could never find the saved file
    retd.to_csv(savefile, float_format='%.3f')
    return retd
# In[ ]:
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # cmd argument parser
    usage = 'stint_predictor_fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    # NOTE(review): --contextratio is parsed but never copied into
    # _context_ratio below, so the option currently has no effect
    parser.add_option("--contextratio", dest="contextratio", default=0.)
    parser.add_option("--trim", dest="trim", type=int, default=2)
    opt, args = parser.parse_args()
    #set global parameters
    _dataset_id = opt.datasetid
    _test_event = opt.testevent
    _trim = opt.trim
    # the task id selects which target column is forecast and how it is
    # converted to ranks during evaluation
    if opt.taskid == 'laptime':
        _task_id = 'laptime' # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
        _exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff' # rank,laptime, the trained model's task
        _run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
        _exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'rank':
        _task_id = 'rank' # rank,laptime, the trained model's task
        _run_ts = COL_RANK #COL_LAPTIME,COL_RANK
        _exp_id='rank' #rank, laptime, laptim2rank, timediff2rank...
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)
    if _dataset_id=='' or _test_event=='':
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)
    # datasets whose name contains 'pitage' carry the extra pit-age feature
    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE
    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
            _task_id)
    init()
    mytest()
| 138,147 | 36.611762 | 310 | py |
rankpredictor | rankpredictor-master/src/indycar/model/quicktest_modules_transformerokay.py | #!/usr/bin/env python
# coding: utf-8
"""
RankNet QuickTest goes through the following steps
makedb laptime
makedb gluonts
train model
evaluate model
draw figures
version 0.4
supported features:
forecast_mode: shortterm, stint
trainmodel : deepAR , deepARW-Oracle, deepAR-multi
testmodel : standard, oracle,pitmodel, joint
Usage: RankNet-QuickTest.py <configfile> [options]
options overwrite the configurations for quick experiments needs, include:
_forecast_mode ;
trainmodel ;
testmodel ;
_joint_train ; False/True
loopcnt ; 100/2
_pitmodel_bias ; 0/2,4
year ; 2018/2019
_test_event ; Indy500-2018, Indy500-2019
"""
import logging
from optparse import OptionParser
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import random
import mxnet as mx
from mxnet import gluon
import pickle
import json
import copy
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from pathlib import Path
import configparser
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWeightEstimator
#import indycar.model.stint_simulator_shortterm_pitmodel as stint
import indycar.model.quicktest_simulator as stint
#from indycar.model.global_variables import _hi
import indycar.model.global_variables as gvar
from gluonts.model.transformer import TransformerEstimator
logger = logging.getLogger(__name__)
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-car, per-lap table from a raw race dataset.

    Keeps the earliest record per (car_number, completed_laps), drops columns
    that are not needed downstream, and derives lap-over-lap deltas.

    Args:
        dataset: raw race DataFrame; must contain at least car_number,
            completed_laps, rank, elapsed_time, track_status, lap_status and
            the columns dropped below.
    Returns:
        DataFrame with columns ['car_number','completed_laps','time_diff',
        'rank','track_status','lap_status','elapsed_time'].
    """
    # pick up data with valid rank: earliest record per (car, lap)
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
                          "last_pitted_lap","start_position","laps_led"], axis=1)

    carnumber = set(uni_ds['car_number'])
    print('cars:', carnumber)
    print('#cars=', len(carnumber))

    # lap-over-lap deltas; zeroed at each car's first record (car boundary).
    # bugfix: use .loc instead of chained indexing (df[col][mask] = 0), which
    # emits SettingWithCopyWarning and silently fails under pandas copy-on-write
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[mask, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[mask, 'time_diff'] = 0

    df = uni_ds[['car_number','completed_laps','time_diff','rank','track_status', 'lap_status','elapsed_time']]
    return df
def make_lapstatus_data(dataset):
    """Return per-lap track status taken from one car that finished the race.

    Any finisher works since the track flag is shared by the whole field;
    the first finisher found is used.
    """
    final_lap = max(dataset.completed_laps)

    # cars that completed the final lap
    finishers = dataset[dataset.completed_laps == final_lap].car_number.values
    print('count of completed cars:', len(finishers))
    print('completed cars:', finishers)

    # pick the first finisher and keep one record per lap
    pick = dataset[dataset['car_number'] == finishers[0]]
    pick = pick.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    return pick[['completed_laps', 'track_status']]
def load_data(event, year=0):
    """Load one race csv and derive the completed-laps tables.

    Args:
        event: event name, used to build the input path
            '../data/final/C_<event>[-<year>].csv'.
        year: optional year; appended to the file name when > 0.
    Returns:
        (alldata, rankdata, acldata, flagdata) where alldata is the raw frame,
        rankdata keeps the earliest record per (car, lap), acldata is
        make_cl_data over all cars, flagdata the per-lap track flags.
    """
    if year > 0:
        # bugfix: year is an int here, so the old '...' + year concatenation
        # raised TypeError; format it instead
        inputfile = f'../data/final/C_{event}-{year}.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'
    dataset = pd.read_csv(inputfile)

    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1

    # get records for the cars that finish the race
    completed_car_numbers= dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)
    print('count of completed cars:', completed_car_count)
    print('completed cars:', completed_car_numbers)

    # make a copy; dataset keeps only the finishers
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    cldata = make_cl_data(dataset)
    flagdata = make_lapstatus_data(dataset)
    acldata = make_cl_data(alldata)

    return alldata, rankdata, acldata, flagdata
def nan_helper(y):
    """Locate NaNs in a 1d array.

    Returns the boolean NaN mask together with a converter that maps a
    logical mask to positional indices, which makes interpolation easy:

        nans, x = nan_helper(y)
        y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)
    return mask, lambda flags: flags.nonzero()[0]
def get_lap2nextpit(lap_status, maxlap=200):
    """
    For every lap, compute how many laps remain until the next pit stop.

    input:
        lap_status ; array of 0/1 pit flags per lap (float; NaN marks an
                     incomplete race)
        maxlap     ; the max lap number of the race
    output:
        array of the same shape; entry t is the gap to the next pit after
        lap t (NaN once no further pit exists)
    """
    upcoming = list(np.where(lap_status == 1)[0])

    # a complete race (no NaN) treats the finish line as a final "pit"
    if np.count_nonzero(np.isnan(lap_status)) == 0:
        upcoming.append(maxlap)

    gaps = np.zeros_like(lap_status)
    gaps[:] = np.nan
    if not upcoming:
        # no pit at all: everything stays NaN
        return gaps

    ptr = 0
    for lap in range(len(lap_status)):
        if lap >= upcoming[ptr]:
            # passed the current pit, advance to the next one
            ptr += 1
            if ptr >= len(upcoming):
                break
        gaps[lap] = upcoming[ptr] - lap
    return gaps
def get_lapdata(acldata):
    """
    Flatten acldata into an array ordered by lap then car, adding the
    time-behind-leader column.

    input:
        acldata['car_number','completed_laps','time_diff','rank',
                'track_status','lap_status','elapsed_time']
    output:
        np.array rows: [car_number, lap, time_diff, rank, track_status,
                        lap_status, time_behind, elapsed_time]
        (mixed types, so numpy stores the rows as strings)
    """
    COL_COMPLETED_LAPS = 1
    COL_ELAPSED_TIME = 6
    maxlap = np.max(acldata['completed_laps'].values)

    time_behind = []
    for lap in range(1, maxlap+1):
        this_lap = acldata[acldata['completed_laps']==lap][
                     ['car_number','completed_laps','time_diff','rank',
                      'track_status', 'lap_status','elapsed_time']].values

        # leader's elapsed time on this lap.
        # bugfix: np.float was removed in numpy>=1.24; use the builtin float
        min_elapsed_time = np.nanmin(this_lap[:,COL_ELAPSED_TIME].astype(float))

        for row in this_lap:
            car_number = int(row[0])
            time_diff = row[2]
            rank = row[3]
            track_status = row[4]
            lap_status = row[5]
            # gap to the lap leader
            timebehind = float(row[COL_ELAPSED_TIME]) - min_elapsed_time

            time_behind.append([car_number, lap, time_diff,rank,track_status, lap_status,
                timebehind, float(row[COL_ELAPSED_TIME])])

    lapdata = np.array(time_behind)
    return lapdata
# Row indices into the per-car feature matrix datalist[car, feature, lap]
# built by get_laptime_dataset.
# features: laptime, rank, track_status, lap_status, timediff
LAPTIME = 0                 # lap time (stored from the 'time_diff' column)
RANK = 1                    # rank position at the end of the lap
TRACK_STATUS = 2            # 1 = yellow/caution flag ('Y'), 0 = green
LAP_STATUS = 3              # 1 = pit stop ('P') on this lap, 0 = normal
TIME_BEHIND = 4             # elapsed-time gap to the lap leader
CAUTION_LAPS_INSTINT = 5    # caution laps accumulated since the last pit stop
LAPS_INSTINT = 6            # laps driven since the last pit stop (pit age)
ELAPSED_TIME = 7            # cumulative race time at the end of the lap
LAP2NEXTPIT = 8             # laps remaining until this car's next pit stop
_featureCnt = 9             # number of feature rows above
def get_laptime_dataset(stagedata, inlap_status = 0):
    """
    Build the per-event laptime & rank tensor (adds caution_laps_instint,
    laps_instint, lap2nextpit).

    input:
        stagedata ; {event: (alldata, rankdata, acldata, flagdata)}
        inlap_status ; 0 = leave pit flags as-is; 1 = also mark the lap
                       BEFORE each pit lap; otherwise mark the lap AFTER it
    output:
        laptime & rank data:
        [(
        eventid,
        carids : rowid -> carno,
        datalist: #car_number x features x #totallaps (padded by NaN)
            entry rows indexed by LAPTIME..LAP2NEXTPIT above
        )]
    """
    laptime_data = []
    for event in stagedata.keys():
        print(f'start event: {event}')
        eventid = gvar.events_id[event]
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        totalcars = len(carlist)
        totallaps = len(laplist)

        # carnumber -> carid and the reverse mapping
        carids={key:idx for idx, key in enumerate(carlist)}
        decode_carids={idx:key for idx, key in enumerate(carlist)}

        # per-car stint counters
        lap_instint = {carids[x]:0 for x in carlist}
        caution_instint = {carids[x]:0 for x in carlist}

        # array: car x feature x lap, NaN-padded.
        # bugfix: np.NaN alias was removed in numpy 2.0 -> use np.nan; the
        # unused laptime/rank scratch arrays were dropped as dead code
        datalist = np.empty((totalcars, _featureCnt, totallaps-1))
        datalist[:] = np.nan

        # rows: car_number, lap, time_diff, rank, track_status, lap_status,
        #       time_behind, elapsed_time
        lapdata = get_lapdata(acldata)

        for row in lapdata:
            # skip the grid row (completed_laps == 0)
            if int(row[1]) == 0:
                continue

            # decode the row into the feature tensor
            car_number = carids[int(row[0])]
            completed_laps = int(row[1])-1
            time_diff = float(row[2])
            rank_pos = int(row[3])  # renamed to avoid shadowing module names
            track_status = 1 if row[4]=='Y' else 0
            lap_status = 1 if row[5]=='P' else 0
            time_behind = float(row[6])

            datalist[car_number, LAPTIME, completed_laps] = time_diff
            datalist[car_number, RANK, completed_laps] = rank_pos
            datalist[car_number, TRACK_STATUS, completed_laps] = track_status
            datalist[car_number, LAP_STATUS, completed_laps] = lap_status
            datalist[car_number, TIME_BEHIND, completed_laps] = time_behind
            datalist[car_number, ELAPSED_TIME, completed_laps] = float(row[7])

            # stint counters: cautions accumulate, pit stop resets the stint
            if track_status == 1:
                caution_instint[car_number] += 1
            lap_instint[car_number] += 1
            if lap_status == 1:
                # new stint
                lap_instint[car_number] = 0
                caution_instint[car_number] = 0

            # optionally propagate the pit flag to the in-lap / out-lap
            if (inlap_status!=0):
                if inlap_status == 1:
                    # set the previous lap of 'P'
                    if completed_laps > 0:
                        datalist[car_number, LAP_STATUS, completed_laps-1] = 1
                else:
                    # set the next lap of 'P'
                    if completed_laps +1 < totallaps:
                        datalist[car_number, LAP_STATUS, completed_laps + 1] = 1

            datalist[car_number, LAPS_INSTINT, completed_laps] = lap_instint[car_number]
            datalist[car_number, CAUTION_LAPS_INSTINT, completed_laps] = caution_instint[car_number]

        # derive lap2nextpit from the finished pit-flag rows
        for caridx in range(datalist.shape[0]):
            pit_flags = datalist[caridx, LAP_STATUS, :]
            lap2nextpit = get_lap2nextpit(pit_flags)
            datalist[caridx, LAP2NEXTPIT, :] = lap2nextpit

        # add one record per event
        laptime_data.append([eventid, decode_carids, datalist])
        print('event=%s, records=%s'%(event, datalist.shape))

    return laptime_data
# In[ ]:
def nan_helper(y):
    """Return (nan_mask, index_fn) for a 1d array.

    ``nan_mask`` is a boolean array marking NaNs; ``index_fn`` converts any
    logical mask into positional indices, the typical use being linear
    interpolation of the NaN entries:

        nans, x = nan_helper(y)
        y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    where_nan = np.isnan(y)
    to_positions = lambda logical: logical.nonzero()[0]
    return where_nan, to_positions
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# Feature-row indices of the laptime_data tensor [car, feature, lap]
# (mirrors LAPTIME..LAP2NEXTPIT above under the names used by the
# dataset-preparation code).
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSED_TIME= 7
COL_LAP2NEXTPIT = 8
#_featureCnt = 9

# added new features (filled in by prepare_laptimedata)
COL_LEADER_PITCNT = 9        # pits taken this lap by cars ranked ahead
COL_TOTAL_PITCNT = 10        # pits taken this lap by the whole field
COL_SHIFT_TRACKSTATUS = 11   # track status shifted left by shift_len laps
COL_SHIFT_LAPSTATUS = 12     # pit flag shifted left by shift_len laps
COL_SHIFT_LEADER_PITCNT = 13 # shifted leader pit count (oracle feature)
COL_SHIFT_TOTAL_PITCNT = 14  # shifted total pit count (oracle feature)
COL_LASTFEATURE = 14

# dynamically extended space in simulation (save slots for restoring state)
COL_TRACKSTATUS_SAVE = COL_LASTFEATURE+1
COL_LAPSTATUS_SAVE = COL_LASTFEATURE+2
COL_CAUTION_LAPS_INSTINT_SAVE = COL_LASTFEATURE+3
COL_LAPS_INSTINT_SAVE= COL_LASTFEATURE+4
COL_ENDPOS = COL_LASTFEATURE+5

# Bitmask flags selecting which feature rows get_real_features emits.
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
FEATURE_LEADER_PITCNT = 8
FEATURE_TOTAL_PITCNT = 16
FEATURE_SHIFT_TRACKSTATUS = 32
FEATURE_SHIFT_LAPSTATUS = 64
FEATURE_SHIFT_LEADER_PITCNT = 128
FEATURE_SHIFT_TOTAL_PITCNT = 256

# flag -> (long name, one-char code) used by decode_feature_mode
_feature2str= {
        FEATURE_STATUS : ("FEATURE_STATUS",'S'),
        FEATURE_PITAGE : ("FEATURE_PITAGE",'A'),
        FEATURE_LEADER_PITCNT : ("FEATURE_LEADER_PITCNT",'L'),
        FEATURE_TOTAL_PITCNT : ("FEATURE_TOTAL_PITCNT",'T'),
        FEATURE_SHIFT_TRACKSTATUS : ("FEATURE_SHIFT_TRACKSTATUS",'Y'),
        FEATURE_SHIFT_LAPSTATUS : ("FEATURE_SHIFT_LAPSTATUS",'P'),
        FEATURE_SHIFT_LEADER_PITCNT : ("FEATURE_SHIFT_LEADER_PITCNT",'L'),
        FEATURE_SHIFT_TOTAL_PITCNT : ("FEATURE_SHIFT_TOTAL_PITCNT",'T')
        }

# Bitmask flags for the oracle/test mode passed to make_dataset_byevent.
MODE_ORACLE = 0
MODE_NOLAP = 1         # zero out the pit-stop covariate
MODE_NOTRACK = 2       # zero out the track-status covariate
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8

#MODE_STR={MODE_ORACLE:'oracle', MODE_NOLAP:'nolap',MODE_NOTRACK:'notrack',MODE_TEST:'test'}
#_feature_mode = FEATURE_STATUS
def decode_feature_mode(feature_mode):
    """Print the names of the features enabled in *feature_mode* and return
    a compact flag string: one char per known feature, '0' when disabled."""
    enabled_names = []
    compact = []
    for flag, (longname, shortname) in _feature2str.items():
        if test_flag(feature_mode, flag):
            enabled_names.append(longname)
            compact.append(shortname)
        else:
            compact.append('0')
    print(' '.join(enabled_names))
    return ''.join(compact)
def add_leader_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS, shift_len = 0,
                   dest_col = COL_LEADER_PITCNT,
                   verbose = False):
    """
    Add/update a feature in mat(car, feature, lap): for each car and lap,
    the number of pit stops taken on that lap by the cars ranked ahead.

    input:
        selmat   : laptime_data array [car, feature, lap]
        rank_col : feature row holding the rank used to order the field
        pit_col  : feature row holding the 0/1 pit-stop flag
        shift_len: read the rank order from lap - shift_len
        dest_col : feature row to write into; -1 appends a new row
    output:
        the (possibly enlarged) matrix with the leader-pit-count feature set
    """
    dim1, dim2, dim3 = selmat.shape

    # per-lap rank order: idx[:, lap] lists car rows from leader to last
    idx = np.argsort(selmat[:, rank_col, :], axis=0)
    # bugfix: the dead line `true_rank = np.argsort(idx,...).astype(np.float)`
    # was removed; np.float no longer exists on numpy>=1.24 and the value was
    # never used

    # gather pit flags in rank order (optionally from a shifted lap)
    pits = np.zeros((dim1, dim3))
    for lap in range(shift_len, dim3):
        col = idx[:, lap-shift_len]
        pits[:, lap] = selmat[col, pit_col, lap]

    # cumulative pits of all cars ranked ahead (exclusive of the car itself)
    leaderCnt = np.nancumsum(pits, axis=0) - pits
    if verbose:
        print('pits:\n')
        print(pits[:,190:])
        print('leaderCnt raw:\n')
        print(leaderCnt[:,190:])

    # remove nans
    leaderCnt[np.isnan(leaderCnt)] = 0
    if verbose:
        print('leaderCnt after remove nan:\n')
        print(leaderCnt[:,190:])

    if dest_col == -1:
        # append a new feature row
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        # in-place update
        newmat = selmat

    # scatter back from rank order to car-row order
    for lap in range(dim3):
        col = idx[:, lap]
        newmat[col, dest_col, lap] = leaderCnt[:, lap]

    # pad the new feature with NaN where the car's rank series has ended
    for rec in newmat:
        nan_count = int(np.sum(np.isnan(rec[rank_col,:])))
        if nan_count > 0:
            rec[dest_col, -nan_count:] = np.nan

    return newmat
def add_allpit_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS,
                   dest_col = COL_TOTAL_PITCNT,verbose = False):
    """
    Add/update a feature in mat(car, feature, lap): the total number of pit
    stops taken by the whole field on each lap (same value for every car).

    input:
        selmat   : laptime_data array [car, feature, lap]
        rank_col : feature row holding the rank (used only for NaN padding)
        pit_col  : feature row holding the 0/1 pit-stop flag
        dest_col : feature row to write into; -1 appends a new row
    output:
        the (possibly enlarged) matrix with the total-pit-count feature set
    """
    dim1, dim2, dim3 = selmat.shape

    # field-wide pit count per lap
    totalCnt = np.nansum(selmat[:, pit_col, :], axis=0).reshape((-1))
    if verbose:
        # bugfix: this branch used to print an undefined `pits` variable and
        # raised NameError whenever verbose=True
        print('pits:\n')
        print(selmat[:, pit_col, 190:])
        print('totalCnt raw:\n')
        print(totalCnt[190:])

    # remove nans
    totalCnt[np.isnan(totalCnt)] = 0
    if verbose:
        print('totalCnt after remove nan:\n')
        print(totalCnt[190:])

    if dest_col == -1:
        # append a new feature row
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        # in-place update
        newmat = selmat

    for car in range(dim1):
        newmat[car, dest_col, :] = totalCnt

    # pad the new feature with NaN where the car's rank series has ended
    for rec in newmat:
        nan_count = int(np.sum(np.isnan(rec[rank_col,:])))
        if nan_count > 0:
            rec[dest_col, -nan_count:] = np.nan

    return newmat
def add_shift_feature(selmat, rank_col=COL_RANK, shift_col=COL_LAPSTATUS, shift_len = 2,
                      dest_col = -1,verbose = False):
    """
    Add/update a feature in mat(car, feature, lap): shift_col shifted left
    by shift_len laps over each car's valid range.
    warning: these are oracle features, be careful not to let future rank
    positions leak into training.

    input:
        selmat   : laptime_data array [car, feature, lap]
        rank_col : feature row whose non-NaN span defines the valid laps
        shift_col: feature row to shift
        shift_len: laps to shift left (lap t receives lap t+shift_len)
        dest_col : feature row to write into; -1 appends a new row
    output:
        the (possibly enlarged) matrix with the shifted feature set
    """
    dim1, dim2, dim3 = selmat.shape

    if dest_col == -1:
        # append a new feature row
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        # in-place update
        newmat = selmat

    for car in range(dim1):
        # NaN by default, zero over the car's valid laps
        newmat[car, dest_col, :] = np.nan

        # valid laps are where the rank series is defined
        # (cleanup: the old unused nan_helper()/nan_count computation removed)
        rec = selmat[car]
        recnnz = rec[shift_col, ~np.isnan(rec[rank_col,:])]
        reclen = len(recnnz)

        # shifted copy: lap t receives the value from lap t + shift_len;
        # the trailing shift_len valid laps stay 0
        newmat[car, dest_col, :reclen] = 0
        newmat[car, dest_col, :reclen-shift_len] = recnnz[shift_len:]

    return newmat
def prepare_laptimedata(laptime_data,
                       prediction_length, freq,
                       test_event = 'Indy500-2018',
                       train_ratio=0.8,
                       context_ratio = 0.,
                       shift_len = -1):
    """
    Prepare the laptime data for training:
    1. remove short ts
    2. rerank the remaining tss (true rank among the survivors)
    3. create the new pit-count and shifted oracle features

    input:
        laptime_data ; list of [eventid, carids, datalist[car, feature, lap]]
        shift_len    ; lap shift for the oracle features; -1 means use
                       prediction_length
    output:
        new representation of laptime_data with the extra feature rows
    """
    _laptime_data = laptime_data.copy()
    test_eventid = gvar.events_id[test_event]
    run_ts = COL_RANK

    # check shift len
    if shift_len < 0:
        shift_len = prediction_length
    print('prepare_laptimedata shift len:', shift_len)

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    new_data = []
    for _data in _laptime_data:
        # events are ordered; stop after the test event
        if _data[0] > test_eventid:
            print('skip this event:', _data[0])
            break

        if _data[0] == test_eventid:
            test_mode = True
        else:
            test_mode = False

        # statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            # use global train_len
            train_len = gvar._train_len if not test_mode else gvar._test_train_len

        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        print(f'before ====event:{gvar.events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # rerank because the short tss get removed
        if True:
            sel_rows = []
            # use to check the dimension of features
            input_feature_cnt = _data[2].shape[1]
            if input_feature_cnt < COL_LASTFEATURE + 1:
                print('create new features mode, feature_cnt:', input_feature_cnt)
            else:
                print('update features mode, feature_cnt:', input_feature_cnt)

            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()

                # remove nan (only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]

                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    print(f'rerank a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                else:
                    sel_rows.append(rowid)

            # get selected matrix
            sel_idx = np.array(sel_rows)
            selmat = _data[2][sel_idx]

            # true rank among the surviving cars, NaN-preserving.
            # bugfix: np.float was removed in numpy>=1.24; use builtin float
            mask = np.isnan(selmat[:,COL_RANK,:])
            idx = np.argsort(selmat[:,COL_RANK,:], axis=0)
            true_rank = np.argsort(idx, axis=0).astype(float)
            true_rank[mask] = np.nan

            if test_mode:
                #
                # for historical code mismatch, simulation does not run rerank
                #
                _data[2][sel_idx,COL_RANK,:] = true_rank + 1
            else:
                _data[2][sel_idx,COL_RANK,:] = true_rank

            # update the carno dict
            new_carids = {}
            for rowid in range(len(sel_idx)):
                carid = sel_idx[rowid]
                carno = _data[1][carid]
                new_carids[rowid] = carno

        # add new features
        # add leaderPitCnt
        if _data[0]==0:
            verbose = True
        else:
            verbose = False
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_LEADER_PITCNT
        data2_intermediate = add_leader_cnt(_data[2][sel_idx], shift_len = shift_len, dest_col=dest_col, verbose = verbose)

        # add totalPit
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_TOTAL_PITCNT
        data2_intermediate = add_allpit_cnt(data2_intermediate, dest_col=dest_col)

        #
        # add shift features, a fixed order, see the MACROS
        #COL_SHIFT_TRACKSTATUS = 11
        #COL_SHIFT_LAPSTATUS = 12
        #COL_SHIFT_LEADER_PITCNT = 13
        #COL_SHIFT_TOTAL_PITCNT = 14
        #
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TRACKSTATUS
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_TRACKSTATUS, shift_len = shift_len)

        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LAPSTATUS
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_LAPSTATUS, shift_len = shift_len)

        # leader_pitcnt can not be shift, target leaking, just do not use it
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LEADER_PITCNT
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_LEADER_PITCNT, shift_len = shift_len)

        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TOTAL_PITCNT
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_TOTAL_PITCNT, shift_len = shift_len)

        # final
        data2_newfeature = data2_intermediate

        new_data.append([_data[0], new_carids, data2_newfeature])

    return new_data
def get_real_features(feature_mode, rec, endpos):
    """
    Assemble the list of dynamic real-valued covariates selected by the
    bitmask *feature_mode*, each truncated to rec[col, :endpos].

    input:
        feature_mode : OR-ed FEATURE_* flags
        rec          : one car's [feature, lap] matrix
        endpos       : truncation point; <= 0 means the full length
    output:
        list of 1d arrays, appended in a fixed feature order
    """
    if endpos <= 0:
        endpos = rec.shape[1]

    # (flag, feature rows emitted when the flag is set) — fixed order
    selection = [
        (FEATURE_STATUS, (COL_TRACKSTATUS, COL_LAPSTATUS)),
        (FEATURE_PITAGE, (COL_LAPS_INSTINT,)),
        (FEATURE_LEADER_PITCNT, (COL_LEADER_PITCNT,)),
        (FEATURE_TOTAL_PITCNT, (COL_TOTAL_PITCNT,)),
        (FEATURE_SHIFT_TRACKSTATUS, (COL_SHIFT_TRACKSTATUS,)),
        (FEATURE_SHIFT_LAPSTATUS, (COL_SHIFT_LAPSTATUS,)),
        (FEATURE_SHIFT_LEADER_PITCNT, (COL_SHIFT_LEADER_PITCNT,)),
        (FEATURE_SHIFT_TOTAL_PITCNT, (COL_SHIFT_TOTAL_PITCNT,)),
    ]

    features = []
    for flag, cols in selection:
        if test_flag(feature_mode, flag):
            for col in cols:
                features.append(rec[col, :endpos])
    return features
def make_dataset_byevent(_laptime_data,
                     prediction_length, freq,
                     useeid = False,
                     run_ts=COL_LAPTIME,
                     test_event = 'Indy500-2018',
                     use_global_dict = True,
                     oracle_mode = MODE_ORACLE,
                     half_moving_win = True,
                     train_ratio=0.8,
                     log_transform = False,
                     context_ratio = 0.,
                     dorerank = True,
                     joint_train = 0,
                     test_cars = None
                ):
    """
    Split the time series into train and test GluonTS datasets.

    Non-test events go fully into the train set; the test event is turned
    into rolling-window test records (step 1) plus, when context_ratio != 0,
    a context prefix added to the train set.

    oracle_mode: MODE_NOTRACK / MODE_NOLAP zero the respective covariate to
    simulate prediction without that oracle information.

    Returns (train_ds, test_ds, train_set, test_set).
    """
    # bugfix: test_cars used a mutable default ([]); it is only read, but the
    # None-sentinel form avoids the shared-default pitfall
    if test_cars is None:
        test_cars = []

    # global setting
    feature_mode = gvar._feature_mode
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    train_set = []
    test_set = []
    totalTSCnt = 0
    totalTSLen = 0
    test_eventid = gvar.events_id[test_event]

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []

        if _data[0] == test_eventid:
            test_mode = True
        else:
            test_mode = False

        # statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            # use global train_len
            train_len = gvar._train_len if not test_mode else gvar._test_train_len

        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        print(f'after ====event:{gvar.events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()

            # remove nan (only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            totalTSCnt += 1
            totalTSLen += totallen
            if ( totallen < train_len + prediction_length):
                print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = gvar.global_carids[_data[1][rowid]]
            else:
                # simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            # check carno in test_cars, testmode only
            if len(test_cars)>0 and carno not in test_cars:
                continue

            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            # first, get target a copy
            # target can be COL_XXSTATUS
            if joint_train:
                target_cols = [run_ts, COL_LAPSTATUS]
                target_val = rec[target_cols].copy().astype(np.float32)
            else:
                target_val = rec[run_ts,:].copy().astype(np.float32)

            if log_transform:
                target_val = np.log(target_val + 1.0)

            # selection of features: zero covariates the mode forbids
            if test_flag(oracle_mode, MODE_NOTRACK):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                real_features = get_real_features(feature_mode, rec, -1)
                _train.append({'target': target_val,
                    'start': start,
                    'feat_static_cat': static_cat,
                    'feat_dynamic_real': real_features
                     }
                    )
            else:
                # reset train_len
                if context_ratio != 0.:
                    # add [0, context_len] to train set
                    if joint_train:
                        _train.append({'target': target_val[:,:context_len],
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': get_real_features(feature_mode, rec, context_len)
                             }
                            )
                    else:
                        _train.append({'target': target_val[:context_len],
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': get_real_features(feature_mode, rec, context_len)
                             }
                            )

                # testset: rolling window with step 1
                # (half_moving_win is currently overridden by step = -1)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                step = -1
                for endpos in range(totallen, context_len+prediction_length,
                        step):
                    # cleanup: dropped the dead track_rec/lap_rec/pitage_rec
                    # copies that were rebuilt every iteration and never used
                    real_features = get_real_features(feature_mode, rec, endpos)

                    if joint_train:
                        _test.append({'target': target_val[:,:endpos],
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': real_features
                             }
                            )
                    else:
                        _test.append({'target': target_val[:endpos],
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': real_features
                             }
                            )
                    test_rec_cnt += 1

            # check feature cnt
            featureCnt = len(real_features)
            # add one ts
            print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt},featureCnt:{featureCnt}')

        train_set.extend(_train)
        test_set.extend(_test)

    print(f'train len:{len(train_set)}, test len:{len(test_set)}, totsl TsCnt:{totalTSCnt}, total ts len:{totalTSLen}')
    train_ds = ListDataset(train_set, freq=freq,one_dim_target= False if joint_train else True)
    test_ds = ListDataset(test_set, freq=freq,one_dim_target= False if joint_train else True)

    return train_ds, test_ds, train_set, test_set
# In[ ]:
def init_estimator(model, gpuid, epochs=100, batch_size = 32,
        target_dim = 3, distr_output = None, use_feat_static = True):
    """
    Build the forecasting estimator (or baseline predictor) named by *model*.

    Args:
        model: one of the trainable gluonts models ('deepAR', 'deepAR-Oracle',
            'deepARW-Oracle', 'Transformer', 'Transformer-Oracle',
            'deepAR-multi', 'simpleFF', 'deepFactor', 'deepState') or the
            classic baselines ('ets', 'prophet', 'arima', 'naive').  The
            '-Oracle' variants enable dynamic real features.
        gpuid: mxnet gpu index; a negative value selects the cpu context.
        epochs: training epochs (trainable models only).
        batch_size: trainer batch size (trainable models only).
        target_dim: multivariate target dimension, used only by 'deepAR-multi'.
        distr_output: gluonts distribution output object, forwarded to most
            trainable models (not 'simpleFF'/'deepFactor'/'deepState').
        use_feat_static: enable static categorical features; when True the
            module-level *cardinality* is passed to the estimator.

    Returns:
        A gluonts estimator, or a predictor object for the baselines.

    Side effects:
        Reads the globals gvar.prediction_length / gvar.context_length /
        gvar.freq and (when use_feat_static) cardinality.  Calls
        sys.exit(-1) on an unknown model name.
    """
    # choose the mxnet context from the gpu id (negative -> cpu)
    if int(gpuid) < 0:
        ctx = "cpu"
    else:
        ctx = "gpu(%s)"%gpuid

    #global vars
    prediction_length = gvar.prediction_length
    context_length = gvar.context_length
    freq = gvar.freq

    if model == 'deepAR':
        # plain DeepAR, no dynamic (oracle) features
        if use_feat_static:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=True,
                cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
            )
        else:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=False,
                #cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
            )
    elif model == 'deepAR-Oracle':
        # DeepAR with dynamic real features (oracle information)
        if use_feat_static:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
            )
        else:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
            )
    elif model == 'deepARW-Oracle':
        # weighted DeepAR variant with dynamic real features
        if use_feat_static:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            #hybridize=False,
                            num_batches_per_epoch=100
                           )
            )
        else:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            #hybridize=False,
                            num_batches_per_epoch=100
                           )
            )
    elif model == 'Transformer':
        # Transformer, no dynamic features
        if use_feat_static:
            estimator = TransformerEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            #hybridize=False,
                            num_batches_per_epoch=100
                           )
            )
        else:
            estimator = TransformerEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            #hybridize=False,
                            num_batches_per_epoch=100
                           )
            )
    elif model == 'Transformer-Oracle':
        # Transformer with dynamic real features
        if use_feat_static:
            estimator = TransformerEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            #hybridize=False,
                            num_batches_per_epoch=100
                           )
            )
        else:
            estimator = TransformerEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            #hybridize=False,
                            num_batches_per_epoch=100
                           )
            )
    elif model == 'deepAR-multi':
        # multivariate DeepAR; distr_output argument is replaced by a
        # MultivariateGaussianOutput of dimension target_dim
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=use_feat_static,
            #cardinality=cardinality,
            use_feat_dynamic_real=False,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                        batch_size = batch_size,
                        epochs=epochs,
                        learning_rate=1e-3,
                        num_batches_per_epoch=100
                       ),
            distr_output=MultivariateGaussianOutput(dim=target_dim),
        )
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                        batch_size = batch_size,
                        epochs=epochs,
                        learning_rate=1e-3,
                        hybridize=False,
                        num_batches_per_epoch=100
                       )
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                        batch_size = batch_size,
                        epochs=epochs,
                        learning_rate=1e-3,
                        num_batches_per_epoch=100
                       )
        )
    elif model == 'deepState':
        # note: always uses static categorical features, regardless of
        # the use_feat_static argument
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                        batch_size = batch_size,
                        epochs=epochs,
                        learning_rate=1e-3,
                        num_batches_per_epoch=100
                       )
        )
    elif model == 'ets':
        estimator = RForecastPredictor(method_name='ets',freq= freq, prediction_length = prediction_length)
    elif model == 'prophet':
        estimator = ProphetPredictor(freq= freq, prediction_length = prediction_length)
    elif model == 'arima':
        estimator = RForecastPredictor(method_name='arima',freq= freq, prediction_length = prediction_length, trunc_length = 200)
    elif model == 'naive':
        estimator = NaivePredictor(freq= freq, prediction_length = prediction_length)
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)

    return estimator
# In[ ]:
#
# simulation engine general
#
def init_simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
        featuremode = stint.FEATURE_STATUS,
        pitmodel = 0,
        inlapmode=0,
        train_len = 40,test_train_len=40,
        joint_train = False,
        pitmodel_bias= 0, prepared_laptimedata = None):
    """
    Configure the global state of the *stint* simulation module.

    Pure side-effect function: initializes the pit model and writes the run
    configuration into stint's module-level variables; returns nothing.
    *predictionlen* is accepted for interface symmetry with simulation()
    but is not used here.

    input:
        prepared_laptimedata ; global
    """
    #
    # configurataion
    #
    # model path: <_dataset_id>/<_task_id>-<trainid>/
    #_dataset_id = 'indy2013-2018-nocarid'
    # inlap status is assigned before stint.init() (order kept as-is)
    stint._inlap_status = inlapmode
    stint.init(pitmodel, pitmodel_bias= pitmodel_bias)
    # todo: add into stint code
    #here add new laptime_data with new features
    #
    stint.set_laptimedata(prepared_laptimedata)
    stint._dataset_id = datasetid
    stint._test_event = testevent
    #_test_event = 'Indy500-2019'
    stint._feature_mode = featuremode
    stint._context_ratio = 0.
    stint._task_id = taskid # rank,laptime, the trained model's task
    stint._run_ts = runts #COL_LAPTIME,COL_RANK
    stint._exp_id=expid #rank, laptime, laptim2rank, timediff2rank...
    stint._use_mean = True
    stint._train_len = train_len
    stint._test_train_len = test_train_len
    stint._joint_train = joint_train
def simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
        datamode, loopcnt, featuremode = stint.FEATURE_STATUS,
        pitmodel = 0, model = 'oracle', inlapmode=0, train_len = 40,test_train_len=40,
        forecastmode = 'shortterm', joint_train = False,
        pitmodel_bias= 0, prepared_laptimedata = None,
        epochs = 1000):
    """
    Configure the stint module, load the trained model and run *loopcnt*
    simulations, printing the mean evaluation metrics.

    forecastmode selects the simulation/evaluation pair:
        'shortterm' -> run_simulation_shortterm / get_evalret_shortterm
        'stint'     -> run_simulation_pred / get_evalret
    Any other value prints a message and stops the loop early.

    Returns:
        (b, ret2) where b is the stacked per-run metric array and ret2 maps
        run index -> raw simulation result.

    input:
        prepared_laptimedata ; global
    """
    #
    # configurataion
    #
    # model path: <_dataset_id>/<_task_id>-<trainid>/
    #_dataset_id = 'indy2013-2018-nocarid'
    # inlap status is assigned before stint.init() (order kept as-is)
    stint._inlap_status = inlapmode
    stint.init(pitmodel, pitmodel_bias= pitmodel_bias)
    # todo: add into stint code
    #here add new laptime_data with new features
    #
    stint.set_laptimedata(prepared_laptimedata)
    #stint.set_laptimedata(laptime_data)
    stint._dataset_id = datasetid
    stint._test_event = testevent
    #_test_event = 'Indy500-2019'
    stint._feature_mode = featuremode
    stint._context_ratio = 0.
    stint._task_id = taskid # rank,laptime, the trained model's task
    stint._run_ts = runts #COL_LAPTIME,COL_RANK
    stint._exp_id=expid #rank, laptime, laptim2rank, timediff2rank...
    stint._use_mean = True
    stint._train_len = train_len
    stint._test_train_len = test_train_len
    stint._joint_train = joint_train
    # extra settings only relevant to stint-level forecasting
    if forecastmode == 'stint':
        stint._trim = 0
        stint._debug_carlist=[]
        stint._force_endpit_align = False
        stint._include_endpit = True
    predictor = stint.load_model(predictionlen, model,trainid='indy500',epochs = epochs, exproot='./')
    ret2 = {}
    for i in range(loopcnt):
        #df, full_samples, full_tss
        if forecastmode == 'shortterm':
            ret2[i] = stint.run_simulation_shortterm(predictor, predictionlen, stint.freq, datamode=datamode)
        elif forecastmode == 'stint':
            ret2[i] = stint.run_simulation_pred(predictor, predictionlen, stint.freq, datamode=datamode)
        else:
            print('forecastmode not support:', forecastmode)
            break
    # evaluate every completed run and average the metrics
    acc = []
    for i in ret2.keys():
        if forecastmode == 'shortterm':
            df = ret2[i][0]
            _x = stint.get_evalret_shortterm(df)
        elif forecastmode == 'stint':
            df = ret2[i]
            _x = stint.get_evalret(df)
        acc.append(_x)
    b = np.array(acc)
    print(np.mean(b, axis=0))
    #save keys
    #stint._pitmodel.save_keys('pitmodel-keys.pickle')
    return b, ret2
def long_predict(predictor, sampleCnt = 100):
    """
    Build one long-horizon forecast by stitching together the farthest
    (last-column) sample of every rolling short-term forecast.

    input:
        test_ds ; global var
        predictor
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,           # test dataset
        predictor=predictor,       # predictor
        num_samples=sampleCnt,     # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    start_time, row = next(tss[0].iterrows())

    def minutes_from_start(idx):
        # offset (in minutes) of forecast idx relative to the series start
        delta = forecasts[idx].start_date - start_time
        return delta.days * 24 * 60 + delta.seconds // 60

    first_start = minutes_from_start(-1)
    last_start = minutes_from_start(0)
    print(first_start, last_start)

    import copy
    target = copy.deepcopy(forecasts[-1])
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)

    # one wide sample matrix spanning all stitched positions, NaN elsewhere
    stitched = np.full((nsample, last_start - first_start + npredict), np.nan)
    for idx, fc in enumerate(forecasts):
        pos = minutes_from_start(idx) - first_start
        # keep only the farthest-horizon column of this rolling forecast
        stitched[:, pos + npredict - 1] = fc.samples[:, -1]
    target.samples = stitched

    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target, tss[0]
def get_alldf(dfx, year=2018, forecast_mode = 'shortterm'):
    """
    Concatenate the per-run result dataframes stored in *dfx*.

    In 'shortterm' mode each value of *dfx* is a tuple whose first element
    is the dataframe; otherwise the value itself is the dataframe.  *year*
    is accepted for interface compatibility but unused.  A single run is
    returned as-is (no concat).
    """
    frames = [dfx[key][0] if forecast_mode == 'shortterm' else dfx[key]
              for key in dfx.keys()]
    return pd.concat(frames) if len(frames) > 1 else frames[0]
def get_alldf_mode(dfx, year=2018,mode=0, forecast_mode = 'shortterm'):
    """
    Aggregate repeated runs into one record per (carno, startlap).

    mode:
        0; mode
        1; mean
        2; median
    The aggregated pred_endrank replaces the first run's value, and
    pred_diff / pred_sign are recomputed from it.
    """
    dfall = get_alldf(dfx, year=year, forecast_mode = forecast_mode)
    cars = set(dfall.carno.values)

    aggregated = []
    for car in cars:
        laps = set(dfall[dfall['carno']==car].startlap.values)
        for startlap in laps:
            group = dfall[(dfall['carno']==car) & (dfall['startlap']==startlap)]

            # collapse the per-run pred_endrank values into one estimate
            if mode == 0:
                pred_endrank = stats.mode(group.pred_endrank.values).mode[0]
            elif mode == 1:
                pred_endrank = np.mean(group.pred_endrank.values)
            elif mode == 2:
                pred_endrank = np.median(group.pred_endrank.values)

            rec = group.to_numpy()[0,:]
            rec[6] = pred_endrank              # pred_endrank column
            rec[7] = pred_endrank - rec[2]     # pred_diff = pred - startrank
            rec[8] = 0 if rec[7] == 0 else (1 if rec[7] > 0 else -1)
            aggregated.append(rec)

    base_cols = ['carno', 'startlap', 'startrank',
                 'endrank', 'diff', 'sign',
                 'pred_endrank', 'pred_diff', 'pred_sign']
    if forecast_mode == 'shortterm':
        dfout = pd.DataFrame(aggregated, columns = base_cols)
    else:
        dfout = pd.DataFrame(aggregated, columns = base_cols + ['endlap','pred_endlap'])

    print('df size:', len(dfout))
    return dfout
def get_allsamples(dfx, year=2018):
    """
    Merge multiple runs into one sample matrix per car: row *runid* holds
    the per-lap nan-mean of that run's forecast samples.

    NOTE: the run keys of *dfx* are used directly as row indices, so they
    are assumed to be the integers 0..runcnt-1.
    """
    runs = list(dfx.keys())
    runcnt = len(runs)

    # tss/shape information comes from the first run
    full_tss = dfx[runs[0]][2]
    carlist = list(full_tss.keys())
    samplecnt, lapcnt = dfx[runs[0]][1][carlist[0]].shape
    print('sacmplecnt:', samplecnt, 'lapcnt:',lapcnt,'runcnt:', runcnt)

    # one (runcnt x lapcnt) matrix per car
    full_samples = {carno: np.zeros((runcnt, lapcnt)) for carno in carlist}

    for runid in runs:
        run_forecast = dfx[runid][1]
        for carno in carlist:
            # average across the sample axis for this run
            full_samples[carno][runid, :] = np.nanmean(run_forecast[carno], axis=0)

    return full_samples, full_tss
#straight implementation of prisk
def quantile_loss(target, quantile_forecast, q):
    """
    Pinball (quantile) loss at level *q*, summed over non-NaN entries and
    doubled to match the gluonts convention.
    """
    diff = quantile_forecast - target
    indicator = (target <= quantile_forecast) - q
    return 2.0 * np.nansum(np.abs(diff * indicator))
def abs_target_sum(target):
    """Sum of absolute target values ignoring NaNs (prisk denominator)."""
    magnitudes = np.abs(target)
    return np.nansum(magnitudes)
def prisk(full_samples, full_tss, verbose = False):
    """
    Compute weighted quantile losses via the gluonts Evaluator, skipping
    the first 12 laps of every car's sample matrix.
    """
    freq = '1min'
    start = pd.Timestamp("01-01-2019", freq=freq)

    tss, forecasts, forecasts_mean = [], [], []
    for car in full_tss.keys():
        car_samples = full_samples[car][:, 12:]
        fc = SampleForecast(samples = car_samples, freq=freq, start_date=start + 12)

        # single-path forecast made of the sample mean (kept for parity,
        # not fed to the evaluator below)
        mean_samples = np.mean(car_samples, axis =0, keepdims=True)
        fc_mean = SampleForecast(samples = mean_samples, freq=freq, start_date=start + 12)

        index = pd.date_range(start='2019-01-01 00:00:00', freq = 'T', periods = len(full_tss[car]))
        tss.append(pd.DataFrame(index = index, data = full_tss[car]))
        forecasts.append(fc)
        forecasts_mean.append(fc_mean)

    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(tss))
    if verbose:
        print(json.dumps(agg_metrics, indent=4))
    print(agg_metrics["wQuantileLoss[0.1]"], agg_metrics["wQuantileLoss[0.5]"],agg_metrics["wQuantileLoss[0.9]"])
    return agg_metrics
def prisk_direct_bysamples2(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    Direct prisk computation over <samples, tss> (kept as a duplicate of
    prisk_direct_bysamples for compatibility).

    target: endrank
    forecast: pred_endrank
    item_id: <carno, startlap>
    """
    carlist = full_tss.keys()
    prisk = np.zeros((len(carlist), len(quantiles)))
    target_sum = np.zeros((len(carlist)))
    aggrisk = np.zeros((len(quantiles)))

    for carid, carno in enumerate(carlist):
        forecast = full_samples[carno]
        target = full_tss[carno]

        # per-quantile forecast curves: len(quantiles) x lapcnt
        quantile_forecasts = np.quantile(forecast, quantiles, axis=0)
        for idx, q in enumerate(quantiles):
            q_forecast = quantile_forecasts[idx]
            prisk[carid, idx] = quantile_loss(target[startid:], q_forecast[startid:], q)
        target_sum[carid] = abs_target_sum(target[startid:])

        if verbose==True and carno==3:
            print('target:', target[startid:])
            print('forecast:', q_forecast[startid:])
            print('target_sum:', target_sum[carid])
            print('quantile_forecasts:', quantile_forecasts[:,startid:])

    # aggregate: total loss per quantile over total |target|
    prisk_sum = np.nansum(prisk, axis=0)
    if verbose==True:
        print('prisk:',prisk)
        print('prisk_sum:',prisk_sum)
        print('target_sum:',target_sum)
    denom = np.sum(target_sum)
    for idx, q in enumerate(quantiles):
        aggrisk[idx] = np.divide(prisk_sum[idx], denom)

    agg_metrics = {f'wQuantileLoss[{q}]': aggrisk[idx] for idx, q in enumerate(quantiles)}
    print(agg_metrics.values())
    return agg_metrics, aggrisk
# In[ ]:
def prisk_direct_bysamples(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    calculate prisk by <samples, tss> directly (equal to gluonts implementation)

    target: endrank
    forecast: pred_endrank
    item_id: <carno, startlap>
    """
    carlist = full_tss.keys()
    prisk = np.zeros((len(carlist), len(quantiles)))
    target_sum = np.zeros((len(carlist)))
    aggrisk = np.zeros((len(quantiles)))

    for carid, carno in enumerate(carlist):
        forecast = full_samples[carno]
        target = full_tss[carno]

        # quantile curves over the sample axis: len(quantiles) x lapcnt
        quantile_forecasts = np.quantile(forecast, quantiles, axis=0)
        for idx, q in enumerate(quantiles):
            q_forecast = quantile_forecasts[idx]
            prisk[carid, idx] = quantile_loss(target[startid:], q_forecast[startid:], q)
        target_sum[carid] = abs_target_sum(target[startid:])

        if verbose==True and carno==3:
            print('target:', target[startid:])
            print('forecast:', q_forecast[startid:])
            print('target_sum:', target_sum[carid])
            print('quantile_forecasts:', quantile_forecasts[:,startid:])

    # aggregate: total loss per quantile divided by total |target|
    prisk_sum = np.nansum(prisk, axis=0)
    if verbose==True:
        print('prisk:',prisk)
        print('prisk_sum:',prisk_sum)
        print('target_sum:',target_sum)
    denom = np.sum(target_sum)
    for idx, q in enumerate(quantiles):
        aggrisk[idx] = np.divide(prisk_sum[idx], denom)

    agg_metrics = {f'wQuantileLoss[{q}]': aggrisk[idx] for idx, q in enumerate(quantiles)}
    print(agg_metrics.values())
    return agg_metrics, aggrisk
def clear_samples(full_samples, full_tss, clearidx):
    """
    Return deep copies of <full_samples, full_tss> with the laps listed in
    *clearidx* masked out as NaN; the inputs are left untouched.
    """
    import copy
    ret_samples = copy.deepcopy(full_samples)
    ret_tss = copy.deepcopy(full_tss)

    for carno in ret_tss.keys():
        # mask the requested lap columns for every sample row of this car
        ret_samples[carno][:, clearidx] = np.nan
        ret_tss[carno][clearidx] = np.nan

    return ret_samples, ret_tss
def do_rerank(dfout, short=True):
    """
    Re-sort the predicted end ranks globally within each startlap.

    The (possibly float-valued) pred_endrank of every car in a lap is
    replaced by its 0-based rank among that lap's predictions, and
    pred_diff / pred_sign are recomputed against endrank.  Output values
    are cast to int; with short=True the endlap columns are dropped.
    """
    cols=['carno','startlap','startrank','endrank','diff','sign','pred_endrank','pred_diff','pred_sign','endlap','pred_endlap']
    colid = {name: pos for pos, name in enumerate(cols)}

    print('rerank...')
    stacked = None
    for lap in set(dfout.startlap.values):
        block = dfout[dfout['startlap']==lap].to_numpy()

        # rank of each prediction within this lap (argsort of argsort)
        order = np.argsort(block[:, colid['pred_endrank']], axis=0)
        block[:, colid['pred_endrank']] = np.argsort(order, axis=0)

        # recompute the derived columns from the re-ranked predictions
        block[:, colid['pred_diff']] = block[:, colid['pred_endrank']] - block[:, colid['endrank']]
        for rec in block:
            d = rec[colid['pred_diff']]
            rec[colid['pred_sign']] = 0 if d == 0 else (1 if d > 0 else -1)

        stacked = block if stacked is None else np.vstack((stacked, block))

    if short:
        return pd.DataFrame(stacked.astype(int), columns = cols[:-2])
    return pd.DataFrame(stacked.astype(int), columns = cols)
# In[ ]:
def long_predict_bymloutput_multirun(output, dfin, test_ds, predictor, sampleCnt=100):
    """
    Stitch ML-model predictions (several runs per startlap) onto the
    timeline geometry produced by the gluonts predictor.  For each rolling
    forecast position the pred_endrank values of the matching startlap in
    *dfin* become the sample column (shifted +1 for visualization).

    input:
        test_ds
        predictor
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds,        # test dataset
        predictor = predictor,    # predictor
        num_samples=sampleCnt,    # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    start_time, row = next(tss[0].iterrows())

    def minutes_from_start(idx):
        # offset (in minutes) of forecast idx relative to the series start
        delta = forecasts[idx].start_date - start_time
        return delta.days * 24 * 60 + delta.seconds // 60

    first_start = minutes_from_start(-1)
    last_start = minutes_from_start(0)
    print('first start:', first_start, 'last start:', last_start)

    import copy
    target = copy.deepcopy(forecasts[-1])
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)

    stitched = np.full((nsample, last_start - first_start + npredict), np.nan)
    for idx in range(len(forecasts)):
        start_pos = minutes_from_start(idx)
        pos = start_pos - first_start + npredict - 1
        # map the forecast start back to the ml output's startlap index
        # ( 1 ... 10 | 11 <- start pos ; 0 ... 9 | 10 <- 9 is the startlap )
        startlap = start_pos - 2
        match = dfin[dfin['startlap']== startlap]
        if len(match) > 0:
            # ranks are shifted to start from 1 for visualization;
            # one value per run fills the whole sample column
            stitched[:, pos] = match.pred_endrank.values + 1
    target.samples = stitched

    print('multirun target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bymloutput(output, dfin, test_ds, predictor):
    """
    Stitch single-run ML-model predictions onto the gluonts forecast
    timeline.  For each rolling forecast position the first pred_endrank of
    the matching startlap in *dfin* fills the sample column (shifted +1
    for visualization).

    input:
        test_ds
        predictor
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds,     # test dataset
        predictor = predictor, # predictor
        num_samples=100,       # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    start_time, row = next(tss[0].iterrows())

    def minutes_from_start(idx):
        # offset (in minutes) of forecast idx relative to the series start
        delta = forecasts[idx].start_date - start_time
        return delta.days * 24 * 60 + delta.seconds // 60

    first_start = minutes_from_start(-1)
    last_start = minutes_from_start(0)
    print('first start:', first_start, 'last start:', last_start)

    import copy
    target = copy.deepcopy(forecasts[-1])
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)

    stitched = np.full((nsample, last_start - first_start + npredict), np.nan)
    for idx in range(len(forecasts)):
        start_pos = minutes_from_start(idx)
        pos = start_pos - first_start + npredict - 1
        # map the forecast start back to the ml output's startlap index
        # ( 1 ... 10 | 11 <- start pos ; 0 ... 9 | 10 <- 9 is the startlap )
        startlap = start_pos - 2
        match = dfin[dfin['startlap']== startlap]
        if len(match) > 0:
            # rank start from 1 for visualization; broadcast to all samples
            stitched[:, pos] = match.pred_endrank.values[0] + 1
    target.samples = stitched

    print('target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bysamples(output, samples, tss, test_ds, predictor):
    """
    Build a long forecast directly from precomputed sample paths; the
    gluonts predictor run is only used to recover the timeline geometry
    (start offsets and forecast width).

    input:
        samples
        tss
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds,      # test dataset
        predictor= predictor,   # predictor
        num_samples=100,        # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    # NOTE: this deliberately shadows the *tss* argument, as in the
    # original implementation
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    start_time, row = next(tss[0].iterrows())

    def minutes_from_start(idx):
        delta = forecasts[idx].start_date - start_time
        return delta.days * 24 * 60 + delta.seconds // 60

    first_start = minutes_from_start(-1)
    last_start = minutes_from_start(0)
    print(first_start, last_start)

    import copy
    target = copy.deepcopy(forecasts[-1])
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)

    # sample array size: last_start - first_start + npredict
    arraysize = last_start - first_start + npredict
    # +1: ranks are shifted to start from 1 for visualization
    target.samples = samples[:, first_start:first_start + arraysize] + 1

    print('long_predict_bysamples==>target samples shape:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target, tss[0]
#
# different idx format to bymloutput
#
def long_predict_bydf(output, dfin, test_ds, predictor):
    """
    Stitch dataframe predictions onto the gluonts forecast timeline.
    Same mechanics as long_predict_bymloutput but with a different
    startlap offset convention (start_pos - 1 instead of - 2).

    input:
        test_ds
        predictor
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds,     # test dataset
        predictor = predictor, # predictor
        num_samples=100,       # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    start_time, row = next(tss[0].iterrows())

    def minutes_from_start(idx):
        delta = forecasts[idx].start_date - start_time
        return delta.days * 24 * 60 + delta.seconds // 60

    first_start = minutes_from_start(-1)
    last_start = minutes_from_start(0)
    print('first start:', first_start, 'last start:', last_start)

    import copy
    target = copy.deepcopy(forecasts[-1])
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)

    stitched = np.full((nsample, last_start - first_start + npredict), np.nan)
    for idx in range(len(forecasts)):
        start_pos = minutes_from_start(idx)
        pos = start_pos - first_start + npredict - 1
        # df rows are indexed by startlap = start position - 1
        startlap = start_pos - 1
        match = dfin[dfin['startlap']== startlap]
        if len(match) > 0:
            # rank start from 1 for visualization; broadcast to all samples
            stitched[:, pos] = match.pred_endrank.values[0] + 1
    target.samples = stitched

    print('target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def get_ranknet_multirun(retdata, testcar, test_ds, predictor, sampleCnt=100):
    """
    Collect every run's shortterm result for *testcar*, shift the indices
    to the ml-model convention (0-based), and stitch the runs onto the
    forecast timeline.
    """
    per_run = [retdata[key][0] for key in retdata.keys()]
    dfin_ranknet = pd.concat([df[df['carno']==testcar] for df in per_run])
    print('dfin_ranknet size:', len(dfin_ranknet))

    # ranknet output is 1-based; the ml pipeline expects 0-based values
    for col in ('startlap', 'startrank', 'endrank'):
        dfin_ranknet[col] = dfin_ranknet[col] - 1

    target_ranknet, tss_ranknet = long_predict_bymloutput_multirun('ranknet-rank', dfin_ranknet,
                                                                   test_ds, predictor, sampleCnt=sampleCnt)
    return target_ranknet, tss_ranknet
# In[ ]:
def ploth(ts_entry, forecast_entry, pits,caution, pitstop,outputfile,
        colors = ['r','g','m'],
        plabels= ['observed','svr','arima','ranknet'],
        ylabel = 'RANK'):
    """
    Draw one stacked subplot per forecast: the observed series, a
    2-lap-shifted "CurRank" curve, the probabilistic forecast band,
    pit-stop markers and race-status bands; saves <outputfile>.pdf.

    Args:
        ts_entry: list of observed series (pandas objects, one per subplot).
        forecast_entry: list of gluonts-style forecast objects (same length).
        pits: pit records; col 0 = lap, col 1 = rank, col 2 = laptime.
        caution, pitstop: race-status arrays passed to add_status().
        outputfile: figure title and basename of the saved pdf.
        colors / plabels: per-subplot line color and legend label
            (plabels[0] labels the observations, plabels[idx+1] the forecast).
        ylabel: 'RANK' or a laptime label; switches axis limits and which
            pit column is drawn.

    NOTE: the mutable default args (colors/plabels) are never mutated here.
    """
    #plot_length = int(forecast_entry[0].samples.shape[1] *1.2)
    #plot_length = forecast_entry[0].samples.shape[1] + 10
    #prediction_intervals = (50.0, 90.0)
    prediction_intervals = [90.0]
    #legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    figcnt = len(forecast_entry)
    #fig, axs = plt.subplots(figcnt,1, figsize=(8,6))
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    #colors = ['r','g','m']
    #plabels = ['observed','svr','arima','ranknet']
    for idx in range(figcnt):
        ax = plt.subplot(figcnt, 1, idx+1)
        #ax = plt.subplot(1, figcnt, idx+1)
        #ts_entry.iloc[-plot_length:,0].plot(ax=axs, linewidth=1)  # plot the time series
        #ts_entry.iloc[-plot_length:,0].plot(ax=axs[idx], linewidth=1)  # plot the time series
        #plot_length = int(forecast_entry[idx].samples.shape[1] *1.2)
        # observed series
        ts_entry[idx].iloc[:,0].plot(linewidth=1, color='b',
                marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # currank: the observed series re-indexed 2 steps later, so the
        # current rank appears alongside the forecast horizon
        sv = ts_entry[idx].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq='1min') + 2
        date_index = pd.date_range(start, periods = len(sv)-2, freq='1min')
        df2 = pd.DataFrame(sv[:-2], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
                marker='+', alpha=0.7, zorder=-1, label='CurRank')
        #for idx in range(len(forecast_entry)):
        #    forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='g')
        # probabilistic forecast band for this subplot
        forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals,
                                    color=colors[idx],label=plabels[idx+1], zorder=10)
        #forecast_entry[1].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='b')
        #forecast_entry[2].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='r')
        #add mean line, compare with median
        #if forecast_entry[idx].samples.shape[0] > 1:
        if idx>3:
            # for the later subplots also overlay the sample mean
            mean_forecast = copy.deepcopy(forecast_entry[idx])
            mean_forecast.samples = np.mean(mean_forecast.samples, axis=0).reshape((1,-1))
            mean_forecast.copy_dim(0).plot(prediction_intervals=prediction_intervals,
                                    color='g',label='use-mean', zorder=10)
        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        #if idx==0:
        ax.set_ylabel(ylabel)
        if idx==0:
            plt.title(outputfile)
        # relabel the x ticks as 1-based lap numbers
        locs, labels = plt.xticks()
        #plt.xticks(locs, range(len(locs)))
        start_loc = locs[0]
        offset = range(0, 200, 5)
        #new_locs = range(start_loc , start_loc+200, 10)
        new_locs = [start_loc + x for x in offset]
        #new_labels = [str(x-start_loc + 1) for x in new_locs]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[idx])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            # NOTE: *xl* is not assigned in this branch; it carries over
            # from the previous loop iteration (only reached when figcnt > 1)
            xlim_h = len(ts_entry[idx])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        #ax.set_xlim((80,110))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)
        #add racestatus: pit markers use the rank column for RANK plots,
        #the laptime column otherwise (x positions offset by xl, see NOTE above)
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcar(carno):
    """
    Draw the rank-forecast comparison figure for one car (first 5 curves).

    input:
        alldata, rankdata; global data
    """
    tsss, targets = alldata[carno]
    status = get_racestatus(carno, rankdata)
    pits, caution, pitstop = status[0], status[2], status[3]
    print(np.where(pitstop==1))

    outname = 'ranknet-rf-rank-forecast-%d'%carno
    ploth(tsss[:5], targets[:5], pits, caution, pitstop, outname,
          colors = ['y','c','g','m','r'],
          plabels= ['observed','SVR','RF','Arima','RrankNet-Oracle','RrankNet-MLP'])
def plotcar_laptime(carno):
    """
    Draw the laptime-forecast comparison figure for one car.

    input:
        alldata, rankdata; global data
    """
    tsss, targets = alldata[carno]
    status = get_racestatus(carno, rankdata)
    pits, caution, pitstop = status[0], status[2], status[3]
    print(np.where(pitstop==1))

    outname = 'ranknet-oracle-laptime-forecast-%d'%carno
    ploth(tsss, targets, pits, caution, pitstop, outname,
          colors = ['m','r'],
          plabels= ['observed','RrankNet-Oracle','RrankNet-MLP'],
          ylabel='LapTime')
def plotrank(outputfile, mode='RANK' ):
    """
    One stacked subplot per car showing the rank (or laptime) curve with
    pit-stop markers and race-status bands; saves <outputfile>.pdf.

    input:
        alldata, rankdata; global data
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))

    for idx, carno in enumerate(list(alldata.keys())):
        # unpack kept for parity with the other plot helpers
        tsss, targets = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, rankdata)
        print(np.where(pitstop==1))

        ax = plt.subplot(figcnt, 1, idx+1)
        if mode == 'RANK':
            ax.plot(ranks, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='Rank')
            ax.set_ylim((-5,+35))
            ax.plot(pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,0, caution, pitstop)
        else:
            ax.plot(laptimes, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='LapTime')
            ax.set_ylim((30,140))
            ax.plot(pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,0, caution, pitstop,y=32, height=5)
        ax.set_xlim((0,200))
        ax.set_ylabel('car-%d'%carno)

    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcarx(carno):
    """
    Like plotcar, but slot 2 of the curves is replaced by the
    weighted-oracle run taken from *oracledata*.

    input:
        alldata, oracledata, rankdata; global data
    """
    tsss, targets = alldata[carno]
    oracle_tss, oracle_targets = oracledata[carno]
    # swap in the weighted-oracle series/forecast at position 2
    tsss[2] = oracle_tss[1]
    targets[2] = oracle_targets[1]

    status = get_racestatus(carno, rankdata)
    pits, caution, pitstop = status[0], status[2], status[3]
    print(np.where(pitstop==1))

    ploth(tsss[:5], targets[:5], pits, caution, pitstop,
          'ranknet-rf-rank-forecast-%d'%carno,
          colors = ['y','c','g','m','r'],
          plabels= ['observed','SVR','RF','Weighted-Oracle','RrankNet-Oracle','RrankNet-MLP'])
def plotoracle(alldata, carno, destdir):
    """
    Draw the oracle-forecast comparison figure for one car, saving the pdf
    under *destdir*.

    input:
        rankdata; global data
    """
    outputfile = destdir + 'ranknet-oracle-forecast-%d'%carno

    tsss, targets = alldata[carno]
    status = get_racestatus(carno, rankdata)
    pits, caution, pitstop = status[0], status[2], status[3]
    print(np.where(pitstop==1))

    ploth(tsss, targets, pits, caution, pitstop, outputfile,
          colors = ['y','c','g','m','r'],
          plabels= ['observed','1run-samples','1run-df','multimean','norerank-multimean','mrun-samples'])
def plotallcars(alldata, outputfile, drawid = 0,
        colors = ['g','c','m','r','y'],
        plabels= ['observed','1run-samples','1run-df','multimean','norerank-multimean','mrun-samples'],
        ylabel='RANK'):
    """
    plot a single fig for all cars (one subplot row per car) and save as PDF
    input:
        prediction_length,freq ; global var
        alldata, rankdata; global data
        drawid : long prediction result index in alldata[carno] to draw

    NOTE(review): `colors`/`plabels` are mutable default arguments (shared
    across calls); they are only read here, so harmless but worth confirming.
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))

    prediction_intervals = [90.0]
    #legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]

    font = {'family': 'serif',
            'color': 'darkred',
            'weight': 'normal',
            'size': 12,
            }

    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        #target_svr, target_rf,target_arima, target_oracle, target_ranknet_1run = savedata[carno]
        #target_oracle(by longpredict), tss_oracle_multirun,tss_ranknet_multirun
        ts_entry, forecast_entry = alldata[carno]

        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, rankdata)
        print(np.where(pitstop==1))

        ax = plt.subplot(figcnt, 1, idx+1)

        # observed
        ts_entry[drawid].iloc[:,0].plot(linewidth=1, color='b',
                marker='*', alpha=0.7, zorder=-1, label=plabels[0])

        # currank: observed series delayed by prediction_length, i.e. the
        # naive "rank stays the same" baseline
        sv = ts_entry[drawid].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq=freq) + prediction_length
        date_index = pd.date_range(start, periods = len(sv)-prediction_length, freq=freq)
        df2 = pd.DataFrame(sv[:-prediction_length], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
                marker='+', alpha=0.7, zorder=-1, label='CurRank')

        #forecast (first output dimension only)
        forecast_entry[drawid].copy_dim(0).plot(prediction_intervals=prediction_intervals,
                color=colors[drawid],label=plabels[drawid+1], zorder=10)

        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        ax.set_ylabel(ylabel)

        # relabel the timestamp-based x ticks as lap numbers
        locs, labels = plt.xticks()
        #plt.xticks(locs, range(len(locs)))
        start_loc = locs[0]
        offset = range(0, 200, 5)
        #new_locs = range(start_loc , start_loc+200, 10)
        new_locs = [start_loc + x for x in offset]
        #new_labels = [str(x-start_loc + 1) for x in new_locs]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)

        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[drawid])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            # NOTE(review): `xl` is not assigned in this branch; it carries
            # over from a previous loop iteration -- confirm this is intended
            xlim_h = len(ts_entry[drawid])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))

        #plt.title(outputfile)
        plt.text(xl + xlim_h - 15, 35, f'car-{carno}',fontdict=font)

        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        #ax.set_xlim((80,110))

        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)

        #add racestatus: pit markers plus the colored status strip
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)

    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def get_racestatus(carno, rankdata):
    """Extract per-lap race-status arrays for one car from *rankdata*.

    Returns (pits, cautions, caution[1:], pitstop[1:], ranks[1:], laptimes[1:]);
    the last four drop the lap-0 record.  `pits`/`cautions` are the
    [completed_laps, rank, last_laptime, time_behind_leader] rows of the
    pit-stop / caution laps respectively.
    """
    car_df = rankdata[rankdata['car_number'] == carno]

    # completed_laps start from 0; in array mode completed_laps=1 is index 0
    lap_info = car_df[['completed_laps', 'rank', 'last_laptime', 'time_behind_leader']].values

    # 0/1 flags per lap: pit stop ('P') and yellow-flag caution ('Y')
    pitstop = np.array([1 if status == 'P' else 0 for status in car_df[['lap_status']].values])
    caution = np.array([1 if status == 'Y' else 0 for status in car_df[['track_status']].values])

    pits = lap_info[np.where(pitstop == 1)]
    cautions = lap_info[np.where(caution == 1)]

    ranks = car_df[['rank']].values
    laptimes = car_df[['last_laptime']].values

    # drop the first record (lap 0) from the per-lap series
    return pits, cautions, caution[1:], pitstop[1:], ranks[1:], laptimes[1:]
# Fill colors used by add_status() for the race-status strip:
# green = normal racing, yellow = caution laps, red = pit-stop laps.
# (The commented-out hex values are lighter pastel alternatives.)
#red = '#ff8080'
red = 'red'
#yellow = '#8080ff'
yellow = 'yellow'
#green = '#80ff80'
green = 'green'
def add_status(axs, xl, caution, pitstop, maxlap=200, y=-4, height=2):
    """Paint a colored status strip (one 1-wide rectangle per lap) onto *axs*.

    input:
        caution, pitstop : per-lap 0/1 race-status arrays
        xl : x offset of lap 0; y/height : strip placement
    Note: the *maxlap* argument is overwritten; the strip length always
    comes from the status arrays.
    """
    maxlap = min(len(caution), len(pitstop))
    for lap in range(maxlap):
        # pit stop takes precedence over caution, caution over green
        if pitstop[lap] == 1:
            color = red
        elif caution[lap] == 1:
            color = yellow
        else:
            color = green
        patch = plt.Rectangle((lap + xl - 0.5, y), 1, height, fc=color, ec=color)
        axs.add_patch(patch)
# In[ ]:
#
# stint evaluation
#
def eval_bydf(testdf, bydf, forcematch=True, force2int=False):
    """Filter *testdf* to the (carno, startlap) records present in *bydf*
    and evaluate the remaining predictions with stint.get_evalret.

    forcematch : drop records whose startrank/endrank (columns 2/3) disagree
                 between the two dataframes; records missing in either side
                 are kept unchecked
    force2int  : cast the surviving records to int before evaluation
    Returns (filtered_df, accuracy).
    """
    cars = set(bydf.carno.values)
    laps_by_car = {car: set(bydf[bydf['carno'] == car].startlap.values) for car in cars}

    kept = []
    for car in cars:
        for startlap in laps_by_car[car]:
            rec = testdf[(testdf['carno'] == car) & (testdf['startlap'] == startlap)]
            if forcematch:
                a = testdf[(testdf['carno'] == car) & (testdf['startlap'] == startlap)].to_numpy().astype(int)
                b = bydf[(bydf['carno'] == car) & (bydf['startlap'] == startlap)].to_numpy().astype(int)
                if len(a) != 0 and len(b) != 0:
                    # startrank (col 2) and endrank (col 3) must agree
                    if not ((a[0][2] == b[0][2]) and (a[0][3] == b[0][3])):
                        #print('mismatch:', a, b)
                        continue
            kept.append(rec)

    dfout = pd.concat(kept)
    if force2int:
        dfout = pd.DataFrame(dfout.to_numpy().astype(int),
                             columns=['carno', 'startlap', 'startrank',
                                      'endrank', 'diff', 'sign',
                                      'pred_endrank', 'pred_diff', 'pred_sign',
                                      'endlap', 'pred_endlap'])

    dfout = dfout.sort_values(by=['carno', 'startlap'])
    print('df size:', len(dfout))

    # return accuracy metrics computed by the simulator module
    accret = stint.get_evalret(dfout)[0]
    return dfout, accret
def eval_sync(testdf, errlist, force2int=False):
    """Evaluate *testdf* after dropping every (carno, startlap) pair listed
    in *errlist* (pairs detected as mismatches elsewhere).

    force2int : cast the surviving records to int before evaluation
    Returns (filtered_df, accuracy).
    """
    cars = set(testdf.carno.values)
    laps_by_car = {car: set(testdf[testdf['carno'] == car].startlap.values) for car in cars}

    kept = []
    for car in cars:
        for startlap in laps_by_car[car]:
            # skip records flagged in the error list
            if [car, startlap] in errlist:
                continue
            kept.append(testdf[(testdf['carno'] == car) & (testdf['startlap'] == startlap)])

    dfout = pd.concat(kept)
    if force2int:
        dfout = pd.DataFrame(dfout.to_numpy().astype(int),
                             columns=['carno', 'startlap', 'startrank',
                                      'endrank', 'diff', 'sign',
                                      'pred_endrank', 'pred_diff', 'pred_sign',
                                      'endlap', 'pred_endlap'])

    dfout = dfout.sort_values(by=['carno', 'startlap'])
    print('df size:', len(dfout))

    # return accuracy metrics computed by the simulator module
    accret = stint.get_evalret(dfout)[0]
    return dfout, accret
def cmp_df(testdf, bydf):
    """Compare two stint-result dataframes record by record.

    The dataframes may differ slightly (RankNet removes short time series),
    so every (carno, startlap) pair of *bydf* is checked against *testdf*:
    a pair is an error when either side has no record or when the
    startrank/endrank columns (positions 2 and 3) disagree.

    Returns (errcnt, err_list) where err_list holds [carno, startlap] pairs.
    (The original also built an unused `retdf` list; that dead code is removed.)
    """
    cars = set(bydf.carno.values)
    startlaps = {car: set(bydf[bydf['carno'] == car].startlap.values) for car in cars}

    err_list = []
    errcnt = 0
    for car in cars:
        for startlap in startlaps[car]:
            a = testdf[(testdf['carno'] == car) & (testdf['startlap'] == startlap)].to_numpy().astype(int)
            b = bydf[(bydf['carno'] == car) & (bydf['startlap'] == startlap)].to_numpy().astype(int)
            if len(a) != 0 and len(b) != 0:
                # compare startrank and endrank
                if not ((a[0][2] == b[0][2]) and (a[0][3] == b[0][3])):
                    print('mismatch:', a, b)
                    errcnt += 1
                    err_list.append([car, startlap])
            else:
                errcnt += 1
                print('mismatch empty:', a, b)
                err_list.append([car, startlap])

    print('errcnt:', errcnt)
    return errcnt, err_list
def df2samples(dfall, prediction_len=2, samplecnt=1):
    """Convert a prediction dataframe into the <samples, tss> format.

    Works for ML-model outputs that carry a single prediction per record:
    the one prediction is replicated across *samplecnt* sample rows, placed
    at lap startlap + prediction_len.  Returns (full_samples, full_tss)
    dicts keyed by car number; unseen laps stay NaN in length-200 arrays.
    """
    samples_by_car = {}
    targets_by_car = {}
    cars = set(dfall.carno.values)

    for carno in cars:
        targets = np.full(200, np.nan)
        samples = np.full((samplecnt, 200), np.nan)
        for startlap in set(dfall[dfall['carno'] == carno].startlap.values):
            rec = dfall[(dfall['carno'] == carno) & (dfall['startlap'] == startlap)]
            lap = int(rec.startlap.values[0] + prediction_len)
            # one prediction, replicated across all sample rows
            samples[:, lap] = rec.pred_endrank.values[0]
            targets[lap] = rec.endrank.values[0]
        samples_by_car[carno] = samples
        targets_by_car[carno] = targets

    return samples_by_car, targets_by_car
def df2samples_ex(dfall, samplecnt=100, errlist=None):
    """
    for stint results only: convert dataframe *dfall* into <samples, tss>
    input:
        dfall    ; result df <carno,startlap,startrank,endrank,diff,sign,
                   pred_endrank,pred_diff,pred_sign,endlap,pred_endlap>
                   with *samplecnt* pred_endrank rows per (carno, startlap)
        errlist  ; [carno, startlap] pairs to skip (default: none)
    return:
        (full_samples, full_tss) dicts keyed by car number; length-200
        arrays with NaN for laps that have no record
    """
    # BUG FIX: the default was the mutable literal `errlist=[]`, which is
    # shared across calls; use None and create a fresh list instead.
    if errlist is None:
        errlist = []

    full_samples = {}
    full_tss = {}

    carlist = set(dfall.carno.values)
    startlaps = {}
    for car in carlist:
        startlaps[car] = set(dfall[dfall['carno'] == car].startlap.values)

    for carno in carlist:
        # empty (all-NaN) arrays, filled lap by lap below
        full_tss[carno] = np.full(200, np.nan)
        full_samples[carno] = np.full((samplecnt, 200), np.nan)

        for startlap in startlaps[carno]:
            if [carno, startlap] in errlist:
                continue
            dfrec = dfall[(dfall['carno'] == carno) & (dfall['startlap'] == startlap)]
            curlap = int(dfrec.startlap.values[0])
            # one forecast per sample run; broadcast into the sample matrix
            full_samples[carno][:, curlap] = dfrec.pred_endrank.to_numpy()
            full_tss[carno][curlap] = dfrec.endrank.values[0]

    return full_samples, full_tss
def runs2samples(runret, errlist):
    """
    for stint results only: merge a list of per-run result dataframes into
    the <samples, tss> format
    input:
        runret  ; list of result df <carno,startlap,startrank,endrank,diff,
                  sign,pred_endrank,pred_diff,pred_sign,endlap,pred_endlap>
        errlist ; [carno, startlap] pairs to skip
    return:
        (full_samples, full_tss): per-car (len(runret), 200) sample
        matrices and length-200 target arrays, NaN where no record exists
    """
    samplecnt = len(runret)
    # concat all runs; row order inside a (carno, startlap) group follows
    # the order of the runs, giving one sample per run
    dfall = pd.concat(runret)
    cars = set(runret[0].carno.values)

    full_samples = {}
    full_tss = {}
    for carno in cars:
        full_tss[carno] = np.full(200, np.nan)
        full_samples[carno] = np.full((samplecnt, 200), np.nan)

        for startlap in set(dfall[dfall['carno'] == carno].startlap.values):
            if [carno, startlap] in errlist:
                continue
            rec = dfall[(dfall['carno'] == carno) & (dfall['startlap'] == startlap)]
            lap = int(rec.startlap.values[0])
            full_samples[carno][:, lap] = rec.pred_endrank.to_numpy()
            full_tss[carno][lap] = rec.endrank.values[0]

    return full_samples, full_tss
# In[ ]:
def get_config():
    """Snapshot of the module-level experiment configuration, in the fixed
    order expected by the experiment logger/saver."""
    return [
        _savedata,
        _skip_overwrite,
        _inlap_status,
        _feature_mode,
        _featureCnt,
        freq,
        _train_len,
        prediction_length,
        context_ratio,
        context_length,
        contextlen,
        dataset,
        epochs,
        gpuid,
        _use_weighted_model,
        trainmodel,
        _use_cate_feature,
        use_feat_static,
        distroutput,
        batch_size,
        loopcnt,
        _test_event,
        testmodel,
        pitmodel,
        year,
    ]
def test_global():
    """Smoke-test mutation of the shared global module state (gvar._hi)."""
    gvar._hi = gvar._hi + 200
| 99,202 | 32.765487 | 194 | py |
rankpredictor | rankpredictor-master/src/indycar/model/quicktest_modules_inctrain.py | #!/usr/bin/env python
# coding: utf-8
"""
RankNet QuickTest goes through the following steps
makedb laptime
makedb gluonts
train model
evaluate model
draw figures
version 0.4
supported features:
forecast_mode: shortterm, stint
trainmodel : deepAR , deepARW-Oracle, deepAR-multi
testmodel : standard, oracle,pitmodel, joint
Usage: RankNet-QuickTest.py <configfile> [options]
options override the configuration for quick-experiment needs; they include:
_forecast_mode ;
trainmodel ;
testmodel ;
_joint_train ; False/True
loopcnt ; 100/2
_pitmodel_bias ; 0/2,4
year ; 2018/2019
_test_event ; Indy500-2018, Indy500-2019
"""
import logging
from optparse import OptionParser
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import random
import mxnet as mx
from mxnet import gluon
import pickle
import json
import copy
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from pathlib import Path
import configparser
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWeightEstimator
from indycar.model.deepar import DeepAREstimator
from indycar.model.transformerw import TransformerWeightedEstimator
from indycar.model.transformerf import TransformerFullLossEstimator
from indycar.model.transformerwf import TransformerWeightedFullLossEstimator
from indycar.model.transformerwfm import TransformerWeightedFullLossMaskedEstimator
#import indycar.model.stint_simulator_shortterm_pitmodel as stint
import indycar.model.quicktest_simulator as stint
#from indycar.model.global_variables import _hi
import indycar.model.global_variables as gvar
from gluonts.model.transformer import TransformerEstimator
logger = logging.getLogger(__name__)
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-lap completed-laps dataframe for one event.

    Keeps the earliest record per (car_number, completed_laps) when ordered
    by elapsed_time, then derives per-lap deltas per car.

    Returns columns: car_number, completed_laps, time_diff, rank,
    track_status, lap_status, elapsed_time.
    """
    # pick up data with valid rank: earliest record per car/lap
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade", "laps_behind_prec", "overall_rank", "pit_stop_count",
                          "last_pitted_lap", "start_position", "laps_led"], axis=1)

    carnumber = set(uni_ds['car_number'])
    print('cars:', carnumber)
    print('#cars=', len(carnumber))

    # per-lap deltas: uni_ds is already sorted by car_number then lap, so a
    # plain diff works -- just zero the first lap of every car (rows where
    # the car number changes).
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)

    # BUG FIX: the original used chained indexing (uni_ds['rank_diff'][mask] = 0),
    # which may silently write to a copy and is deprecated in pandas;
    # use .loc instead.
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[mask, 'rank_diff'] = 0

    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[mask, 'time_diff'] = 0

    df = uni_ds[['car_number', 'completed_laps', 'time_diff', 'rank', 'track_status', 'lap_status', 'elapsed_time']]
    return df
def make_lapstatus_data(dataset):
    """Return the per-lap track status (completed_laps, track_status) taken
    from one car that finished the race."""
    final_lap = max(dataset.completed_laps)

    # cars that completed the final lap
    finishers = dataset[dataset.completed_laps == final_lap].car_number.values
    print('count of completed cars:', len(finishers))
    print('completed cars:', finishers)

    # pick the first finisher and de-duplicate its lap records
    one = dataset[dataset['car_number'] == finishers[0]]
    one = one.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    return one[['completed_laps', 'track_status']]
def load_data(event, year=0):
    """Load one event's completed-laps CSV and derive working dataframes.

    year : 0 (default) loads 'C_<event>.csv'; any truthy value loads
           'C_<event>-<year>.csv'.
    Returns (alldata, rankdata, acldata, flagdata).
    """
    # BUG FIX: the original built the path with '-' + year, which raises
    # TypeError when year is an int (and `year > 0` raises for a string
    # year); format the year explicitly and use truthiness instead.
    if year:
        inputfile = '../data/final/C_' + event + '-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'
    dataset = pd.read_csv(inputfile)

    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1

    # get records for the cars that finish the race
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)
    print('count of completed cars:', completed_car_count)
    print('completed cars:', completed_car_numbers)

    # make a copy before filtering down to the finishers
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    cldata = make_cl_data(dataset)
    flagdata = make_lapstatus_data(dataset)
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata, flagdata
def nan_helper(y):
    """Return (nans, index) helpers for handling NaNs in a 1-d array.

    nans  : boolean mask of the NaN positions in *y*
    index : converts a logical mask to integer indices, e.g. for np.interp:
            >>> nans, x = nan_helper(y)
            >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)
    to_indices = lambda flags: flags.nonzero()[0]
    return mask, to_indices
def get_lap2nextpit(lap_status, maxlap):
    """
    input:
        lap_status ; 0/1 array flagging a pit stop per lap (NaN tail means
                     an incomplete race)
        maxlap ; the max lap number of the race
    output:
        lap2nextpit ; per-lap gap (in laps) to the next pit stop, NaN where
                      no upcoming pit is known
    """
    pit_laps = list(np.where(lap_status == 1)[0])

    # cars that finished the race (no NaNs) get a virtual "pit" at the end
    # so the final stint counts down to the last lap
    if np.sum(np.isnan(lap_status)) == 0:
        pit_laps.append(maxlap)

    lap2nextpit = np.zeros_like(lap_status)
    lap2nextpit[:] = np.nan
    if not pit_laps:
        # guard: no pit stops at all
        return lap2nextpit

    idx = 0
    for lap in range(len(lap_status)):
        if lap >= pit_laps[idx]:
            # passed the current pit; advance to the next one
            idx += 1
            if idx >= len(pit_laps):
                break
        lap2nextpit[lap] = pit_laps[idx] - lap
    return lap2nextpit
def get_lapdata(acldata):
    """
    input:
        acldata['car_number','completed_laps','time_diff','rank','track_status', 'lap_status','elapsed_time']
        timediff: [car_number, completed_laps] -> elapsed time diff to leader
    output:
        np.array rows of [car_number, completed_laps, time_diff, rank,
        track_status, lap_status, time_behind_leader, elapsed_time]
        (note: rows mix numbers and status chars, so numpy stores them
        as strings)
    """
    COL_ELAPSED_TIME = 6
    maxlap = np.max(acldata['completed_laps'].values)

    time_behind = []
    for lap in range(1, maxlap + 1):
        this_lap = acldata[acldata['completed_laps'] == lap][
            ['car_number', 'completed_laps', 'time_diff', 'rank',
             'track_status', 'lap_status', 'elapsed_time']].values
        if len(this_lap) == 0:
            continue

        # leader's elapsed time for this lap.
        # BUG FIX: the original used .astype(np.float); the np.float alias
        # was removed in NumPy 1.24 -- use the builtin float instead.
        min_elapsed_time = np.nanmin(this_lap[:, COL_ELAPSED_TIME].astype(float))
        #print(f'lap:{lap}, min_elapsed_time:{min_elapsed_time}')

        for row in this_lap:
            car_number = int(row[0])
            time_diff = row[2]
            rank = row[3]
            track_status = row[4]
            lap_status = row[5]
            timebehind = float(row[COL_ELAPSED_TIME]) - min_elapsed_time
            time_behind.append([car_number, lap, time_diff, rank, track_status, lap_status,
                                timebehind, float(row[COL_ELAPSED_TIME])])

    return np.array(time_behind)
# Row indices of the per-lap feature tensor built by get_laptime_dataset
# (shape: car x feature x lap).
# features: laptime, rank, track_status, lap_status, timediff
LAPTIME = 0
RANK = 1
TRACK_STATUS = 2
LAP_STATUS = 3
TIME_BEHIND = 4  # gap to the lap leader, in seconds
CAUTION_LAPS_INSTINT = 5  # caution laps accumulated in the current stint
LAPS_INSTINT = 6  # laps driven in the current stint (pit age)
ELAPSED_TIME = 7
LAP2NEXTPIT = 8  # laps remaining until the next pit stop
_featureCnt = 9  # number of base features above
def get_laptime_dataset(stagedata, inlap_status = 0):
    """
    Build the per-event laptime/rank feature tensor.
    #add caution_laps_instint, laps_instint
    input: stagedata ; {event: (alldata, rankdata, acldata, flagdata)}
           inlap_status ; 0 = off, 1 = also mark the lap *before* each pit
                          lap as a pit lap, any other value marks the lap
                          *after* it
    output: laptime & rank data
        [(
        eventid,
        carids : rowid -> carno,
        datalist: #car_number x features x #totallaps (padded by Nan)
            entry: [[laptime, rank, track_status, lap_status,
                     caution_laps_instint, laps_instint]]
        )]
    """
    laptime_data = []
    for event in stagedata.keys():
        print(f'start event: {event}')
        laptime_rec = []  # NOTE(review): never used
        eventid = gvar.events_id[event]
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        totalcars = len(carlist)
        #totallaps = len(laplist)
        totallaps = max(laplist) + 1
        print('totallaps:', event, totallaps, len(laplist))

        #carnumber -> carid
        carids={key:idx for idx, key in enumerate(carlist)}
        decode_carids={idx:key for idx, key in enumerate(carlist)}

        #init: per-car stint counters (reset at every pit stop)
        lap_instint = {carids[x]:0 for x in carlist}
        caution_instint = {carids[x]:0 for x in carlist}

        #array: car_number x lap
        #laptime = np.zeros((totalcars, totallaps-1))
        #rank = np.zeros((totalcars, totallaps-1))
        laptime = np.empty((totalcars, totallaps-1))
        rank = np.empty((totalcars, totallaps-1))
        laptime[:] = np.NaN
        rank[:] = np.NaN
        # NOTE(review): the `laptime`/`rank` arrays above are never read;
        # `rank` is even shadowed by the scalar assigned in the loop below.

        datalist = np.empty((totalcars, _featureCnt, totallaps-1))
        datalist[:] = np.NaN

        #lapdata = acldata[['car_number','completed_laps',
        #    'time_diff','rank','track_status', 'lap_status','elapsed_time']].to_numpy()
        #'car_number','completed_laps','time_diff','rank','track_status', 'lap_status','time_behind'
        lapdata = get_lapdata(acldata)

        for row in lapdata:
            #completed_laps: skip the lap-0 records
            if int(row[1]) == 0:
                continue

            #add to data array (laps are 0-indexed in the tensor)
            car_number = carids[int(row[0])]
            completed_laps = int(row[1])-1
            time_diff = float(row[2])
            rank = int(row[3])
            track_status = 1 if row[4]=='Y' else 0
            lap_status = 1 if row[5]=='P' else 0
            time_behind = float(row[6])

            datalist[car_number, LAPTIME, completed_laps] = time_diff
            datalist[car_number, RANK, completed_laps] = rank
            datalist[car_number, TRACK_STATUS, completed_laps] = track_status
            datalist[car_number, LAP_STATUS, completed_laps] = lap_status
            datalist[car_number, TIME_BEHIND, completed_laps] = time_behind
            datalist[car_number, ELAPSED_TIME, completed_laps] = float(row[7])

            #stint status
            if track_status == 1:
                caution_instint[car_number] += 1
            lap_instint[car_number] += 1
            if lap_status == 1:
                #new stint: reset the per-stint counters
                lap_instint[car_number] = 0
                caution_instint[car_number] = 0

                # add inlap feature into lap_Status
                # set the previous lap to inlap status
                # what does it mean?
                if (inlap_status!=0):
                    if inlap_status == 1:
                        # set the previous lap of 'P'
                        if completed_laps > 0:
                            #datalist[car_number, LAP_STATUS, completed_laps-1] = INLAP_STATUS
                            datalist[car_number, LAP_STATUS, completed_laps-1] = 1
                    else:
                        # set the next lap of 'P'
                        if completed_laps +1 < totallaps:
                            #datalist[car_number, LAP_STATUS, completed_laps-1] = INLAP_STATUS
                            datalist[car_number, LAP_STATUS, completed_laps + 1] = 1

            datalist[car_number, LAPS_INSTINT, completed_laps] = lap_instint[car_number]
            datalist[car_number, CAUTION_LAPS_INSTINT, completed_laps] = caution_instint[car_number]

        #update lap2nextpit in datalist
        for caridx in range(datalist.shape[0]):
            lap_status = datalist[caridx, LAP_STATUS, :]
            #pit status
            lap2nextpit = get_lap2nextpit(lap_status, totallaps-1)
            datalist[caridx, LAP2NEXTPIT, :] = lap2nextpit

        #add one record
        laptime_data.append([eventid, decode_carids, datalist])

        # push this event into stage dataframe
        print('event=%s, records=%s'%(event, datalist.shape))

    return laptime_data
# In[ ]:
def nan_helper(y):
    """Helper to handle indices and logical indices of NaNs.

    NOTE(review): duplicate of the nan_helper defined earlier in this file;
    this later, identical definition is the one that wins at import time.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, logical indices of NaNs
        - index, a function, with signature indices= index(logical_indices),
          to convert logical indices of NaNs to 'equivalent' indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x= nan_helper(y)
        >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
    """
    return np.isnan(y), lambda z: z.nonzero()[0]
def test_flag(a, bitflag):
    """Return True when every bit of *bitflag* is set in *a*."""
    masked = a & bitflag
    return masked == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
#
# Column indices into the laptime_data feature tensor (car x feature x lap).
# The first nine mirror the LAPTIME.._featureCnt indices above.
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSED_TIME= 7
COL_LAP2NEXTPIT = 8
#_featureCnt = 9
# added new features (computed in prepare_laptimedata)
COL_LEADER_PITCNT = 9
COL_TOTAL_PITCNT = 10
COL_SHIFT_TRACKSTATUS = 11
COL_SHIFT_LAPSTATUS = 12
COL_SHIFT_LEADER_PITCNT = 13
COL_SHIFT_TOTAL_PITCNT = 14
COL_LASTFEATURE = 14
# dynamically extended space in simulation (backup copies of mutable rows)
COL_TRACKSTATUS_SAVE = COL_LASTFEATURE+1
COL_LAPSTATUS_SAVE = COL_LASTFEATURE+2
COL_CAUTION_LAPS_INSTINT_SAVE = COL_LASTFEATURE+3
COL_LAPS_INSTINT_SAVE= COL_LASTFEATURE+4
COL_ENDPOS = COL_LASTFEATURE+5

# Bit flags selecting which features feed the model (combine with |;
# decode with decode_feature_mode()).
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
FEATURE_LEADER_PITCNT = 8
FEATURE_TOTAL_PITCNT = 16
FEATURE_SHIFT_TRACKSTATUS = 32
FEATURE_SHIFT_LAPSTATUS = 64
FEATURE_SHIFT_LEADER_PITCNT = 128
FEATURE_SHIFT_TOTAL_PITCNT = 256

# flag -> (long name, single-char code for the compact encoding string)
_feature2str= {
        FEATURE_STATUS : ("FEATURE_STATUS",'S'),
        FEATURE_PITAGE : ("FEATURE_PITAGE",'A'),
        FEATURE_LEADER_PITCNT : ("FEATURE_LEADER_PITCNT",'L'),
        FEATURE_TOTAL_PITCNT : ("FEATURE_TOTAL_PITCNT",'T'),
        FEATURE_SHIFT_TRACKSTATUS : ("FEATURE_SHIFT_TRACKSTATUS",'Y'),
        FEATURE_SHIFT_LAPSTATUS : ("FEATURE_SHIFT_LAPSTATUS",'P'),
        FEATURE_SHIFT_LEADER_PITCNT : ("FEATURE_SHIFT_LEADER_PITCNT",'L'),
        FEATURE_SHIFT_TOTAL_PITCNT : ("FEATURE_SHIFT_TOTAL_PITCNT",'T')
        }

# Test/evaluation mode bit flags.
MODE_ORACLE = 0
MODE_NOLAP = 1
MODE_NOTRACK = 2
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
#MODE_STR={MODE_ORACLE:'oracle', MODE_NOLAP:'nolap',MODE_NOTRACK:'notrack',MODE_TEST:'test'}

#_feature_mode = FEATURE_STATUS
def decode_feature_mode(feature_mode):
    """Print the names of the enabled feature flags and return the compact
    encoding string (one char per known flag, '0' where unset)."""
    enabled_names = []
    shortcode = []
    for flag in _feature2str.keys():
        longname, shortname = _feature2str[flag]
        if test_flag(feature_mode, flag):
            enabled_names.append(longname)
            shortcode.append(shortname)
        else:
            shortcode.append('0')
    print(' '.join(enabled_names))
    return ''.join(shortcode)
def add_leader_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS, shift_len = 0,
                 dest_col = COL_LEADER_PITCNT,
                 verbose = False):
    """
    add a new feature into mat(car, feature, lap): per lap, the cumulative
    pit count of the cars ranked ahead of each car.
    input:
        selmat : laptime_data array [car, feature, lap]
        rank_col : feature row holding the rank series
        pit_col : feature row holding the 0/1 pit-stop series
        shift_len : take the rank ordering from `shift_len` laps earlier
        dest_col : destination feature row; -1 appends a new row
    """
    dim1, dim2, dim3 = selmat.shape

    # rank ordering per lap (row index of the car in 1st, 2nd, ... place).
    # BUG FIX: the original also computed an unused `true_rank` via
    # .astype(np.float); the np.float alias was removed in NumPy 1.24, so
    # that dead code crashed on modern NumPy and has been dropped.
    idx = np.argsort(selmat[:, rank_col, :], axis=0)

    # pits reordered by rank: row r of `pits` is the pit flag of the car
    # that held position r (shift_len laps earlier)
    pits = np.zeros((dim1, dim3))
    for lap in range(shift_len, dim3):
        col = idx[:, lap-shift_len]
        pits[:, lap] = selmat[col, pit_col, lap]

    # cumulative pits of all cars ranked strictly ahead
    leaderCnt = np.nancumsum(pits, axis=0) - pits

    if verbose:
        print('pits:\n')
        print(pits[:,190:])
        print('leaderCnt raw:\n')
        print(leaderCnt[:,190:])

    #remove nans
    nanidx = np.isnan(leaderCnt)
    leaderCnt[nanidx] = 0
    if verbose:
        print('leaderCnt after remove nan:\n')
        print(leaderCnt[:,190:])

    if dest_col == -1:
        #create a new data row for the feature
        newmat = np.zeros((dim1, dim2+1, dim3))
        dest_col = dim2
        newmat[:, :dim2, :] = selmat.copy()
    else:
        #update mode: write into the existing row in place
        newmat = selmat

    # scatter leaderCnt back from rank order to car-row order
    for lap in range(dim3):
        col = idx[:, lap]
        newmat[col, dest_col, lap] = leaderCnt[:, lap]

    # sync length to COL_RANK: re-blank each car's NaN tail
    for rec in newmat:
        nans, x = nan_helper(rec[rank_col, :])
        nan_count = np.sum(nans)
        if nan_count > 0:
            rec[dest_col, -nan_count:] = np.nan

    return newmat
def add_allpit_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS,
                 dest_col = COL_TOTAL_PITCNT, verbose = False):
    """
    add a new feature into mat(car, feature, lap): the total pit count per
    lap across all cars (same value broadcast to every car row).
    input:
        selmat : laptime_data array [car, feature, lap]
        pit_col : feature row holding the 0/1 pit-stop series
        dest_col : destination feature row; -1 appends a new row
    """
    dim1, dim2, dim3 = selmat.shape

    #calc totalCnt vector: pits per lap summed over all cars
    totalCnt = np.nansum(selmat[:, pit_col, :], axis=0).reshape((-1))
    if verbose:
        # BUG FIX: the original printed an undefined name `pits` here,
        # raising NameError whenever verbose=True; print totalCnt only.
        print('totalCnt raw:\n')
        print(totalCnt[190:])

    #remove nans
    nanidx = np.isnan(totalCnt)
    totalCnt[nanidx] = 0
    if verbose:
        print('totalCnt after remove nan:\n')
        print(totalCnt[190:])

    if dest_col == -1:
        #create a new data row for the feature
        newmat = np.zeros((dim1, dim2+1, dim3))
        dest_col = dim2
        newmat[:, :dim2, :] = selmat.copy()
    else:
        #update mode: write into the existing row in place
        newmat = selmat

    for car in range(dim1):
        newmat[car, dest_col, :] = totalCnt

    # sync length to COL_RANK: re-blank each car's NaN tail
    for rec in newmat:
        nans, x = nan_helper(rec[rank_col, :])
        nan_count = np.sum(nans)
        if nan_count > 0:
            rec[dest_col, -nan_count:] = np.nan

    return newmat
def add_shift_feature(selmat, rank_col=COL_RANK, shift_col=COL_LAPSTATUS, shift_len = 2,
                 dest_col = -1,verbose = False):
    """
    add a new feature into mat(car, feature, lap)
    shift features left in a lap (future values of shift_col moved
    `shift_len` laps earlier; the vacated tail of the valid range is 0)
    warning: these are oracle features, be careful not to let future rank positions leaking
    input:
        sel_mat : laptime_data array [car, feature, lap]
        shift_col : feature row to shift
        dest_col : destination feature row; -1 appends a new row
    """
    dim1, dim2, dim3 = selmat.shape

    if dest_col == -1:
        #create a new data
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode
        newmat = selmat

    for car in range(dim1):
        # set empty status by default (whole row NaN, valid range set below)
        newmat[car, dest_col, :] = np.nan

        # get valid laps: the non-NaN portion of this car's rank series
        rec = selmat[car]
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        recnnz = rec[shift_col, ~np.isnan(rec[rank_col,:])]
        reclen = len(recnnz)

        #shift copy: zero-fill the valid range, then move future values left
        newmat[car, dest_col, :reclen] = 0
        #newmat[car, dim2, :-shift_len] = selmat[car, shift_col, shift_len:]
        newmat[car, dest_col, :reclen-shift_len] = recnnz[shift_len:]

    # sync length to COL_RANK
    #for rec in newmat:
    #    nans, x= nan_helper(rec[rank_col,:])
    #    nan_count = np.sum(nans)
    #    if nan_count > 0:
    #        #todo, some invalid nan, remove them
    #        #rec[dim2, np.isnan(rec[dim2,:])] = 0
    #        rec[dim2, -nan_count:] = np.nan

    return newmat
def prepare_laptimedata(laptime_data,
prediction_length, freq,
test_event = 'Indy500-2018',
train_ratio=0.8,
context_ratio = 0.,
shift_len = -1):
"""
prepare the laptime data for training
1. remove short ts
2. rerank the tss
3. create new features
input:
laptime_data ; global var
output:
data ; new representation of laptime_data
"""
_laptime_data = laptime_data.copy()
test_eventid = gvar.events_id[test_event]
train_events = gvar._train_events
run_ts = COL_RANK
# check shift len
if shift_len < 0:
shift_len = prediction_length
print('prepare_laptimedata shift len:', shift_len)
#_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
new_data = []
for _data in _laptime_data:
#if events[_data[0]] == test_event:
test_mode = False
if _data[0] == test_eventid:
test_mode = True
#elif _data[0] in train_events:
# test_mode = False
#else:
# #skip this event
# print('skip this event:', _data[0])
# continue
#statistics on the ts length
ts_len = [ _entry.shape[1] for _entry in _data[2]]
train_len = int(np.max(ts_len) * train_ratio)
if train_len == 0:
#use global train_len
train_len = gvar._train_len if not test_mode else gvar._test_train_len
if context_ratio != 0.:
# add this part to train set
context_len = int(np.max(ts_len) * context_ratio)
else:
context_len = prediction_length*2
if context_len < 10:
context_len = 10
print(f'before ====event:{gvar.events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
#rerank due to short ts removed
#if run_ts == COL_RANK and dorerank == True:
if True:
sel_rows = []
# use to check the dimension of features
input_feature_cnt = _data[2].shape[1]
if input_feature_cnt < COL_LASTFEATURE + 1:
print('create new features mode, feature_cnt:', input_feature_cnt)
else:
print('update features mode, feature_cnt:', input_feature_cnt)
for rowid in range(_data[2].shape[0]):
# rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
rec = _data[2][rowid].copy()
#remove nan(only tails)
nans, x= nan_helper(rec[run_ts,:])
nan_count = np.sum(nans)
rec = rec[:, ~np.isnan(rec[run_ts,:])]
totallen = rec.shape[1]
if ( totallen < train_len + prediction_length):
print(f'rerank a short ts: carid={_data[1][rowid]},len={totallen}')
continue
else:
sel_rows.append(rowid)
#get selected matrix
sel_idx = np.array(sel_rows)
selmat = _data[2][sel_idx]
# check the format of _data
#ipdb.set_trace()
mask = np.isnan(selmat[:,COL_RANK,:])
idx = np.argsort(selmat[:,COL_RANK,:], axis=0)
true_rank = np.argsort(idx, axis=0).astype(np.float)
true_rank[mask] = np.nan
if test_mode:
#
# for historical code mismatch, simulation does not run rerank
#
_data[2][sel_idx,COL_RANK,:] = true_rank + 1
else:
_data[2][sel_idx,COL_RANK,:] = true_rank
# update the carno dict
new_carids = {}
for rowid in range(len(sel_idx)):
carid = sel_idx[rowid]
carno = _data[1][carid]
new_carids[rowid] = carno
# add new features
# add leaderPitCnt
if _data[0]==0:
verbose = True
else:
verbose = False
dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_LEADER_PITCNT
data2_intermediate = add_leader_cnt(_data[2][sel_idx], shift_len = shift_len, dest_col=dest_col, verbose = verbose)
# add totalPit
dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_TOTAL_PITCNT
data2_intermediate = add_allpit_cnt(data2_intermediate, dest_col=dest_col)
#
# add shift features, a fixed order, see the MACROS
#COL_SHIFT_TRACKSTATUS = 11
#COL_SHIFT_LAPSTATUS = 12
#COL_SHIFT_LEADER_PITCNT = 13
#COL_SHIFT_TOTAL_PITCNT = 14
#
dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TRACKSTATUS
data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
shift_col=COL_TRACKSTATUS, shift_len = shift_len)
dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LAPSTATUS
data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
shift_col=COL_LAPSTATUS, shift_len = shift_len)
# leader_pitcnt can not be shift, target leaking, just do not use it
dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LEADER_PITCNT
data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
shift_col=COL_LEADER_PITCNT, shift_len = shift_len)
dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TOTAL_PITCNT
data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
shift_col=COL_TOTAL_PITCNT, shift_len = shift_len)
# final
data2_newfeature = data2_intermediate
new_data.append([_data[0], new_carids, data2_newfeature])
return new_data
def get_real_features(feature_mode, rec, endpos):
    """
    Build the list of dynamic real-valued feature rows selected by ``feature_mode``.

    Each FEATURE_* bit in ``feature_mode`` enables one row taken from ``rec``;
    FEATURE_STATUS contributes two rows (track status and lap status).  Rows are
    appended in a fixed flag order so downstream models always see the same layout.

    flag -> column mapping (module-level MACROS):
        FEATURE_STATUS              -> COL_TRACKSTATUS, COL_LAPSTATUS
        FEATURE_PITAGE              -> COL_LAPS_INSTINT
        FEATURE_LEADER_PITCNT       -> COL_LEADER_PITCNT
        FEATURE_TOTAL_PITCNT        -> COL_TOTAL_PITCNT
        FEATURE_SHIFT_TRACKSTATUS   -> COL_SHIFT_TRACKSTATUS
        FEATURE_SHIFT_LAPSTATUS     -> COL_SHIFT_LAPSTATUS
        FEATURE_SHIFT_LEADER_PITCNT -> COL_SHIFT_LEADER_PITCNT
        FEATURE_SHIFT_TOTAL_PITCNT  -> COL_SHIFT_TOTAL_PITCNT

    input:
        feature_mode ; bitmask of FEATURE_* flags
        rec          ; 2-d record array [features, laps]
        endpos       ; exclusive end of the lap slice; <= 0 means full length
    output:
        list of 1-d arrays, one per enabled feature column
    """
    # a non-positive endpos means "use the whole series"
    if endpos <= 0:
        endpos = rec.shape[1]
    # fixed flag order; FEATURE_STATUS maps to two columns
    flag_to_cols = [
        (FEATURE_STATUS, (COL_TRACKSTATUS, COL_LAPSTATUS)),
        (FEATURE_PITAGE, (COL_LAPS_INSTINT,)),
        (FEATURE_LEADER_PITCNT, (COL_LEADER_PITCNT,)),
        (FEATURE_TOTAL_PITCNT, (COL_TOTAL_PITCNT,)),
        (FEATURE_SHIFT_TRACKSTATUS, (COL_SHIFT_TRACKSTATUS,)),
        (FEATURE_SHIFT_LAPSTATUS, (COL_SHIFT_LAPSTATUS,)),
        (FEATURE_SHIFT_LEADER_PITCNT, (COL_SHIFT_LEADER_PITCNT,)),
        (FEATURE_SHIFT_TOTAL_PITCNT, (COL_SHIFT_TOTAL_PITCNT,)),
    ]
    features = []
    for flag, cols in flag_to_cols:
        if test_flag(feature_mode, flag):
            for col in cols:
                features.append(rec[col, :endpos])
    return features
def make_dataset_byevent(_laptime_data,
        prediction_length, freq,
        useeid = False,
        run_ts=COL_LAPTIME,
        test_event = 'Indy500-2018',
        use_global_dict = True,
        oracle_mode = MODE_ORACLE,
        half_moving_win = True,
        train_ratio=0.8,
        log_transform = False,
        context_ratio = 0.,
        dorerank = True,
        joint_train = 0,
        test_cars = []
        ):
    """
    split the ts to train and test part by the ratio

    oracle_mode: false to simulate prediction in real by
    set the covariates of track and lap status as nan in the testset

    Builds GluonTS ListDatasets.  Train events go entirely into the train
    set; the held-out test event contributes one rolling test record per
    end position (step -1, i.e. every lap).

    input:
        _laptime_data     ; list of [eventid, carids, data] where data is a
                            3-d array [car, feature, lap]
        prediction_length ; forecast horizon in laps
        freq              ; pandas frequency string for the synthetic time index
        useeid            ; include eventid as a second static category
        run_ts            ; feature row used as the target (e.g. COL_LAPTIME)
        test_event        ; event name held out for testing
        use_global_dict   ; map car numbers through gvar.global_carids
        oracle_mode       ; bitmask; MODE_NOTRACK/MODE_NOLAP zero the covariates
        half_moving_win   ; unused here (step is hard-coded to -1 below)
        train_ratio       ; ratio of max ts length used as the train prefix
        log_transform     ; apply log(x+1) to the target
        context_ratio     ; if nonzero, a prefix of the test event is also
                            added to the train set
        dorerank          ; unused here (rerank happens in the prepare step)
        joint_train       ; nonzero -> 2-d target [run_ts, COL_LAPSTATUS]
        test_cars         ; optional whitelist of car numbers for the test set
    output:
        train_ds, test_ds, train_set, test_set
    """
    #global setting
    feature_mode = gvar._feature_mode
    # synthetic start timestamp; laps are mapped onto a fake time grid
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    train_set = []
    test_set = []
    total_test_rec_cnt = 0
    total_train_rec_cnt = 0
    totalTSCnt = 0
    totalTSLen = 0
    test_eventid = gvar.events_id[test_event]
    train_events = gvar._train_events
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if _data[0] == test_eventid:
            test_mode = True
        elif _data[0] in train_events:
            test_mode = False
        else:
            #skip this event
            print('skip this event:', _data[0])
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            #use global train_len
            train_len = gvar._train_len if not test_mode else gvar._test_train_len
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        # floor the context length so rolling windows have a minimum history
        if context_len < 10:
            context_len = 10
        print(f'after ====event:{gvar.events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            totalTSCnt += 1
            totalTSLen += totallen
            if ( totallen < train_len + prediction_length):
                print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = gvar.global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            #check carno in test_cars, testmode only
            if len(test_cars)>0 and carno not in test_cars:
                continue
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            if joint_train:
                # joint training predicts target and lap (pit) status together
                target_cols = [run_ts, COL_LAPSTATUS]
                target_val = rec[target_cols].copy().astype(np.float32)
            else:
                target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                real_features = get_real_features(feature_mode, rec, -1)
                _train.append({'target': target_val,
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': real_features
                              })
                # estimate the record count
                total_train_rec_cnt += totallen -gvar.context_length + 1
            else:
                # reset train_len
                if context_ratio != 0.:
                    # all go to train set
                    #add [0, context_len] to train set
                    # all go to train set
                    real_features = get_real_features(feature_mode, rec, context_len)
                    if joint_train:
                        _train.append({'target': target_val[:,:context_len],
                                       'start': start,
                                       'feat_static_cat': static_cat,
                                       'feat_dynamic_real': real_features
                                      })
                    else:
                        _train.append({'target': target_val[:context_len],
                                       'start': start,
                                       'feat_static_cat': static_cat,
                                       'feat_dynamic_real': real_features
                                      })
                    # estimate the record count
                    total_train_rec_cnt += context_len - gvar.context_length + 1
                # testset
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                step = -1
                for endpos in range(totallen, context_len+prediction_length,
                                    step):
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    real_features = get_real_features(feature_mode, rec, endpos)
                    if joint_train:
                        _test.append({'target': target_val[:,:endpos],
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': real_features
                                     })
                    else:
                        _test.append({'target': target_val[:endpos],
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': real_features
                                     })
                    test_rec_cnt += 1
            #check feature cnt
            # NOTE(review): real_features may be unbound here if the rolling
            # window above is empty (totallen <= context_len+prediction_length)
            # and context_ratio==0 in test mode -- confirm the short-ts filter
            # always prevents that.
            featureCnt = len(real_features)
            total_test_rec_cnt += test_rec_cnt
            #add one ts
            print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt},featureCnt:{featureCnt}')
        train_set.extend(_train)
        test_set.extend(_test)
    print(f'train len:{len(train_set)}, test len:{len(test_set)}, totsl TsCnt:{totalTSCnt}, total ts len:{totalTSLen}, trainreccnt:{total_train_rec_cnt}, testreccnt:{total_test_rec_cnt}', flush=True)
    train_ds = ListDataset(train_set, freq=freq,one_dim_target= False if joint_train else True)
    test_ds = ListDataset(test_set, freq=freq,one_dim_target= False if joint_train else True)
    return train_ds, test_ds, train_set, test_set
# In[ ]:
def init_estimator(model, gpuid, epochs=100, batch_size = 32,
        target_dim = 3, distr_output = None, use_feat_static = True):
    """
    Build a GluonTS estimator (or a classic baseline predictor) by name.

    input:
        model           ; model name, e.g. 'deepAR', 'deepAR-Oracle',
                          'RankNet', 'Transformer', 'TransformerW-Oracle',
                          'deepAR-multi', 'simpleFF', 'ets', 'arima', 'naive'...
        gpuid           ; <0 selects cpu, otherwise the mxnet gpu id
        epochs          ; training epochs for the shared Trainer
        batch_size      ; training batch size
        target_dim      ; target dimension for the multivariate ('-multi') models
        distr_output    ; distribution head forwarded to the estimators
        use_feat_static ; whether the static category (car id) is fed in;
                          most branches then also pass `cardinality`
    output:
        an untrained estimator, or a ready predictor for the
        ets/prophet/arima/naive baselines; exits the process for an
        unknown model name.

    NOTE(review): `cardinality` is read from module scope and must be set
    before calling with use_feat_static=True.
    """
    # select the mxnet compute context
    if int(gpuid) < 0:
        ctx = "cpu"
    else:
        ctx = "gpu(%s)"%gpuid
    #global vars
    prediction_length = gvar.prediction_length
    context_length = gvar.context_length
    freq = gvar.freq
    # shared trainer configuration for all trainable estimators
    trainer=Trainer(ctx=ctx,
                    batch_size = batch_size,
                    epochs=epochs,
                    learning_rate=gvar.learning_rate,
                    patience = gvar.patience,
                    #hybridize=False,
                    num_batches_per_epoch=100
                   )
    # plain DeepAR, no dynamic covariates
    if model == 'deepAR':
        if use_feat_static:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=True,
                cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
        else:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=False,
                #cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
    # '-Oracle' variants additionally consume the dynamic real covariates
    elif model == 'deepAR-Oracle':
        if use_feat_static:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
        else:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
    # weighted-loss DeepAR; 'RankNet' is an alias for this configuration
    elif model == 'deepARW-Oracle' or model == 'RankNet':
        if use_feat_static:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                weight_coef=gvar._weight_coef,
                trainer=trainer
            )
        else:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                weight_coef=gvar._weight_coef,
                trainer=trainer
            )
    # Transformer family mirrors the DeepAR branches
    elif model == 'Transformer':
        if use_feat_static:
            estimator = TransformerEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
        else:
            estimator = TransformerEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
    elif model == 'Transformer-Oracle':
        if use_feat_static:
            estimator = TransformerEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
        else:
            estimator = TransformerEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
    elif model == 'TransformerW-Oracle':
        if use_feat_static:
            # model_dim must be divisible by num_heads (30/6 vs 28/7 below)
            estimator = TransformerWeightedEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                model_dim=30,
                num_heads=6,
                trainer=trainer
            )
        else:
            estimator = TransformerWeightedEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                model_dim=28,
                num_heads=7,
                trainer=trainer
            )
    elif model == 'TransformerWF-Oracle' or model == 'RankNet-Transformer':
        if use_feat_static:
            estimator = TransformerWeightedFullLossEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
        else:
            estimator = TransformerWeightedFullLossEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
    elif model == 'TransformerWFM-Oracle':
        if use_feat_static:
            estimator = TransformerWeightedFullLossMaskedEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                weight_coef=gvar._weight_coef,
                trainer=trainer
            )
        else:
            estimator = TransformerWeightedFullLossMaskedEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                weight_coef=gvar._weight_coef,
                #model_dim=28,
                #num_heads=7,
                trainer=trainer
            )
    elif model == 'TransformerF-Oracle':
        if use_feat_static:
            estimator = TransformerFullLossEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
        else:
            estimator = TransformerFullLossEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
    # multivariate variants: distribution head is a MultivariateGaussian
    elif model == 'deepAR-multi':
        # NOTE(review): passes use_feat_static_cat=use_feat_static but never
        # cardinality -- likely fails when use_feat_static=True; confirm.
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=use_feat_static,
            #cardinality=cardinality,
            use_feat_dynamic_real=False,
            freq=freq,
            lags_seq=gvar._lags_seq,
            trainer=trainer,
            distr_output=MultivariateGaussianOutput(dim=target_dim),
        )
    elif model == 'deepARW-multi' or model == 'RankNet-Joint':
        estimator = DeepARWeightEstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=use_feat_static,
            #cardinality=cardinality,
            use_feat_dynamic_real=False,
            freq=freq,
            lags_seq=gvar._lags_seq,
            weight_coef=gvar._weight_coef,
            trainer=trainer,
            distr_output=MultivariateGaussianOutput(dim=target_dim),
        )
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            lags_seq=gvar._lags_seq,
            trainer=trainer
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            lags_seq=gvar._lags_seq,
            trainer=trainer
        )
    elif model == 'deepState':
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            lags_seq=gvar._lags_seq,
            trainer=trainer
        )
    # classic baselines below return ready predictors, not estimators
    elif model == 'ets':
        estimator = RForecastPredictor(method_name='ets',freq= freq, prediction_length = prediction_length)
    elif model == 'prophet':
        estimator = ProphetPredictor(freq= freq, prediction_length = prediction_length)
    elif model == 'arima':
        estimator = RForecastPredictor(method_name='arima',freq= freq, prediction_length = prediction_length, trunc_length = context_length)
    elif model == 'naive':
        estimator = NaivePredictor(freq= freq, prediction_length = prediction_length)
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)
    return estimator
# In[ ]:
#
# simulation engine general
#
def init_simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
        featuremode = stint.FEATURE_STATUS,
        pitmodel = 0,
        inlapmode=0,
        train_len = 40,test_train_len=40,
        joint_train = False,
        pitmodel_bias= 0, prepared_laptimedata = None):
    """
    input:
        prepared_laptimedata ; global

    Configure the `stint` simulation module for a subsequent run: set the
    dataset/event/task identifiers, feature mode, train lengths and the
    pit model, then install the prepared laptime data.  This function only
    mutates `stint` module state and returns nothing.
    """
    #
    # configurataion
    #
    # model path: <_dataset_id>/<_task_id>-<trainid>/
    #_dataset_id = 'indy2013-2018-nocarid'
    stint._inlap_status = inlapmode
    stint.init(gvar.LAPTIME_DATASET, pitmodel, pitmodel_bias= pitmodel_bias)
    stint._dataset_id = datasetid
    stint._test_event = testevent
    #_test_event = 'Indy500-2019'
    stint._feature_mode = featuremode
    stint._context_ratio = 0.
    stint._task_id = taskid  # rank,laptime, the trained model's task
    stint._run_ts = runts  #COL_LAPTIME,COL_RANK
    stint._exp_id=expid  #rank, laptime, laptim2rank, timediff2rank...
    stint._use_mean = True
    stint._train_len = train_len
    stint._test_train_len = test_train_len
    stint._joint_train = joint_train
    # todo: add into stint code
    #here add new laptime_data with new features
    #
    stint.set_laptimedata(prepared_laptimedata)
def simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
        datamode, loopcnt, featuremode = stint.FEATURE_STATUS,
        pitmodel = 0, model = 'oracle', inlapmode=0, train_len = 40,test_train_len=40,
        forecastmode = 'shortterm', joint_train = False,
        pitmodel_bias= 0, prepared_laptimedata = None,
        epochs = 1000):
    """
    input:
        prepared_laptimedata ; global

    Configure the `stint` module, load the trained model, then run
    `loopcnt` simulations of the test event ('shortterm' rolling forecasts
    or whole-'stint' forecasts) and evaluate each run.

    output:
        b    ; np.array of per-run evaluation metrics (mean printed)
        ret2 ; dict runid -> raw simulation result
    """
    #
    # configurataion
    #
    # model path: <_dataset_id>/<_task_id>-<trainid>/
    #_dataset_id = 'indy2013-2018-nocarid'
    stint._inlap_status = inlapmode
    stint.init(gvar.LAPTIME_DATASET, pitmodel, pitmodel_bias= pitmodel_bias)
    stint._dataset_id = datasetid
    stint._test_event = testevent
    #_test_event = 'Indy500-2019'
    stint._feature_mode = featuremode
    stint._context_ratio = 0.
    stint._task_id = taskid  # rank,laptime, the trained model's task
    stint._run_ts = runts  #COL_LAPTIME,COL_RANK
    stint._exp_id=expid  #rank, laptime, laptim2rank, timediff2rank...
    stint._use_mean = True
    stint._train_len = train_len
    stint._test_train_len = test_train_len
    stint._joint_train = joint_train
    # stint-level (long horizon) forecasting needs extra alignment settings
    if forecastmode == 'stint':
        stint._trim = 0
        stint._debug_carlist=[]
        stint._force_endpit_align = False
        stint._include_endpit = True
    # todo: add into stint code
    #here add new laptime_data with new features
    #
    stint.set_laptimedata(prepared_laptimedata)
    #stint.set_laptimedata(laptime_data)
    predictor = stint.load_model(predictionlen, model,trainid='indy500',epochs = epochs, exproot='./')
    ret2 = {}
    for i in range(loopcnt):
        #df, full_samples, full_tss
        if forecastmode == 'shortterm':
            ret2[i] = stint.run_simulation_shortterm(predictor, predictionlen, stint.freq, datamode=datamode, verbose=False)
        elif forecastmode == 'stint':
            ret2[i] = stint.run_simulation_pred(predictor, predictionlen, stint.freq, datamode=datamode)
        else:
            print('forecastmode not support:', forecastmode)
            break
    # evaluate every completed run; shortterm results are (df, samples, tss)
    acc = []
    for i in ret2.keys():
        if forecastmode == 'shortterm':
            df = ret2[i][0]
            _x = stint.get_evalret_shortterm(df)
        elif forecastmode == 'stint':
            df = ret2[i]
            _x = stint.get_evalret(df)
        acc.append(_x)
    b = np.array(acc)
    print(np.mean(b, axis=0))
    #save keys
    #stint._pitmodel.save_keys('pitmodel-keys.pickle')
    return b, ret2
def long_predict(predictor, sampleCnt = 100):
    """
    use the farest samples only

    input:
        test_ds ; global var
        predictor

    Stitch the rolling forecast windows into one long forecast: for each
    window only the LAST predicted lap is kept, so the result is a 1-lap-
    ahead trajectory covering [first_start, last_start + npredict).

    output:
        target ; forecast object whose samples hold the stitched trajectory
        tss[0] ; the matching ground-truth series
    """
    def get_start(idx):
        # minutes offset of window idx's start from the series start
        # (closure over `forecasts`/`start_time` defined below)
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=predictor,  # predictor
        num_samples=sampleCnt,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # windows are ordered newest-first: [-1] is the earliest one
    first_start = get_start(-1)
    last_start = get_start(0)
    print(first_start, last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # NaN-filled canvas wide enough for every window's last lap
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
    target.samples = newsamples
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target, tss[0]
def get_alldf(dfx, year=2018, forecast_mode = 'shortterm'):
    """
    Flatten a multi-run result dict into a single dataframe.

    input:
        dfx           ; dict runid -> result; in 'shortterm' mode each value
                        is a tuple whose first element is the dataframe,
                        otherwise the value is the dataframe itself
        year          ; unused, kept for interface compatibility
        forecast_mode ; 'shortterm' or stint-style
    output:
        one dataframe: the concatenation of all runs (or the single run's
        dataframe when there is only one)
    """
    #dfx = ret[f'{model}-RANK-{year}-inlap-nopitage']
    #dfx = ret[f'{model}-TIMEDIFF-{year}-noinlap-nopitage']
    if forecast_mode == 'shortterm':
        frames = [dfx[run_id][0] for run_id in dfx.keys()]
    else:
        frames = [dfx[run_id] for run_id in dfx.keys()]
    return pd.concat(frames) if len(frames) > 1 else frames[0]
def get_alldf_mode(dfx, year=2018,mode=0, forecast_mode = 'shortterm'):
    """
    mode:
        0; mode
        1; mean
        2; median

    Aggregate multi-run prediction dataframes into one record per
    <carno, startlap> by combining pred_endrank across runs with the
    chosen statistic, then recompute pred_diff/pred_sign.
    """
    dfall = get_alldf(dfx, year=year, forecast_mode = forecast_mode)
    cars = set(dfall.carno.values)
    # start laps observed for each car
    startlaps = {}
    for car in cars:
        startlaps[car] = set(dfall[dfall['carno']==car].startlap.values)
    retdf = []
    for car in cars:
        for startlap in startlaps[car]:
            # all runs' rows for this <car, startlap>
            dfrec = dfall[(dfall['carno']==car) & (dfall['startlap']==startlap)]
            #get mode
            if mode == 0:
                pred_endrank = stats.mode(dfrec.pred_endrank.values).mode[0]
                #pred_endlap = stats.mode(dfrec.pred_endlap.values).mode[0]
            elif mode == 1:
                #use mean
                pred_endrank = np.mean(dfrec.pred_endrank.values)
                #pred_endlap = np.mean(dfrec.pred_endlap.values)
            elif mode == 2:
                #use median
                pred_endrank = np.median(dfrec.pred_endrank.values)
                #pred_endlap = np.median(dfrec.pred_endlap.values)
            # positional columns: 2=startrank, 6=pred_endrank, 7=pred_diff, 8=pred_sign
            # NOTE(review): pred_diff is computed against column 2 (startrank)
            # here, while do_rerank computes it against endrank -- confirm
            # which baseline is intended.
            firstrec = dfrec.to_numpy()[0,:]
            firstrec[6] = pred_endrank
            firstrec[7] = pred_endrank - firstrec[2]
            if firstrec[7] == 0:
                firstrec[8] = 0
            elif firstrec[7] > 0:
                firstrec[8] = 1
            else:
                firstrec[8] = -1
            #endlap, pred_endlap
            retdf.append(firstrec)
    #dfout = pd.concat(retdf)
    if forecast_mode == 'shortterm':
        dfout = pd.DataFrame(retdf, columns =['carno', 'startlap', 'startrank',
                                              'endrank', 'diff', 'sign',
                                              'pred_endrank', 'pred_diff', 'pred_sign',
                                              #'endlap','pred_endlap'
                                             ])
    else:
        dfout = pd.DataFrame(retdf, columns =['carno', 'startlap', 'startrank',
                                              'endrank', 'diff', 'sign',
                                              'pred_endrank', 'pred_diff', 'pred_sign',
                                              'endlap','pred_endlap'
                                             ])
    print('df size:', len(dfout))
    return dfout
def get_allsamples(dfx, year=2018):
    """
    Collapse per-run sample forecasts into one mean trajectory per run.

    input:
        dfx  ; dict runid -> (df, forecast, tss); forecast maps carno to a
               (samplecnt, lapcnt) sample array, tss maps carno to ground truth
        year ; unused, kept for interface compatibility
    output:
        full_samples ; dict carno -> (runcnt, lapcnt) array; row r is run r's
                       per-lap nan-mean across its sample paths
        full_tss     ; ground-truth dict taken from the first run
    """
    run_ids = list(dfx.keys())
    runcnt = len(run_ids)
    first_run = dfx[run_ids[0]]
    full_tss = first_run[2]
    carlist = list(full_tss.keys())
    samplecnt, lapcnt = first_run[1][carlist[0]].shape
    print('sacmplecnt:', samplecnt, 'lapcnt:',lapcnt,'runcnt:', runcnt)
    # one row per run, one column per lap, for every car
    full_samples = {carno: np.zeros((runcnt, lapcnt)) for carno in carlist}
    for run_id in run_ids:
        run_forecast = dfx[run_id][1]
        for carno in carlist:
            # per-lap mean across this run's sample paths
            full_samples[carno][run_id, :] = np.nanmean(run_forecast[carno], axis=0)
        #if carno==3 and runid == 0:
        #    print('forecast:',forecast_mean)
    return full_samples, full_tss
#straight implementation of prisk
def quantile_loss(target, quantile_forecast, q):
    """
    Pinball (quantile) loss summed over all non-NaN positions, times 2.

    input:
        target            ; ground-truth array
        quantile_forecast ; forecast at quantile level q, same shape
        q                 ; quantile level in (0, 1)
    output:
        scalar loss (NaN entries are ignored by nansum)
    """
    indicator = (target <= quantile_forecast) - q
    pointwise = np.abs((quantile_forecast - target) * indicator)
    return 2.0 * np.nansum(pointwise)
def abs_target_sum(target):
    """Sum of absolute target values, ignoring NaN entries."""
    magnitudes = np.abs(target)
    return np.nansum(magnitudes)
def prisk(full_samples, full_tss, verbose = False):
    """
    Compute weighted quantile losses (P-risk) with the GluonTS Evaluator.

    input:
        full_samples ; dict carno -> (nsamples, nlaps) forecast sample array
        full_tss     ; dict carno -> ground-truth series
        verbose      ; dump the full aggregate metrics as json
    output:
        agg_metrics  ; GluonTS aggregate metrics dict; the three
                       wQuantileLoss values are always printed
    """
    carlist = full_tss.keys()
    tss = []
    forecasts = []
    forecasts_mean = []
    freq = '1min'
    start = pd.Timestamp("01-01-2019", freq=freq)
    for car in carlist:
        testcar = car
        # skip the first 12 laps (warm-up) when scoring
        # NOTE(review): `start + 12` relies on old pandas Timestamp+int
        # arithmetic with a freq attached -- deprecated in newer pandas; confirm.
        fc = SampleForecast(samples = full_samples[testcar][:, 12:], freq=freq, start_date=start + 12)
        samples = np.mean(full_samples[testcar][:, 12:], axis =0, keepdims=True)
        fc_mean = SampleForecast(samples = samples, freq=freq, start_date=start + 12)
        index = pd.date_range(start='2019-01-01 00:00:00', freq = 'T', periods = len(full_tss[testcar]))
        ts = pd.DataFrame(index = index, data = full_tss[testcar])
        tss.append(ts)
        forecasts.append(fc)
        forecasts_mean.append(fc_mean)
    # forecasts_mean is built but not evaluated here
    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(tss))
    if verbose:
        print(json.dumps(agg_metrics, indent=4))
    print(agg_metrics["wQuantileLoss[0.1]"], agg_metrics["wQuantileLoss[0.5]"],agg_metrics["wQuantileLoss[0.9]"])
    return agg_metrics
def prisk_direct_bysamples2(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    Weighted quantile-loss (P-risk) computed directly from <samples, tss>.

    target: endrank
    forecast: pred_endrank
    item_id: <carno, startlap>

    input:
        full_samples ; dict carno -> (nsamples, nlaps) forecast samples
        full_tss     ; dict carno -> ground-truth series
        quantiles    ; quantile levels to score
        startid      ; first lap index included in the score (warm-up skipped)
    output:
        agg_metrics  ; dict 'wQuantileLoss[q]' -> aggregate risk
        aggrisk      ; the same values as a numpy array
    """
    carlist = full_tss.keys()
    car_cnt = len(carlist)
    q_cnt = len(quantiles)
    prisk = np.zeros((car_cnt, q_cnt))
    target_sum = np.zeros((car_cnt))
    aggrisk = np.zeros((q_cnt))
    for carid, carno in enumerate(carlist):
        # for this car
        forecast = full_samples[carno]
        target = full_tss[carno]
        # len(quantiles) x series_len quantile curves over the sample axis
        quantile_forecasts = np.quantile(forecast, quantiles, axis=0)
        for idx, q in enumerate(quantiles):
            q_forecast = quantile_forecasts[idx]
            prisk[carid, idx] = quantile_loss(target[startid:], q_forecast[startid:], q)
        target_sum[carid] = abs_target_sum(target[startid:])
        if verbose==True and carno==3:
            print('target:', target[startid:])
            print('forecast:', q_forecast[startid:])
            print('target_sum:', target_sum[carid])
            print('quantile_forecasts:', quantile_forecasts[:,startid:])
    # aggregate: per-quantile loss sum normalized by the total target mass
    prisk_sum = np.nansum(prisk, axis=0)
    if verbose==True:
        print('prisk:',prisk)
        print('prisk_sum:',prisk_sum)
        print('target_sum:',target_sum)
    denom = np.sum(target_sum)
    for idx in range(q_cnt):
        aggrisk[idx] = np.divide(prisk_sum[idx], denom)
    agg_metrics = {f'wQuantileLoss[{q}]': aggrisk[idx] for idx, q in enumerate(quantiles)}
    print(agg_metrics.values())
    return agg_metrics, aggrisk
# In[ ]:
def prisk_direct_bysamples(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    calculate prisk by <samples, tss> directly (equal to gluonts implementation)

    target: endrank
    forecast: pred_endrank
    item_id: <carno, startlap>

    input:
        full_samples ; dict carno -> (nsamples, nlaps) forecast samples
        full_tss     ; dict carno -> ground-truth series
        quantiles    ; quantile levels to score
        startid      ; first lap index included in the score
    output:
        agg_metrics  ; dict 'wQuantileLoss[q]' -> aggregate risk
        aggrisk      ; the same values as a numpy array
    """
    carlist = full_tss.keys()
    prisk = np.zeros((len(carlist), len(quantiles)))
    target_sum = np.zeros((len(carlist)))
    aggrisk = np.zeros((len(quantiles)))
    for car_idx, car_no in enumerate(carlist):
        observed = full_tss[car_no]
        # quantile curves across the sample axis: len(quantiles) x series_len
        quantile_forecasts = np.quantile(full_samples[car_no], quantiles, axis=0)
        for q_idx, q in enumerate(quantiles):
            q_forecast = quantile_forecasts[q_idx]
            prisk[car_idx, q_idx] = quantile_loss(observed[startid:], q_forecast[startid:], q)
        target_sum[car_idx] = abs_target_sum(observed[startid:])
        if verbose==True and car_no==3:
            print('target:', observed[startid:])
            print('forecast:', q_forecast[startid:])
            print('target_sum:', target_sum[car_idx])
            print('quantile_forecasts:', quantile_forecasts[:,startid:])
    # aggregate: per-quantile loss sums normalized by total target mass
    prisk_sum = np.nansum(prisk, axis=0)
    if verbose==True:
        print('prisk:',prisk)
        print('prisk_sum:',prisk_sum)
        print('target_sum:',target_sum)
    total_mass = np.sum(target_sum)
    for q_idx in range(len(quantiles)):
        aggrisk[q_idx] = np.divide(prisk_sum[q_idx], total_mass)
    agg_metrics = {}
    for q_idx, q in enumerate(quantiles):
        agg_metrics[f'wQuantileLoss[{q}]'] = aggrisk[q_idx]
    print(agg_metrics.values())
    return agg_metrics, aggrisk
def clear_samples(full_samples, full_tss, clearidx):
    """
    Return deep copies of <full_samples, full_tss> with the lap indices in
    ``clearidx`` blanked out (set to NaN); the inputs are left untouched.

    input:
        full_samples ; dict carno -> (nsamples, nlaps) array
        full_tss     ; dict carno -> ground-truth array
        clearidx     ; lap indices to clear
    output:
        (ret_samples, ret_tss) ; the cleared copies
    """
    import copy
    ret_samples = copy.deepcopy(full_samples)
    ret_tss = copy.deepcopy(full_tss)
    for carno in ret_tss.keys():
        # blank the selected laps for every car, in both views
        ret_samples[carno][:, clearidx] = np.nan
        ret_tss[carno][clearidx] = np.nan
    return ret_samples, ret_tss
def do_rerank(dfout, short=True):
    """
    Re-rank predicted end ranks globally within each start lap.

    carno','startlap','startrank','endrank','diff','sign','pred_endrank','pred_diff','pred_sign','endlap','pred_endlap
    output of prediction of target can be float
    resort the endrank globally

    The model may emit float (non-unique) `pred_endrank` values; for each
    `startlap` group the cars are re-sorted by their predicted end rank and
    assigned dense integer ranks 0..n-1, then `pred_diff`/`pred_sign` are
    recomputed against `endrank`.

    input:
        dfout ; dataframe whose column ORDER matches `cols` below
                (first 9 columns when short=True, all 11 otherwise)
        short ; True for short-term records without the endlap columns
    output:
        a new int-typed dataframe with re-ranked predictions; an empty
        dataframe (with the right columns) when `dfout` has no rows
        (the previous version crashed on empty input)
    """
    cols=['carno','startlap','startrank','endrank','diff','sign','pred_endrank','pred_diff','pred_sign','endlap','pred_endlap']
    colid={name: pos for pos, name in enumerate(cols)}
    print('rerank...')
    laps = set(dfout.startlap.values)
    blocks = []
    for lap in laps:
        df = dfout[dfout['startlap']==lap].to_numpy()
        # double argsort -> dense rank of each car's predicted end rank
        idx = np.argsort(df[:,colid['pred_endrank']], axis=0)
        true_rank = np.argsort(idx, axis=0)
        df[:,colid['pred_endrank']] = true_rank
        #reset preds
        df[:,colid['pred_diff']] = df[:,colid['pred_endrank']] - df[:,colid['endrank']]
        for rec in df:
            if rec[colid['pred_diff']] == 0:
                rec[colid['pred_sign']] = 0
            elif rec[colid['pred_diff']] > 0:
                rec[colid['pred_sign']] = 1
            else:
                rec[colid['pred_sign']] = -1
        blocks.append(df)
    out_cols = cols[:-2] if short else cols
    if not blocks:
        # empty input: return an empty frame instead of crashing on astype
        return pd.DataFrame(columns=out_cols)
    # stack all per-lap blocks once (was quadratic incremental vstack)
    data = np.vstack(blocks)
    return pd.DataFrame(data.astype(int), columns=out_cols)
# In[ ]:
def long_predict_bymloutput_multirun(output, dfin, test_ds, predictor, sampleCnt=100):
    """
    input:
        test_ds
        predictor

    Stitch per-window ML predictions (from `dfin`) into one long forecast
    aligned with the ground-truth time axis.  For every rolling window only
    the slot of its last predicted lap is filled, and the value is taken
    from the matching `pred_endrank` row of `dfin` (keyed by startlap),
    shifted by +1 so ranks start from 1 for visualization.  `output` is
    only used by the commented-out plot call.

    output:
        target ; forecast object whose samples cover
                 [first_start, last_start + npredict)
        tss[0] ; the matching ground-truth series
    """
    def get_start(idx):
        # minutes offset of window idx's start from the series start
        # (closure over `forecasts`/`start_time` defined below)
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds,  # test dataset
        predictor = predictor,  # predictor
        num_samples=sampleCnt,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # windows are ordered newest-first: [-1] is the earliest one
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # NaN-filled canvas wide enough for every window's last lap
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
        # get prediction from ml output
        # pos = laps
        #  1 ... 10 | 11    <- start pos in forecasts
        #  0 ... 9  | 10    <- 9 is the startlap
        #
        startlap = start_pos - 2
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values
            #pred_val = _rec.pred_endrank.values
            #make sure shape match, 100 samples
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            # NOTE(review): assumes len(pred_val) broadcasts against the
            # nsample rows (1 or nsample matches) -- confirm dfin has one
            # row per startlap per sample run.
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)
    target.samples = newsamples
    print('multirun target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bymloutput(output, dfin, test_ds, predictor):
    """
    Single-sample variant of long_predict_bymloutput_multirun: stitch one ML
    prediction per start lap into a long forecast object.

    input:
        output    ; tag string (used only by the commented-out plotting call)
        dfin      ; df with <startlap, pred_endrank>, one record per startlap
        test_ds   ; gluonts rolling test dataset for one car
        predictor ; gluonts predictor (used only to recover window start dates)
    return:
        (forecast object with stitched samples, full target series tss[0])
    """
    def get_start(idx):
        # Lap offset of window idx (1-minute frequency encodes laps).
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds,  # test dataset
        predictor = predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    # reuse the earliest forecast object as template; only .samples changes
    target = copy.deepcopy(forecasts[-1])
    #samples shape, e.g. (100, 10)
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        start_pos = get_start(idx)
        # column for this window's final-step prediction
        pos = start_pos - first_start + npredict - 1
        # window start -> ML 0-based startlap (offset -2; the bydf variant
        # uses -1) -- presumably reflects the ML df's shifted indexing
        startlap = start_pos - 2
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # +1 so ranks start from 1 for visualization; single prediction
            # broadcast to all 100 sample rows
            pred_val = _rec.pred_endrank.values[0]
            newsamples[:, pos] = pred_val + 1
    target.samples = newsamples
    print('target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bysamples(output, samples, tss, test_ds, predictor):
    """
    Build a long forecast whose samples come from a precomputed sample matrix
    (using only the farthest-horizon samples), aligned to the race laps.

    input:
        output  ; tag string (used only by the commented-out plotting call)
        samples ; 2D array (nsample, maxlap) of precomputed predictions
        tss     ; NOTE: this parameter is immediately shadowed by the local
                  reassignment below and is effectively unused
        test_ds, predictor ; gluonts dataset/predictor, used to recover the
                  window start offsets
    return:
        (forecast object with the sliced samples (+1), target series tss[0])
    """
    def get_start(idx):
        # Lap offset of window idx (1-minute frequency encodes laps).
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds,  # test dataset
        predictor= predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print(first_start, last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #samples shape, e.g. (100, 10)
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    #sample array size: last_start - first_start + npredict
    arraysize = last_start - first_start + npredict
    #earlier attempts kept for reference:
    #target.samples = samples[:,-len(forecasts)-1:] + 1
    #target.samples = samples[:, 10 + npredict:] + 1
    # +1 so ranks start from 1 for visualization
    target.samples = samples[:, first_start:first_start + arraysize] + 1
    print('long_predict_bysamples==>target samples shape:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target, tss[0]
#
# different idx format to bymloutput
#
def long_predict_bydf(output, dfin, test_ds, predictor):
    """
    Like long_predict_bymloutput, but for dfs whose startlap index is offset
    by one less (startlap = start_pos - 1 instead of - 2).

    input:
        output    ; tag string (used only by the commented-out plotting call)
        dfin      ; df with <startlap, pred_endrank>, one record per startlap
        test_ds   ; gluonts rolling test dataset for one car
        predictor ; gluonts predictor (used only to recover window starts)
    return:
        (forecast object with stitched samples, full target series tss[0])
    """
    def get_start(idx):
        # Lap offset of window idx (1-minute frequency encodes laps).
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds,  # test dataset
        predictor = predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start(bydf):', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #samples shape, e.g. (100, 10)
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        start_pos = get_start(idx)
        # column for this window's final-step prediction
        pos = start_pos - first_start + npredict - 1
        # NOTE: offset is -1 here, vs -2 in long_predict_bymloutput --
        # presumably this df was built without the extra lap shift
        startlap = start_pos - 1
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # +1 so ranks start from 1 for visualization
            pred_val = _rec.pred_endrank.values[0]
            newsamples[:, pos] = pred_val + 1
    target.samples = newsamples
    print('target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def get_ranknet_multirun(retdata, testcar, test_ds, predictor, sampleCnt=100):
    """Collect multi-run RankNet results for one car and stitch them into a
    single long-horizon forecast.

    Args:
        retdata: dict of run-id -> (result df, ...); the df is element [0].
        testcar: car number to extract.
        test_ds, predictor: gluonts dataset/predictor forwarded to
            long_predict_bymloutput_multirun.
        sampleCnt: samples per window, forwarded along.

    Returns:
        (stitched forecast object, target time series).
    """
    per_run = [retdata[run_id][0] for run_id in retdata.keys()]
    car_frames = [frame[frame['carno'] == testcar] for frame in per_run]
    stacked = pd.concat(car_frames)
    print('dfin_ranknet size:', len(stacked))
    # shift to the 0-based lap/rank indexing the ML pipeline format expects
    for col in ('startlap', 'startrank', 'endrank'):
        stacked[col] = stacked[col] - 1
    return long_predict_bymloutput_multirun('ranknet-rank', stacked,
                                            test_ds, predictor,
                                            sampleCnt=sampleCnt)
# In[ ]:
def ploth(ts_entry, forecast_entry, pits,caution, pitstop,outputfile,
         colors = ['r','g','m'],
         plabels= ['observed','svr','arima','ranknet'],
         ylabel = 'RANK'):
    """
    Plot one stacked subplot per forecast: observed series, a 2-lap-shifted
    'CurRank' baseline, the forecast with its 90% interval, pit-stop markers
    and a green/yellow/red race-status strip; saves <outputfile>.pdf.

    input:
        ts_entry       ; list of target series (DataFrames), one per subplot
        forecast_entry ; list of gluonts forecast objects, same length
        pits, caution, pitstop ; race status from get_racestatus()
        outputfile     ; basename for the saved pdf (also used as title)
        colors/plabels ; per-forecast line colors / legend labels
        ylabel         ; 'RANK' (default) or a laptime-style label; controls
                         y-limits and which pit column is plotted
    NOTE: colors/plabels are mutable default args -- harmless here since they
    are never mutated.
    """
    prediction_intervals = [90.0]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    figcnt = len(forecast_entry)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    for idx in range(figcnt):
        # re-layout as figcnt rows x 1 col (overrides the subplots() grid)
        ax = plt.subplot(figcnt, 1, idx+1)
        # observed series
        ts_entry[idx].iloc[:,0].plot(linewidth=1, color='b',
                marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # 'CurRank' baseline: the observed series shifted forward by 2 laps
        sv = ts_entry[idx].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq='1min') + 2
        date_index = pd.date_range(start, periods = len(sv)-2, freq='1min')
        df2 = pd.DataFrame(sv[:-2], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
                marker='+', alpha=0.7, zorder=-1, label='CurRank')
        # forecast median + interval for dimension 0
        forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals,
                color=colors[idx],label=plabels[idx+1], zorder=10)
        # optionally overlay the mean of the sample paths (median is default);
        # the idx>3 guard limits this to the later (multi-sample) panels
        if idx>3:
            mean_forecast = copy.deepcopy(forecast_entry[idx])
            mean_forecast.samples = np.mean(mean_forecast.samples, axis=0).reshape((1,-1))
            mean_forecast.copy_dim(0).plot(prediction_intervals=prediction_intervals,
                color='g',label='use-mean', zorder=10)
        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        ax.set_ylabel(ylabel)
        if idx==0:
            plt.title(outputfile)
        # relabel x ticks from timestamps to lap numbers (every 5 laps)
        locs, labels = plt.xticks()
        start_loc = locs[0]
        offset = range(0, gvar.maxlap, 5)
        new_locs = [start_loc + x for x in offset]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)
        # NOTE(review): in the elif branch below, xl carries over from the
        # previous iteration's plt.xlim() call -- relies on all panels sharing
        # the same x range; confirm intended.
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[idx])
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            xlim_h = len(ts_entry[idx])
            ax.set_xlim((xl+0,xl+xlim_h))
        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)
        # race-status strip and pit markers; pits[:,1] is rank, pits[:,2]
        # laptime
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcar(carno):
    """Plot the first five rank forecasts for one car.

    Relies on module globals: ``alldata`` (carno -> (tss list, forecast
    list)) and ``gvar.rankdata`` for race status.
    """
    series_list, forecast_list = alldata[carno]
    status = get_racestatus(carno, gvar.rankdata)
    pits, cautions, caution, pitstop, ranks, laptimes = status
    print(np.where(pitstop == 1))
    ploth(series_list[:5], forecast_list[:5], pits, caution, pitstop,
          'ranknet-rf-rank-forecast-%d' % carno,
          colors=['y', 'c', 'g', 'm', 'r'],
          plabels=['observed', 'SVR', 'RF', 'Arima', 'RrankNet-Oracle', 'RrankNet-MLP'])
def plotcar_laptime(carno):
    """Plot laptime forecasts for one car.

    Relies on module globals: ``alldata`` (carno -> (tss list, forecast
    list)) and ``gvar.rankdata`` for race status.
    """
    series_list, forecast_list = alldata[carno]
    status = get_racestatus(carno, gvar.rankdata)
    pits, cautions, caution, pitstop, ranks, laptimes = status
    print(np.where(pitstop == 1))
    ploth(series_list, forecast_list, pits, caution, pitstop,
          'ranknet-oracle-laptime-forecast-%d' % carno,
          colors=['m', 'r'],
          plabels=['observed', 'RrankNet-Oracle', 'RrankNet-MLP'],
          ylabel='LapTime')
def plotrank(outputfile, mode='RANK' ):
    """
    Plot observed rank (or laptime) curves with pit markers and race-status
    strip, one subplot per car in the global ``alldata``; saves
    <outputfile>.pdf.

    input:
        outputfile ; basename for the saved pdf
        mode       ; 'RANK' plots ranks, anything else plots laptimes
    uses globals: alldata, gvar.rankdata, gvar.maxlap
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        tsss, targets = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, gvar.rankdata)
        print(np.where(pitstop==1))
        # re-layout as figcnt rows x 1 col (overrides the subplots() grid)
        ax = plt.subplot(figcnt, 1, idx+1)
        if mode == 'RANK':
            ax.plot(ranks, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='Rank')
            ax.set_ylim((-5,+35))
            # pits[:,0] is completed_laps (1-based here), pits[:,1] the rank
            ax.plot(pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,0, caution, pitstop)
        else:
            ax.plot(laptimes, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='LapTime')
            ax.set_ylim((30,140))
            # pits[:,2] is last_laptime at the pit lap
            ax.plot(pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,0, caution, pitstop,y=32, height=5)
        ax.set_xlim((0,gvar.maxlap))
        ax.set_ylabel('car-%d'%carno)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcarx(carno):
    """Plot rank forecasts for one car, splicing in the oracle result at
    panel index 2.

    Relies on module globals ``alldata``, ``oracledata`` (carno ->
    (tss, forecasts)) and ``gvar.rankdata``.
    """
    series_list, forecast_list = alldata[carno]
    oracle_series, oracle_forecasts = oracledata[carno]
    # replace the third panel with the oracle's second result
    series_list[2] = oracle_series[1]
    forecast_list[2] = oracle_forecasts[1]
    status = get_racestatus(carno, gvar.rankdata)
    pits, cautions, caution, pitstop, ranks, laptimes = status
    print(np.where(pitstop == 1))
    ploth(series_list[:5], forecast_list[:5], pits, caution, pitstop,
          'ranknet-rf-rank-forecast-%d' % carno,
          colors=['y', 'c', 'g', 'm', 'r'],
          plabels=['observed', 'SVR', 'RF', 'Weighted-Oracle', 'RrankNet-Oracle', 'RrankNet-MLP'])
def plotoracle(alldata, carno, destdir):
    """Plot the oracle-variant forecasts for one car into *destdir*.

    Args:
        alldata: carno -> (tss list, forecast list).
        carno: car number to plot.
        destdir: output directory prefix for the pdf.
    """
    outputfile = destdir + 'ranknet-oracle-forecast-%d' % carno
    series_list, forecast_list = alldata[carno]
    status = get_racestatus(carno, gvar.rankdata)
    pits, cautions, caution, pitstop, ranks, laptimes = status
    print(np.where(pitstop == 1))
    ploth(series_list, forecast_list, pits, caution, pitstop,
          outputfile,
          colors=['y', 'c', 'g', 'm', 'r'],
          plabels=['observed', '1run-samples', '1run-df', 'multimean', 'norerank-multimean', 'mrun-samples'])
def plotallcars(alldata, outputfile, drawid = 0,
        colors = ['g','c','m','r','y'],
        plabels= ['observed','1run-samples','1run-df','multimean','norerank-multimean','mrun-samples'],
        ylabel='RANK'):
    """
    Plot a single figure with one subplot per car, each showing the observed
    series, the 'CurRank' baseline, and the long forecast at index *drawid*;
    saves <outputfile>.pdf.

    input:
        alldata    ; carno -> (ts_entry list, forecast_entry list)
        outputfile ; basename for the saved pdf
        drawid     ; which long-prediction result in alldata[carno] to draw
        colors/plabels ; line colors / legend labels, indexed by drawid
        ylabel     ; 'RANK' or laptime-style label (controls y-limits)
    uses globals: gvar.rankdata, gvar.maxlap, gvar.freq,
    gvar.prediction_length
    NOTE: colors/plabels are mutable default args -- never mutated here.
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    prediction_intervals = [90.0]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    font = {'family': 'serif',
            'color':  'darkred',
            'weight': 'normal',
            'size': 12,
            }
    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        ts_entry, forecast_entry = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, gvar.rankdata)
        print(np.where(pitstop==1))
        # re-layout as figcnt rows x 1 col (overrides the subplots() grid)
        ax = plt.subplot(figcnt, 1, idx+1)
        # observed
        ts_entry[drawid].iloc[:,0].plot(linewidth=1, color='b',
                marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # 'CurRank' baseline: observed series shifted by prediction_length
        sv = ts_entry[drawid].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq=gvar.freq) + gvar.prediction_length
        date_index = pd.date_range(start, periods = len(sv) - gvar.prediction_length, freq = gvar.freq)
        df2 = pd.DataFrame(sv[:- gvar.prediction_length], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
                marker='+', alpha=0.7, zorder=-1, label='CurRank')
        #forecast
        forecast_entry[drawid].copy_dim(0).plot(prediction_intervals=prediction_intervals,
                color=colors[drawid],label=plabels[drawid+1], zorder=10)
        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        ax.set_ylabel(ylabel)
        # relabel x ticks from timestamps to lap numbers (every 5 laps)
        locs, labels = plt.xticks()
        start_loc = locs[0]
        offset = range(0, gvar.maxlap, 5)
        new_locs = [start_loc + x for x in offset]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)
        # NOTE(review): the elif branch reuses xl from the previous
        # iteration's plt.xlim() -- relies on shared x ranges across panels.
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[drawid])
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            xlim_h = len(ts_entry[drawid])
            ax.set_xlim((xl+0,xl+xlim_h))
        # per-panel car label in the upper-right corner
        plt.text(xl + xlim_h - 15, 35, f'car-{carno}',fontdict=font)
        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)
        # race-status strip and pit markers; pits[:,1] is rank, pits[:,2]
        # laptime
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def get_racestatus_all(rankdata):
    """Return the sorted lap numbers that saw any pit stop or caution flag.

    Args:
        rankdata: per-lap DataFrame with columns completed_laps, rank,
            last_laptime, time_behind_leader, lap_status ('P' = pit) and
            track_status ('Y' = caution).

    Returns:
        (pitlaps, cautionlaps): two sorted lists of int lap numbers.
    """
    feature = rankdata[['completed_laps', 'rank', 'last_laptime', 'time_behind_leader']].values
    pit_mask = np.array([1 if s == 'P' else 0 for s in rankdata['lap_status'].values])
    caution_mask = np.array([1 if s == 'Y' else 0 for s in rankdata['track_status'].values])
    pit_laps = sorted({int(lap) for lap in feature[pit_mask == 1][:, 0]})
    caution_laps = sorted({int(lap) for lap in feature[caution_mask == 1][:, 0]})
    return pit_laps, caution_laps
def get_racestatus(carno, rankdata):
    """Extract pit/caution status plus rank/laptime series for one car.

    completed_laps starts at 0, so when series are indexed as arrays the
    lap-0 record is dropped (the trailing ``[1:]`` slices).

    Args:
        carno: car number to select.
        rankdata: per-lap DataFrame with car_number, completed_laps, rank,
            last_laptime, time_behind_leader, lap_status, track_status.

    Returns:
        (pits, cautions, caution[1:], pitstop[1:], ranks[1:], laptimes[1:])
        where pits/cautions are the feature rows at flagged laps and the
        rest are per-lap arrays starting at lap 1.
    """
    car_df = rankdata[rankdata['car_number'] == carno]
    feature = car_df[['completed_laps', 'rank', 'last_laptime', 'time_behind_leader']].values
    pit_flags = np.array([1 if s == 'P' else 0 for s in car_df['lap_status'].values])
    caution_flags = np.array([1 if s == 'Y' else 0 for s in car_df['track_status'].values])
    pit_rows = feature[np.where(pit_flags == 1)]
    caution_rows = feature[np.where(caution_flags == 1)]
    rank_series = car_df[['rank']].values
    laptime_series = car_df[['last_laptime']].values
    return (pit_rows, caution_rows, caution_flags[1:], pit_flags[1:],
            rank_series[1:], laptime_series[1:])
# Colors for the race-status strip drawn by add_status(); the commented hex
# values are softer alternatives kept for reference.
#red = '#ff8080'
red = 'red'
#yellow = '#8080ff'
yellow = 'yellow'
#green = '#80ff80'
green = 'green'
def add_status(axs, xl, caution, pitstop, y=-4, height=2):
    """Draw a per-lap colored status strip onto *axs*.

    Green = normal, yellow = caution lap, red = pit stop (red wins when both
    flags are set).

    Args:
        axs: matplotlib axes to draw on.
        xl: x offset of lap 0 (left x-limit of the plot).
        caution, pitstop: per-lap 0/1 arrays of race status.
        y, height: vertical placement/size of the strip rectangles.
    """
    nlaps = min(len(caution), len(pitstop))
    for lap in range(nlaps):
        # pit stop takes precedence over caution
        if pitstop[lap] == 1:
            color = red
        elif caution[lap] == 1:
            color = yellow
        else:
            color = green
        patch = plt.Rectangle((lap + xl - 0.5, y), 1, height, fc=color, ec=color)
        axs.add_patch(patch)
# In[ ]:
#
# stint evaluation
#
def eval_bydf(testdf, bydf, forcematch=True, force2int=False):
    """
    Evaluate *testdf* restricted to the <carno, startlap> pairs present in
    *bydf*, optionally dropping records whose start/end ranks disagree.

    input:
        testdf     ; result df to evaluate
        bydf       ; reference df providing the <carno, startlap> universe
        forcematch ; when True, silently skip pairs where columns 2/3
                     (positionally: startrank, endrank) differ between the
                     two dfs
        force2int  ; rebuild the output with int dtype and canonical columns
    return:
        (filtered+sorted df, accuracy metrics from stint.get_evalret)
    """
    #collect only records in bydf <carno and startlap>
    cars = set(bydf.carno.values)
    startlaps = {}
    for car in cars:
        startlaps[car] = set(bydf[bydf['carno']==car].startlap.values)
    retdf = []
    for car in cars:
        for startlap in startlaps[car]:
            dfrec = testdf[(testdf['carno']==car) & (testdf['startlap']==startlap)]
            #check match
            if forcematch:
                a = testdf[(testdf['carno']==car) & (testdf['startlap']==startlap)].to_numpy().astype(int)
                b = bydf[(bydf['carno']==car) & (bydf['startlap']==startlap)].to_numpy().astype(int)
                if len(a)!=0 and len(b)!=0:
                    # compare startrank (col 2) and endrank (col 3)
                    # positionally -- assumes both dfs share column order
                    if not ((a[0][2] == b[0][2]) and (a[0][3] == b[0][3])):
                        #print('mismatch:', a, b)
                        continue
            retdf.append(dfrec)
    dfout = pd.concat(retdf)
    if force2int:
        dfdata = dfout.to_numpy().astype(int)
        dfout = pd.DataFrame(dfdata, columns =['carno', 'startlap', 'startrank',
                            'endrank', 'diff', 'sign',
                            'pred_endrank', 'pred_diff', 'pred_sign',
                            'endlap','pred_endlap'
                            ])
    dfout = dfout.sort_values(by=['carno','startlap'])
    print('df size:', len(dfout))
    #return acc
    accret = stint.get_evalret(dfout)[0]
    return dfout , accret
def eval_sync(testdf, errlist, force2int=False):
    """Evaluate *testdf* after removing the <carno, startlap> pairs listed in
    *errlist* (mismatches detected by cmp_df).

    Args:
        testdf: result df with columns incl. carno, startlap.
        errlist: list of [carno, startlap] pairs to drop.
        force2int: rebuild output with int dtype and canonical columns.

    Returns:
        (filtered+sorted df, accuracy metrics from stint.get_evalret).
    """
    kept = []
    for car in set(testdf.carno.values):
        car_rows = testdf[testdf['carno'] == car]
        for lap in set(car_rows.startlap.values):
            if [car, lap] in errlist:
                continue
            kept.append(car_rows[car_rows['startlap'] == lap])
    dfout = pd.concat(kept)
    if force2int:
        int_data = dfout.to_numpy().astype(int)
        dfout = pd.DataFrame(int_data, columns=['carno', 'startlap', 'startrank',
                                                'endrank', 'diff', 'sign',
                                                'pred_endrank', 'pred_diff', 'pred_sign',
                                                'endlap', 'pred_endlap'
                                                ])
    dfout = dfout.sort_values(by=['carno', 'startlap'])
    print('df size:', len(dfout))
    accret = stint.get_evalret(dfout)[0]
    return dfout, accret
def cmp_df(testdf, bydf):
    """Cross-check <carno, startlap> records between two result dfs.

    The dfs can legitimately differ slightly (RankNet drops short time
    series); a record counts as a mismatch when it is missing from either df
    or when the positional columns 2/3 (startrank, endrank) disagree.

    Returns:
        (errcnt, err_list) where err_list holds [carno, startlap] pairs for
        every mismatch.
    """
    err_list = []
    errcnt = 0
    for car in set(bydf.carno.values):
        for startlap in set(bydf[bydf['carno'] == car].startlap.values):
            a = testdf[(testdf['carno'] == car) & (testdf['startlap'] == startlap)].to_numpy().astype(int)
            b = bydf[(bydf['carno'] == car) & (bydf['startlap'] == startlap)].to_numpy().astype(int)
            if len(a) == 0 or len(b) == 0:
                errcnt += 1
                print('mismatch empty:', a, b)
                err_list.append([car, startlap])
            elif not ((a[0][2] == b[0][2]) and (a[0][3] == b[0][3])):
                print('mismatch:', a, b)
                errcnt += 1
                err_list.append([car, startlap])
    print('errcnt:', errcnt)
    return errcnt, err_list
def df2samples(dfall, prediction_len=2, samplecnt=1):
    """Convert an ML-model result df (one prediction per record) into the
    <samples, tss> dict format keyed by car number.

    Each record's prediction lands at lap ``startlap + prediction_len``;
    unfilled laps stay NaN. Array length comes from the global
    ``gvar.maxlap``.

    Args:
        dfall: df with columns carno, startlap, endrank, pred_endrank.
        prediction_len: horizon offset added to startlap.
        samplecnt: number of (identical) sample rows to emit per car.

    Returns:
        (full_samples, full_tss): carno -> (samplecnt, maxlap) array and
        carno -> (maxlap,) target array.
    """
    full_samples, full_tss = {}, {}
    for carno in set(dfall.carno.values):
        car_rows = dfall[dfall['carno'] == carno]
        tss = np.full(gvar.maxlap, np.nan)
        samples = np.full((samplecnt, gvar.maxlap), np.nan)
        for startlap in set(car_rows.startlap.values):
            rec = car_rows[car_rows['startlap'] == startlap]
            lap = int(rec.startlap.values[0] + prediction_len)
            # the single prediction is replicated across all sample rows
            samples[:, lap] = rec.pred_endrank.values[0]
            tss[lap] = rec.endrank.values[0]
        full_samples[carno] = samples
        full_tss[carno] = tss
    return full_samples, full_tss
def df2samples_ex(dfall, samplecnt=100,errlist=[]):
    """
    for stint results only: convert a (possibly multi-sample) stint result df
    into the <samples, tss> dict format, writing each record at its *endlap*.

    input:
        dfall     ; concatenated result df <carno,startlap,startrank,endrank,diff,sign,pred_endrank,pred_diff,pred_sign,endlap,pred_endlap>
        samplecnt ; expected number of pred_endrank samples per record
        errlist   ; <car, startlap> pairs to skip (mutable default -- never
                    mutated here, so safe)
    return:
        samples, tss ; dicts keyed by carno, arrays sized by gvar.maxlap
    """
    #samplecnt = len(runret)
    full_samples = {}
    full_tss = {}
    carlist = set(dfall.carno.values)
    startlaps = {}
    for car in carlist:
        startlaps[car] = set(dfall[dfall['carno']==car].startlap.values)
    #empty samples (NaN everywhere until filled)
    for carid, carno in enumerate(carlist):
        full_tss[carno] = np.zeros((gvar.maxlap))
        full_tss[carno][:] = np.nan
        full_samples[carno] = np.zeros((samplecnt,gvar.maxlap))
        full_samples[carno][:] = np.nan
        for startlap in startlaps[carno]:
            thisrec = [carno,startlap]
            if thisrec in errlist:
                continue
            dfrec = dfall[(dfall['carno']==carno) & (dfall['startlap']==startlap)]
            # save to the endlap (unlike runs2samples, which uses startlap)
            #curlap = int(dfrec.startlap.values[0])
            curlap = int(dfrec.endlap.values[0])
            target = dfrec.endrank.values[0]
            # one value per sample/run; shape must equal samplecnt
            forecast = dfrec.pred_endrank.to_numpy()
            full_samples[carno][:,curlap] = forecast
            full_tss[carno][curlap] = target
    return full_samples, full_tss
def runs2samples(runret, errlist):
    """
    for stint results only: merge a list of per-run result dfs into the
    <samples, tss> dict format, one sample row per run, written at the
    *startlap* of each record.

    input:
        runret  ; list of result dfs <carno,startlap,startrank,endrank,diff,sign,pred_endrank,pred_diff,pred_sign,endlap,pred_endlap>
        errlist ; <car, startlap> pairs to skip
    return:
        samples, tss ; dicts keyed by carno, arrays sized by gvar.maxlap
    """
    samplecnt = len(runret)
    # car universe taken from the first run -- assumes all runs cover the
    # same cars
    carlist = set(runret[0].carno.values)
    full_samples = {}
    full_tss = {}
    #concat all dfs so one lookup yields all runs' predictions for a lap
    dfall = pd.concat(runret)
    startlaps = {}
    for car in carlist:
        startlaps[car] = set(dfall[dfall['carno']==car].startlap.values)
    #empty samples (NaN everywhere until filled)
    for carid, carno in enumerate(carlist):
        full_tss[carno] = np.zeros((gvar.maxlap))
        full_tss[carno][:] = np.nan
        full_samples[carno] = np.zeros((samplecnt,gvar.maxlap))
        full_samples[carno][:] = np.nan
        for startlap in startlaps[carno]:
            thisrec = [carno,startlap]
            if thisrec in errlist:
                continue
            dfrec = dfall[(dfall['carno']==carno) & (dfall['startlap']==startlap)]
            curlap = int(dfrec.startlap.values[0])
            target = dfrec.endrank.values[0]
            # one prediction per run; length must equal samplecnt
            forecast = dfrec.pred_endrank.to_numpy()
            full_samples[carno][:,curlap] = forecast
            full_tss[carno][curlap] = target
    return full_samples, full_tss
# In[ ]:
def get_config():
    """
    Snapshot the current experiment settings (all module-level globals set by
    the argument parsing / notebook cells) as a positional list.

    NOTE(review): the list is order-sensitive -- consumers index it
    positionally; keep the order in sync with wherever it is unpacked.
    """
    config = [
    _savedata,
    _skip_overwrite,
    _inlap_status,
    _feature_mode,
    _featureCnt,
    freq ,
    _train_len,
    prediction_length,
    context_ratio,
    context_length,
    contextlen,
    dataset,
    epochs,
    gpuid,
    _use_weighted_model,
    trainmodel,
    _use_cate_feature,
    use_feat_static,
    distroutput,
    batch_size,
    loopcnt,
    _test_event,
    testmodel,
    pitmodel,
    year
    ]
    return config
def test_global():
    """Debug helper: bump the shared counter ``gvar._hi`` to confirm that
    module-global state in ``gvar`` is visible and mutable from here."""
    gvar._hi += 200
def get_event_info(event):
    """Look up race metadata for *event* in the global ``gvar._race_info``
    table; raises KeyError for unknown events."""
    #eid = event.split('-')[0]
    return gvar._race_info[event]
| 103,231 | 32.615109 | 199 | py |
#!/usr/bin/env python
# coding: utf-8
# ## evaluate-fulltest-fastrun
#
# based on: evaluate-fulltest
#
# + support different models and test set
#
# + rank prediction directly
# + rank prediction by laptime2rank
# + laptime prediction
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
logger = logging.getLogger(__name__)
# In[2]:
import os
# Notebook-cell residue: re-seed the RNG from system entropy; the
# os.getcwd() result is discarded (kept for parity with the exported cell).
random.seed()
os.getcwd()
#GPUID = 1
# ### global constants
# In[3]:
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# Column indices into the per-car feature matrix.
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8
# oracle mode (bit flags combined with |)
MODE_ORACLE = 1024 # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2
# oracle mode for training
# NOTE(review): MODE_NOLAP/MODE_NOTRACK reuse bit values 1/2 already taken
# by the TRACKONLY/LAPONLY flags above -- presumably the two groups are
# never combined in one mode word; confirm before adding new flags.
MODE_NOLAP = 1
MODE_NOTRACK = 2
#MODE_NOPITAGE = 512
# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32
# disturbance analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256
# flag value -> name, used by get_modestr() for logging
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
        MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
        MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
        MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
        MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
        MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
# In[ ]:
# In[4]:
def load_data(event, year=0):
    """Load a race's final timing CSV and derive rank / completed-laps views.

    Args:
        event: event name, e.g. 'Indy500'.
        year: optional year; when > 0 the file 'C_<event>-<year>.csv' is
            loaded, otherwise 'C_<event>.csv'.

    Returns:
        (alldata, rankdata, acldata): raw records for all cars, the per-lap
        deduplicated ranking records, and the completed-laps view over all
        cars (via make_cl_data).
    """
    if year > 0:
        # str() fixes a TypeError: the previous code concatenated the int
        # year directly into the path string.
        inputfile = '../data/final/C_' + event + '-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'
    dataset = pd.read_csv(inputfile)
    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1
    # cars whose record set reaches the final lap finished the race
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)
    # keep a full copy before filtering down to finishers
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    # one record per (car, lap): the earliest elapsed_time wins
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # (the finishers-only view previously computed here was never returned
    # or used, so it has been dropped)
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-completed-lap view of an IndyCar timing dataset.

    For every <car_number, completed_laps> pair the record with the smallest
    elapsed_time is kept; per-car lap-over-lap rank and time deltas are then
    derived.

    Args:
        dataset: raw timing DataFrame containing car_number, completed_laps,
            rank, elapsed_time, current_status, track_status, lap_status plus
            the auxiliary columns dropped below.

    Returns:
        DataFrame with columns [car_number, completed_laps, rank,
        elapsed_time, rank_diff, time_diff, current_status, track_status,
        lap_status], sorted by car then lap; the diff columns are zeroed at
        each car boundary.
    """
    # pick up data with valid rank: earliest timing record per (car, lap)
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
                          "last_pitted_lap","start_position","laps_led"], axis=1)
    # uni_ds is already sorted by car and lap, so diff() gives per-lap deltas
    # that only need zeroing at each car boundary.
    boundary = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    # .loc replaces the chained assignment (uni_ds['col'][mask] = 0), which
    # raises SettingWithCopyWarning and can silently fail to write through
    # in newer pandas versions.
    uni_ds.loc[boundary, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[boundary, 'time_diff'] = 0
    df = uni_ds[['car_number','completed_laps','rank','elapsed_time',
                 'rank_diff','time_diff',"current_status", "track_status", "lap_status"]]
    return df
# In[6]:
def nan_helper(y):
    """Return a NaN mask for *y* together with an index-conversion helper.

    Args:
        y: 1-D numpy array that may contain NaNs.

    Returns:
        (nans, index): ``nans`` is a boolean mask of the NaN positions and
        ``index(logical)`` converts such a logical mask to integer indices.

    Example:
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)

    def to_indices(logical):
        return logical.nonzero()[0]

    return mask, to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
def get_modestr(a):
    """Render the mode bits set in *a* as a comma-terminated string.

    Iterates the module-level _mode_map (flag -> name) in its insertion
    order and appends 'name,' for every flag present in *a*.
    """
    parts = ['%s,' % (_mode_map[key]) for key in _mode_map if test_flag(a, key)]
    return ''.join(parts)
# endpos -> vector of prediction_length
_track_pred = {}
_track_true = {}

def init_track_model():
    """Reset the cached track-status predictions and their ground truth."""
    global _track_pred, _track_true
    _track_pred, _track_true = {}, {}
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """
    return the predicted track status

    Samples a caution-phase length from a fixed model and, if a caution
    (yellow flag) run is in progress at the forecast start, extends it
    into the prediction window. Results are cached per *endpos* in the
    module global _track_pred; the matching ground truth is stored in
    _track_true for later comparison.

    input:
        track_rec ; track status vector (1 = yellow flag) up to endpos
        endpos ; split-point index, used as the cache key
        prediction_length ; number of future laps to fill
        context_len ; how many laps back to scan for an ongoing caution run
    """
    global _track_pred,_track_true

    # this is the perfect track model for Indy500 2018
    track_model = [6,4,4,5,6,6,4]

    if endpos in _track_pred:
        # same split point seen before: reuse the cached prediction
        return _track_pred[endpos]
    else:
        #get yflag lap count from the start pred point
        yflaplen = 0
        for i in range(1, context_len):
            if track_rec[- prediction_length - i] == 1:
                yflaplen += 1
            else:
                break

        #laps remain, fill into the future
        trackpred = np.array([0 for x in range(prediction_length)])
        # sample a total caution length; only extend when a caution is ongoing
        # and the sampled length exceeds what has already elapsed
        yflap_pred = random.choice(track_model)
        if yflaplen > 0 and yflap_pred > yflaplen:
            trackpred[:(yflap_pred - yflaplen)] = 1

        _track_pred[endpos] = trackpred
        _track_true[endpos] = track_rec[- prediction_length:].copy()

        return trackpred
# endpos -> vector of prediction_length
_track_adjust = {}

def init_adjust_track_model():
    """Clear the cached per-endpos track-status adjustments."""
    global _track_adjust
    _track_adjust = dict()
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """
    input:
        tailpos ; <0 end pos of 1
    return the predicted track status

    Randomly perturbs the tail of the caution-lap run ending at *tailpos*
    inside the prediction window: -1 drops the last caution lap, 0 keeps
    it, +1 moves it one slot later (NOTE(review): the +1 branch shifts the
    last caution lap rather than appending a new one — confirm intended).
    Results are cached per *endpos* in the module global _track_adjust so
    repeated calls for the same split point return the same adjustment.
    """
    global _track_adjust

    # this is the perfect track model for Indy500 2018
    track_model = [-1,0,1]

    if endpos in _track_adjust:
        # cached: keep the adjustment stable across calls
        return _track_adjust[endpos]
    else:
        yflap_adjust = random.choice(track_model)

        #laps remain, fill into the future
        trackadjust = track_rec[-prediction_length:].copy()
        if yflap_adjust == -1:
            trackadjust[tailpos] = 0
        elif yflap_adjust == 1:
            trackadjust[tailpos] = 0
            if (tailpos + 1) <= -1:
                trackadjust[tailpos+1] = 1

        _track_adjust[endpos] = trackadjust

        return trackadjust
# carno -> lap_status
# cache of adjusted lap-status vectors per car
_lap_adjust = {}
# statistics of the pit-lap shifts actually applied (offset -> count)
_empirical_model = {}

def init_adjust_pitmodel():
    """Reset the per-car lap-status cache and the shift statistics.

    Bugfix: the original declared only `global _lap_adjust`, so the
    `_empirical_model = {}` assignment created a dead local and the
    module-level statistics were never cleared between runs.
    """
    global _lap_adjust, _empirical_model
    _lap_adjust = {}
    _empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """
    init the lapstatus for each car, save it for future reference

    Randomly shifts every pit lap by an offset drawn from the module-level
    _adjust_model distribution, tallies the applied offsets in
    _empirical_model, and caches the adjusted vector per car in _lap_adjust
    so each car is only adjusted once.

    input:
        carno; car number, used as the cache key
        lapstatus ; the trueth (ground-truth pit vector, 1 = pit lap)
        force ; True = keep resampling until the shifted position lands in
                range; False = a single attempt per pit lap (the break
                exits the retry loop after the first draw)
    """
    if carno not in _lap_adjust:
        #adjust it
        lapadjust = lapstatus.copy()
        for pos in range(0, len(lapstatus)):
            if lapadjust[pos] == 1:
                success = False
                while(not success):
                    # adjust this pit lap position
                    pos_adjust = get_random_choice(_adjust_model)

                    new_pos = pos + pos_adjust
                    if new_pos >= 0 and new_pos < len(lapstatus):
                        #valid
                        lapadjust[pos] = 0
                        lapadjust[new_pos] = 1
                        success = True

                        #add statistics
                        if pos_adjust not in _empirical_model:
                            _empirical_model[pos_adjust] = 1
                        else:
                            _empirical_model[pos_adjust] += 1

                    if force==False:
                        break

        _lap_adjust[carno] = lapadjust

    return _lap_adjust[carno]
def build_random_model(modeldict):
    """
    Convert a {value: weight} mapping into an inverse-CDF sampling table.

    input:
        modeldict ; {val: probability} (weights need not sum to 1)
    return:
        model ; ndarray of [val, cdf] rows, ordered by val, with the
                cdf column normalized so the last entry is 1.0
    """
    values = sorted(modeldict.keys())
    model = np.zeros((len(values), 2))
    running = 0
    for row, val in enumerate(values):
        running = running + modeldict[val]
        model[row, 0] = val
        model[row, 1] = running
    #normalize
    model[:, 1] = model[:, 1] / running
    return model
def print_model(model, iscdf=True):
    """
    Pretty-print a sampling table as a list of 'val:prob' strings.

    input:
        model ; [val, cdf] rows; when iscdf is False column 1 holds raw
                weights instead of cumulative values
    """
    ordered = model[np.argsort(model[:, 0])]
    total = 1. if iscdf else np.sum(ordered[:, 1])
    pairs = []
    prev_cdf = 0
    for val, col in ordered:
        # undo the cumulative sum (or just normalize raw weights)
        pairs.append((val, (col - prev_cdf) / total))
        if iscdf:
            prev_cdf = col
    #output
    print(['%d:%.3f' % (v, p) for v, p in pairs])
def get_random_choice(model):
    """
    Draw a value from a [val, cdf] table by inverse-CDF sampling.

    input:
        model ; [val, cdf] rows, cdf ascending with final entry 1.0
    return:
        an int value drawn according to its probability
    """
    u = np.random.rand()
    # number of cdf entries strictly below u == index of the drawn row
    row = int(np.sum(model[:, 1] < u))
    return int(model[row, 0])
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
# pit-lap shift distribution: offset -> weight (build_random_model
# normalizes, so weights need not sum to 1). The zero-shift weight was
# lowered from 0.4 to 0.05 so most pit laps get moved.
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
_adjust_model = build_random_model(_modeldict)
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """
    Randomly shift each pit lap inside the trailing prediction window.

    Each pit lap (status 1) in the last *prediction_length* entries is
    moved by an offset drawn from the module-level _adjust_model
    distribution.

    input:
        lap_rec ; lap status vector (1 = pit lap)
        prediction_length ; window length at the tail of lap_rec
        force ; True = resample until the shifted position lands inside
                the window; False = single attempt per pit lap (the break
                exits the retry loop after the first draw)
    return the predicted lap status (adjusted copy of the window)
    """
    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            success = False
            while(not success):
                # adjust this pit lap position
                pos_adjust = get_random_choice(_adjust_model)

                new_pos = pos + pos_adjust
                if new_pos >= 0 and new_pos < prediction_length:
                    #valid
                    lapadjust[pos] = 0
                    lapadjust[new_pos] = 1
                    success = True
                if force==False:
                    break
    return lapadjust
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
"""
input:
tailpos ; <0 end pos of 1
return the predicted lap status
"""
adjust_model = [-1,0,1]
lap_adjust = random.choice(adjust_model)
#laps remain, fill into the future
lapadjust = lap_rec[-prediction_length:].copy()
for pos in range(0, prediction_length):
if lapadjust[pos] == 1:
# adjust this pit lap position
pos_adjust = random.choice(adjust_model)
if pos_adjust == -1:
if (pos - 1 >= 0):
lapadjust[pos] = 0
lapadjust[pos - 1] = 1
elif pos_adjust == 1:
if (pos + 1 < prediction_length):
lapadjust[pos] = 0
lapadjust[pos + 1] = 1
return lapadjust
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
    """
    return the predicted pit status

    Samples a total stint length from fixed empirical distributions
    (Indy500 2018): the "low" model (~30-40 laps) when many caution laps
    were run in the current stint, the longer model (~41-49 laps)
    otherwise; marks the sampled pit lap in the prediction window when it
    falls inside it.

    input:
        cuation_laps_instint ; caution laps in the current stint
                               (parameter name typo kept for
                               call-site compatibility)
        laps_instint ; laps already completed in the current stint
        prediction_length ; number of future laps to fill
    """
    # this is the perfect empirical pit model for Indy500 2018
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
     [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
     [46, 45, 43, 48, 46, 45, 45, 43]]

    pit_model = pit_model_all
    if cuation_laps_instint>10:
        #use low model
        pred_pit_laps = random.choice(pit_model[0])
    else:
        pred_pit_laps = random.choice(pit_model[1])

    #laps remain, fill into the future
    pitpred = np.array([0 for x in range(prediction_length)])
    if (pred_pit_laps > laps_instint) and (pred_pit_laps <= laps_instint + prediction_length):
        pitpred[pred_pit_laps - laps_instint - 1] = 1

    return pitpred
#dynamical/static feature configure
# flags selecting which dynamic features feed the model (see
# make_dataset_byevent's feature_mode switch)
#FEATURE_CARID = 1
FEATURE_STATUS = 2     # track status + lap (pit) status
FEATURE_PITAGE = 4     # status features plus laps-in-stint (pit age)
FEATURE_TRACKONLY = 8  # track status only
def make_dataset_byevent(runs, prediction_length, freq,
                   useeid = False,
                   run_ts= COL_LAPTIME,
                   test_event = 'Indy500-2018',
                   test_cars = [],
                   use_global_dict = True,
                   oracle_mode = MODE_ORACLE,
                   feature_mode = FEATURE_STATUS,
                   half_moving_win = 0,
                   train_ratio=0.8,
                   context_ratio = 0.,
                   verbose = False
            ):
    """
    split the ts to train and test part by the ratio

    Builds gluonts ListDatasets from the module-level laptime_data: every
    event other than *test_event* goes entirely to the train set; for the
    test event a rolling-window family of truncated series is generated,
    with the trailing prediction window's track/lap covariates rewritten
    according to *oracle_mode* (oracle, zeroed, model-predicted, or
    disturbance-adjusted).

    input:
        runs ; index into laptime_data for one event, or <0 for all events
        prediction_length ; forecast horizon in laps
        freq ; pandas frequency string for the synthetic timestamps
        useeid ; include the event id as a second static feature
        run_ts ; row of each record used as the target (laptime/rank/...)
        test_event ; event name routed to the test set
        test_cars ; optional whitelist of car numbers for the test set
                    (NOTE(review): mutable default, but only read here)
        use_global_dict ; map car numbers through global_carids
        oracle_mode: false to simulate prediction in real by
                set the covariates of track and lap status as nan in the testset
        feature_mode ; FEATURE_* flag choosing the dynamic features
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
        train_ratio ; fraction of max ts length; shorter series are dropped
        context_ratio ; minimum observed context before the first window
        verbose ; print per-event/per-car statistics
    return:
        train_ds, test_ds ; gluonts ListDataset objects
        train_set, test_set ; the underlying record lists
    """
    #force
    #run_ts = _run_ts
    #test_event = _test_event
    # NOTE(review): the two parameters below are overridden by module
    # globals, so the feature_mode/context_ratio keyword arguments are
    # effectively ignored — confirm this is still intended
    feature_mode = _feature_mode
    context_ratio = _context_ratio

    init_track_model()
    init_adjust_track_model()

    # NOTE(review): Timestamp(freq=...) is deprecated in newer pandas —
    # confirm the pinned pandas version still accepts it
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    train_set = []
    test_set = []

    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()

    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []

        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out
            # NOTE(review): non-test events are skipped entirely here, so
            # the train branch below is only reached when this continue is
            # removed/changed — the train set stays empty in this mode
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)

        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            #check carno in test_cars
            if len(test_cars)>0 and carno not in test_cars:
                continue

            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)

            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            # NOTE(review): duplicated NOLAP check below — harmless but redundant
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': rec[run_ts,:].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                       rec[COL_LAPSTATUS,:]]
                              }
                              )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len

                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1

                #bug fix, fixed the split point for all cars/ts
                for endpos in range(max_len, context_len+prediction_length,
                                    step):

                    #check if enough for this ts
                    if endpos > totallen:
                        continue

                    # working copies of the covariates up to this split point
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    # stint state at the last observed lap before the window
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                                     laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec

                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec

                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec : accumulate the MAE of
                    # the rewritten covariates against the oracle values
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))

                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': rec[run_ts,:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': rec[run_ts,:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_TRACKONLY:
                        _test.append({'target': rec[run_ts,:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec]
                                 }
                              )

                    test_rec_cnt += 1

            #add one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

        train_set.extend(_train)
        test_set.extend(_test)

    print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)

    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile,freq, prediction_length, cardinality, train_ds, test_ds):
    """
    Pickle the dataset bundle [freq, prediction_length, cardinality,
    train_ds, test_ds] to *datafile* using the highest pickle protocol.
    """
    bundle = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as f:
        pickle.dump(bundle, f, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[7]:
def predict(test_ds,predictor):
    """
    Run the gluonts evaluation loop over *test_ds* with *predictor*.

    return:
        tss ; list of ground-truth series
        forecasts ; list of forecast objects (100 sample paths each)
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,      # test dataset
        predictor=predictor,  # predictor
        num_samples=100,      # number of sample paths we want for evaluation
    )
    forecasts = [f for f in forecast_it]
    tss = [t for t in ts_it]
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name,trainid):
    """
    Load the requested model and run forecasts over *test_ds*.

    The six serialized deepAR variants shared near-identical branches in
    the original; they are deduplicated through a name -> path-parts table
    (the on-disk directory names are unchanged).

    input:
        test_ds ; gluonts dataset to predict on
        prediction_length ; forecast horizon, also part of the model path
        model_name ; a serialized deepAR variant, or a baseline
                     ('naive', 'zero', 'arima')
        trainid ; training-run id, part of the model directory path
    return:
        [tss, forecasts] on success, [] for an unknown model_name
    """
    # serialized models: name -> (prefix, dataset tag, suffix tag) used to
    # build the model directory name
    serialized = {
        'curtrack': ('deepAR-Oracle', 'curtrack', 'curtrack'),
        'zerotrack': ('deepAR-Oracle', 'nolap-zerotrack', 'zerotrack'),
        'oracle': ('deepAR-Oracle', 'all', 'oracle'),
        'oracle-laponly': ('deepAR-Oracle', 'all', 'oracle-laponly'),
        'oracle-trackonly': ('deepAR-Oracle', 'all', 'oracle-trackonly'),
        'deepAR': ('deepAR', 'all', 'deepar'),
    }
    with mx.Context(mx.gpu(7)):
        pred_ret = []

        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'

        if model_name in serialized:
            prefix, tag, suffix = serialized[model_name]
            model = (f'{prefix}-{_task_id}-{tag}-indy-f1min-t{prediction_length}'
                     f'-e1000-r1_{suffix}_t{prediction_length}')
            modeldir = rootdir + model

            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')

            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]

        # naive
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]

        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]

        # arima
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                                prediction_length = prediction_length,trunc_length=60)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]

        else:
            print(f'error: model {model_name} not support yet!')

        return pred_ret
# In[8]:
def load_model(model_name,trainid, prediction_length):
    """
    Deserialize (or construct) the predictor for *model_name*.

    The serialized deepAR variants shared near-identical branches in the
    original; they are deduplicated through a name -> path-parts table
    (the on-disk directory names are unchanged).

    Bugfix: an unknown model_name previously fell through to
    `return predictor` with the local unbound, raising UnboundLocalError;
    it now returns None after printing the error.

    input:
        model_name ; a serialized deepAR variant, or a baseline
                     ('naive', 'zero', 'arima')
        trainid ; training-run id, part of the model directory path
        prediction_length ; forecast horizon, also part of the model path
    return:
        the predictor, or None for an unknown model_name
    """
    # serialized models: name -> (prefix, dataset tag, suffix tag) used to
    # build the model directory name
    serialized = {
        'curtrack': ('deepAR-Oracle', 'curtrack', 'curtrack'),
        'zerotrack': ('deepAR-Oracle', 'nolap-zerotrack', 'zerotrack'),
        'oracle': ('deepAR-Oracle', 'all', 'oracle'),
        'oracle-laponly': ('deepAR-Oracle', 'all', 'oracle-laponly'),
        'oracle-trackonly': ('deepAR-Oracle', 'all', 'oracle-trackonly'),
        'deepAR': ('deepAR', 'all', 'deepar'),
        'deepARW': ('deepARW', 'all', 'deepar'),
        'deepARW-oracle': ('deepARW-Oracle', 'all', 'oracle'),
    }
    with mx.Context(mx.gpu(7)):
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'

        if model_name in serialized:
            prefix, tag, suffix = serialized[model_name]
            model = (f'{prefix}-{_task_id}-{tag}-indy-f1min-t{prediction_length}'
                     f'-e1000-r1_{suffix}_t{prediction_length}')
            modeldir = rootdir + model

            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')

        # naive
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)

        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)

        # arima
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                                prediction_length = prediction_length,trunc_length=60)

        else:
            print(f'error: model {model_name} not support yet!')
            return None

        return predictor
#
#calc rank
def eval_rank_bytimediff(test_ds,tss,forecasts,prediction_length):
    """
    Evaluate rank prediction for timediff models (rank derived directly
    from the time gap); works for one event only.

    input:
        test_ds ; test set of a single event
        tss, forecasts ; forecast result aligned with test_ds
        prediction_length ; expected forecast horizon
    return:
        rank_ret ; list of [lap, time_diff, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [true, predicted]}}
    """
    carlist = []

    # carno-lap# -> timediff arrays (true, predicted)
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: the message was missing its f-prefix and printed the
            # placeholders literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bugfix: return a pair so callers unpacking two values get empties
            return [], {}

        # median over the sample paths as the point forecast
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        timediff_array = tss[idx].values.copy()

        #save the prediction
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                                               forecast_laptime_mean.copy()]

    # calc rank: at every split point, order the cars by (predicted) time diff
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}

        #fill in data
        time_diff = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        # double argsort converts sort order into per-car rank positions
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, time_diff, true_rank, pred_rank])

    return rank_ret,forecasts_et
#calc rank
def eval_rank_bylaptime(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
    """
    carlist = []

    # carno-lap# -> elapsed_time[] array
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        # elapsed time at lap 0 for this car
        offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: the message was missing its f-prefix and printed the
            # placeholders literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bugfix: return a pair so callers unpacking two values get empties
            return [], {}

        # mean over the sample paths as the point forecast
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))

        # true elapsed time: cumulative laptime plus the lap-0 offset
        laptime_array = tss[idx].values.copy()
        elapsed_time = np.cumsum(laptime_array) + offset

        # predicted elapsed time: forecast window replaced by the mean forecast
        laptime_array = tss[idx].values.copy()
        laptime_array[-prediction_len:] = forecast_laptime_mean
        elapsed_time_hat = np.cumsum(laptime_array) + offset

        #save the prediction
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]

    # calc rank: at every split point, order the cars by (predicted) elapsed time
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}

        #fill in data
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0]
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1]

        # double argsort converts sort order into per-car rank positions
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])

    return rank_ret,forecasts_et
def eval_laptime(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    Collect true and predicted lap times per split point (no ranking).

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                contain features to identify the eventid
        start_offset[]; unused here, kept for signature parity with eval_rank
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, lap_time, true_laptime, pred_laptime]
        forecasts_et ; {[completed_laps][carno]} ->(laptime, laptime_pred)
    """
    carlist = []

    # carno-lap# -> laptime arrays (true, predicted)
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: the message was missing its f-prefix and printed the
            # placeholders literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bugfix: return a pair so callers unpacking two values get empties
            return [], {}

        # mean over the sample paths as the point forecast
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))

        laptime_array = tss[idx].values.copy()
        laptime_array_hat = tss[idx].values.copy()
        laptime_array_hat[-prediction_len:] = forecast_laptime_mean

        #save the prediction
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
                                               laptime_array_hat[-prediction_len:].copy()]

    # gather per split point
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}

        #fill in data
        lap_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        true_laptime = lap_time[0]
        pred_laptime = lap_time[1]

        rank_ret.append([lap, lap_time, true_laptime, pred_laptime])

    return rank_ret,forecasts_et
#calc rank
def eval_rank(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    Works in two modes, selected by the type of *start_offset*: a
    DataFrame enables laptime -> elapsed-time conversion (cumsum plus the
    lap-0 offset); any other value treats the target series as the
    ranking quantity directly.

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
    """
    carlist = []

    # carno-lap# -> elapsed_time[] array
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        # lap-0 elapsed time; only defined (and used) in laptime mode
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: the message was missing its f-prefix and printed the
            # placeholders literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bugfix: return a pair so callers unpacking two values get empties
            return [], {}

        # mean over the sample paths as the point forecast
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))

        if isinstance(start_offset, pd.core.frame.DataFrame):
            # laptime mode: convert to elapsed time via cumsum + lap-0 offset
            laptime_array = tss[idx].values.copy()
            elapsed_time = np.cumsum(laptime_array) + offset

            laptime_array = tss[idx].values.copy()
            laptime_array[-prediction_len:] = forecast_laptime_mean
            elapsed_time_hat = np.cumsum(laptime_array) + offset
        else:
            # rank-direct mode: the target already orders the cars
            elapsed_time = tss[idx].values.copy()
            elapsed_time_hat = tss[idx].values.copy()
            elapsed_time_hat[-prediction_len:] = forecast_laptime_mean

        #save the prediction
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]

    # calc rank: at every split point, order the cars by (predicted) value
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}

        #fill in data
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        # double argsort converts sort order into per-car rank positions
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])

    return rank_ret,forecasts_et
def get_acc(rank_ret, prediction_length, verbose=False):
    """
    Aggregate ranking-accuracy metrics over a list of rank records.

    input:
        rank_ret : list of [lap, elapsed_time, true_rank, pred_rank];
                   only the rank columns ([2] and [3]) are used
    return:
        ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
         (record count for each metric))
        the second tuple lets the caller build micro/macro averages
    """
    hit1 = hit1_far = hit5 = hit5_far = 0
    tau_total = 0
    rmse_total = 0.

    for rec in rank_ret:
        true_rank, pred_rank = rec[2], rec[3]

        # top-1: exact winner hits on all predicted laps / on the farthest lap
        hit1 += np.sum((true_rank == 0) & (pred_rank == 0))
        hit1_far += np.sum((true_rank[:, -1] == 0) & (pred_rank[:, -1] == 0))

        # top-5 membership hits
        hit5 += np.sum((true_rank < 5) & (pred_rank < 5))
        hit5_far += np.sum((true_rank[:, -1] < 5) & (pred_rank[:, -1] < 5))

        # rank correlation and squared rank error
        kt, _ = stats.kendalltau(true_rank, pred_rank)
        tau_total += kt
        rmse_total += mean_squared_error(pred_rank, true_rank)

    recnt = len(rank_ret)
    if recnt > 0:
        top1acc = hit1 * 1.0 / (recnt * prediction_length)
        top1acc_farmost = hit1_far * 1.0 / recnt
        top5acc = hit5 * 1.0 / (5 * recnt * prediction_length)
        top5acc_farmost = hit5_far * 1.0 / (5 * recnt)
        tau = tau_total / recnt
        rmse = rmse_total / recnt
    else:
        # no records: report the raw (zero) accumulators unchanged
        top1acc, top1acc_farmost = hit1, hit1_far
        top5acc, top5acc_farmost = hit5, hit5_far
        tau = tau_total
        rmse = rmse_total

    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', top1acc,
              'top1acc_farmost=', top1acc_farmost,
              'top5acc=', top5acc,
              'top5acc_farmost=', top5acc_farmost,
              )
        print('tau = ', tau,
              'rmse = ', rmse)

    return ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
            (recnt*prediction_length, recnt, 5*recnt*prediction_length, 5*recnt, recnt, recnt))
# In[9]:
def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
            test_cars = [],
            datamode = MODE_ORACLE, models = ['oracle']):
    """
    Run one evaluation experiment on the globally configured test event.

    dependency: test_event, test on one event only

    input:
        prediction_length ; forecast horizon in laps
        half_moving_win   ; window-shift mode passed to make_dataset_byevent
        train_ratio, trainid ; training split / trained-model identifier
        test_cars ; restrict evaluation to these car numbers (empty = all)
        datamode  ; oracle-mode bitmask controlling feature construction
        models    ; list of model names to evaluate on the same test set
    return:
        pred_ret[model]    -> [tss, forecasts]
        ds_ret[model]      -> test_ds
        rank_result[model] -> (rank_ret, forecast_ret)
        retdf              ; one metrics row per model
    """
    retdf = []
    pred_ret = {}
    ds_ret = {}
    rank_result = {}

    ### create test dataset (shared by all models in this run)
    train_ds, test_ds, _, _ = make_dataset_byevent(events_id[_test_event], prediction_length, freq,
                                     oracle_mode=datamode,
                                     run_ts = _run_ts,
                                     test_event = _test_event,
                                     test_cars=test_cars,
                                     half_moving_win= half_moving_win,
                                     train_ratio=train_ratio)

    for model in models:
        print('exp:', inspect.stack()[0][3], 'model:', model,
              'datamode:', get_modestr(datamode), 'eval:', _exp_id )

        tss, forecasts = run_prediction_ex(test_ds, prediction_length, model,
                                           trainid=trainid)
        pred_ret[model] = [tss, forecasts]
        ds_ret[model] = test_ds

        # dispatch on the globally configured evaluation mode (_exp_id)
        if _exp_id=='rank':
            #rank prediction
            rank_ret, forecast_ret = eval_rank(test_ds, tss, forecasts, prediction_length,
                                               0)
        elif _exp_id=='laptime2rank':
            rank_ret, forecast_ret = eval_rank(test_ds, tss, forecasts, prediction_length,
                                               global_start_offset[_test_event])
        elif _exp_id=='timediff2rank':
            rank_ret, forecast_ret = eval_rank_bytimediff(test_ds, tss, forecasts, prediction_length)
        elif _exp_id=='laptime':
            #laptime instead
            rank_ret, forecast_ret = eval_laptime(test_ds, tss, forecasts, prediction_length,
                                                  global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break

        # one summary row per model: config columns + accuracy metrics
        metrics = get_acc(rank_ret, prediction_length)
        ret = [model, prediction_length, half_moving_win, get_modestr(datamode), trainid]
        ret.extend(metrics[0])
        retdf.append(ret)

        rank_result[model] = (rank_ret, forecast_ret)

    return pred_ret, ds_ret, rank_result, retdf
# In[10]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc, datamode='', models=[]):
    """
    Run the (halfmode x plen x trainid) sweep `runs` times and average.

    input:
        plens=[2,5,10]
        half=[False]
        trainids = ["r0.5"]
        runs = 5
        train_ratio=0.5
        testfunc ; either a callable (run_exp_predpit, run_exp_predtrack, ...)
                   or a string, in which case run_exp is used with
                   datamode/models
    return:
        dfret ; average result of multiple runs
            dataframe['model' , 'prediction_length', 'halfmode','datamode','trainid',
                'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse',
                'top1acc_std','top1acc_farmost_std','top5acc_std','top5acc_farmost_std','tau_std','rmse_std']
        alldata_ret ; for debug
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
            pred_ret[model] -> [tss, forecasts]
            test_ds[model] -> test_ds
            rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
                forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    if plens == [] or half == [] or trainids == []:
        print("error with empty settings")
        return

    # testfunc must be a callable OR a string together with (datamode & models)
    if isinstance(testfunc, str) and (datamode == '' or models == []):
        print("error with testfunc")
        return

    allret = []
    alldata_ret = []
    for runid in range(runs):
        exp_data = []
        exp_result = []
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('='*10)
                    if not isinstance(testfunc, str):
                        pred_ret, test_ds, rank_ret, metric_ret = testfunc(plen, halfmode,
                                train_ratio=train_ratio,
                                trainid=trainid)
                    else:
                        pred_ret, test_ds, rank_ret, metric_ret = run_exp(plen, halfmode,
                                train_ratio=train_ratio,
                                trainid=trainid,
                                datamode=datamode,
                                models=models)

                    #save
                    exp_data.append((pred_ret, test_ds, rank_ret))
                    exp_result.extend(metric_ret)

        #save result of this run as one dataframe
        result = pd.DataFrame(exp_result, columns = ['model' , 'prediction_length', 'halfmode',
                                    'datamode','trainid',
                                    'top1acc','top1acc_farmost','top5acc',
                                    'top5acc_farmost','tau','rmse'])
        #result['runid'] = [runid for x in range(len(result))]
        allret.append(result)
        alldata_ret.append(exp_data)

    #final: stack the metric columns of all runs into [run, row, metric]
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :, :] = ret[['top1acc','top1acc_farmost','top5acc',
                                    'top5acc_farmost','tau','rmse']].values

    #average and std over the run axis
    averagemat = np.mean(metrics[:,:,:], axis=0)
    stdmat = np.std(metrics[:,:,:], axis=0)

    # config columns are identical across runs; take them from the first run
    dfhead = allret[0][['model' , 'prediction_length', 'halfmode', 'datamode','trainid']]
    dfaverage = pd.DataFrame(averagemat, columns = ['top1acc','top1acc_farmost','top5acc',
                                    'top5acc_farmost','tau','rmse'])
    dfstd = pd.DataFrame(stdmat, columns = ['top1acc_std','top1acc_farmost_std','top5acc_std',
                                    'top5acc_farmost_std','tau_std','rmse_std'])
    dfret = pd.concat([dfhead, dfaverage, dfstd], axis=1)

    #if exp_id != '':
    #    dfret.to_csv(f'laptime2rank-evaluate-indy500-{exp_id}-result.csv', float_format='%.3f')
    return dfret, alldata_ret
def checkret_status(dataret, runid = 0, idx = 0, model='oracle'):
    """
    Print the yellow-flag / pit-stop counts inside the prediction window.

    alldata_ret layout (for debugging):
        [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
        pred_ret[model] -> [tss, forecasts]
        test_ds[model]  -> test_ds
        rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
            forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    rundata = dataret[runid][idx]
    # the horizon length comes from the forecast sample matrix
    _, plen = rundata[0][model][1][0].samples.shape
    test_ds = rundata[1][model]

    yfcnt = 0
    pitcnt = 0
    for test_rec in test_ds:
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        track_rec, lap_rec = test_rec['feat_dynamic_real']
        # accumulate caution laps and pit stops inside the forecast horizon
        yfcnt += np.sum(track_rec[-plen:])
        pitcnt += np.sum(lap_rec[-plen:])

    print('yfcnt:', yfcnt, 'pitcnt:', pitcnt)
def get_ref_oracle_testds(plens, halfs, train_ratio=0.8, test_cars = []):
    """
    Build oracle-mode reference test datasets for every
    (prediction_length, half_moving_win) combination.

    return: dict keyed by '%d-%d' % (prediction_length, half_moving_win)
    """
    testset = {}
    for plen in plens:
        for halfmode in halfs:
            _, ds, _, _ = make_dataset_byevent(events_id[_test_event], plen, freq,
                                 oracle_mode=MODE_ORACLE,
                                 run_ts = _run_ts,
                                 test_event = _test_event,
                                 test_cars=test_cars,
                                 half_moving_win= halfmode,
                                 train_ratio = train_ratio
                                 )
            # key format must match the lookups in checkret_confusionmat
            testset['%d-%d'%(plen, halfmode)] = ds

    return testset
def checkret_confusionmat(dataret, ref_testset, runid= 0, testid = '', model='oracle'):
    """
    Break the ranking accuracy down by <trackstatus, lapstatus> lap type.

    output the 4x4 confusion matrix split by track and lap status
    input:
        dataret ; alldata_ret from run_test (see its docstring)
        ref_testset ; oracle test datasets keyed '%d-%d' % (plen, halfmode),
                      used to read the true track/pit features per lap
    return:
        dataframe with one row per (plen, laptype) plus an 'aa' row for all
    """
    plen_length = len(dataret[runid])
    dflist = []
    for idx in range(plen_length):
        # horizon length from the forecast sample matrix
        _, plen = dataret[runid][idx][0][model][1][0].samples.shape
        test_ds = dataret[runid][idx][1][model]
        rank_ret = dataret[runid][idx][2][model][0]

        # reference datasets were built with half_moving_win == 0
        key = '%d-%d'%(plen, 0)
        if key not in ref_testset:
            print(f'error, {key} not found in ref_testset')
            continue
        ref_oracle_testds = ref_testset[key]

        if len(ref_oracle_testds) != len(test_ds):
            print('error, size of testds mismatch', len(ref_oracle_testds), len(test_ds))
            continue

        # confusion matrix for <trackstatus, lapstatus> type: 00,01,10,11
        # lap(start lap of prediction) -> accumulated (yellow, pit) counts
        lapmap = {}
        ds_iter = iter(ref_oracle_testds)
        for recid in range(len(ref_oracle_testds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            #track_rec,lap_rec = test_rec['feat_dynamic_real']
            dyna_feats = test_rec['feat_dynamic_real']
            track_rec = dyna_feats[0]
            lap_rec = dyna_feats[1]

            # counts inside the forecast horizon of this record
            yfcnt = np.sum(track_rec[-plen:])
            pitcnt = np.sum(lap_rec[-plen:])
            #laptype = ('0' if yfcnt==0 else '1') + ('0' if pitcnt==0 else '1')
            lap = len(track_rec) - plen + 1
            if lap not in lapmap:
                #lapmap[lap] = laptype
                lapmap[lap] = (yfcnt, pitcnt)
            else:
                # accumulate over all cars starting at the same lap
                oldtype = lapmap[lap]
                lapmap[lap] = (yfcnt + oldtype[0], pitcnt + oldtype[1])

        #split the rank_ret by laptype and score each subset
        types=['00','10','01','11']
        acc_ret = []
        for laptype in types:
            check_ret = []
            for item in rank_ret:
                typecnt = lapmap[item[0]]
                thetype = ('0' if typecnt[0]==0 else '1') + ('0' if typecnt[1]==0 else '1')
                if thetype == laptype:
                    check_ret.append(item)

            # get acc for this lap type only
            metrics = get_acc(check_ret, plen)
            recret = [testid, plen, laptype, len(check_ret)]
            recret.extend(metrics[0])
            acc_ret.append(recret)

        #add all test ('aa' = every lap type together)
        metrics = get_acc(rank_ret, plen)
        recret = [testid, plen, 'aa', len(rank_ret)]
        recret.extend(metrics[0])
        acc_ret.append(recret)

        _dfacc = pd.DataFrame(acc_ret, columns = ['testid','plen',
                'type','reccnt','top1acc','top1acc_farmost','top5acc',
                'top5acc_farmost','tau','rmse'])
        dflist.append(_dfacc)

    dfacc = pd.concat(dflist, axis=0)
    return dfacc
# In[11]:
def check_testds(datamode, test_cars=[]):
    """
    report mae, etc

    Builds the test dataset for every configured (prediction_length,
    half_moving_win) pair; the datasets are created and then discarded.
    """
    for plen in plens:
        for halfmode in half:
            train_ds, test_ds, _, _ = make_dataset_byevent(events_id[_test_event], plen, freq,
                                oracle_mode=datamode,
                                run_ts = _run_ts,
                                test_event = _test_event,
                                test_cars=test_cars,
                                half_moving_win= halfmode,
                                train_ratio=train_ratio)
def dotest(config):
    """
    Run every (model, testname -> datamode) combination in *config* and
    collect both the averaged metrics and the confusion-matrix breakdown.

    return: (dfret, dfacc) concatenated over all combinations
    """
    dflist, acclist = [], []
    for model, conf in config.items():
        for teststr, datamode in conf.items():
            df, dataret = run_test(runs, plens, half, trainids,
                   train_ratio, teststr, datamode=datamode, models=[model])
            # contingency breakdown against the oracle reference datasets
            acc = checkret_confusionmat(dataret, ref_testset,
                    testid = teststr, model=model)
            dflist.append(df)
            acclist.append(acc)

    return pd.concat(dflist, axis=0), pd.concat(acclist, axis=0)
# ### init
# In[12]:
#
# configuration
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
_dataset_id = 'indy2013-2018-nocarid'
#_dataset_id = 'indy2013-2018'
_test_event = 'Indy500-2019'

_feature_mode = FEATURE_STATUS

_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...

# filled in by init(): lap0 elapsed time per event, car-id mapping, raw data
global_start_offset = {}
global_carids = {}
laptime_data = None
freq = "1min"  # time-series frequency for the gluonts datasets
decode_carids = {}

years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
#dbid = f'Indy500_{years[0]}_{years[-1]}'
dbid = f'Indy500_{years[0]}_{years[-1]}_v9'
def init():
    """
    Load per-event data and the pre-built pickle into the module globals.

    Fills: global_start_offset (lap0 elapsed time per car per event),
    global_carids, laptime_data and decode_carids.
    """
    global global_carids, laptime_data, global_start_offset, decode_carids

    stagedata = {}
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata = stagedata[event]
        #offset: elapsed time at lap 0 for every car of this event
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]

    # start from here
    import pickle
    #with open('laptime_rank_timediff_fulltest-oracle-%s.pickle'%year, 'rb') as f:
    with open(f'laptime_rank_timediff_pit-oracle-{dbid}.pickle', 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')

    # invert the carno -> carid map for decoding predictions
    decode_carids={carid:carno for carno, carid in global_carids.items()}
# In[15]:
#useeid = False
#interpolate = False
#ipstr = '-ip' if interpolate else '-noip'
#ipstr = '%s-%s'%('ip' if interpolate else 'noip', 'eid' if useeid else 'noeid')
#if useeid:
#    cardinality = [len(global_carids), len(laptime_data)]
#else:
#    cardinality = [len(global_carids)]

# ### oracle test
# In[16]:
### test settings shared by run_test()/mytest()
plens=[2,5,10]       # prediction lengths to sweep
half=[0]             # half_moving_win modes
trainids = ["2018"]  # trained-model identifiers
#trainids = ["r0.5","r0.6"]
runs = 1             # repetitions per configuration
ref_testset = None   # filled by mytest() with oracle reference datasets
_context_ratio = 0.
train_ratio = 0.4
def mytest():
    """
    Run the full oracle/baseline evaluation sweep and persist the results.

    Skips the run and re-reads the CSVs when the output file already exists.
    return: (dfacc, dfret)
    """
    global ref_testset
    #half=[True, False]
    #plens=[2,5,10,20,30]
    acc_output = f'{_exp_id}-evaluate-mean-splitbyevent-fulltest-contigency-d{_dataset_id}-t{_test_event}-r{runs}-c{_context_ratio}-result.csv'
    ret_output = f'{_exp_id}-evaluate-mean-splitbyevent-fulltest-all-d{_dataset_id}-t{_test_event}-r{runs}-c{_context_ratio}-result.csv'

    # resume support: reuse previously saved results
    if os.path.exists(ret_output):
        print(f'{ret_output} already exists, bye')
        dfacc = pd.read_csv(acc_output)
        dfret = pd.read_csv(ret_output)
        return dfacc, dfret

    # model -> {testname -> datamode bitmask}
    config = {'oracle':
              {# features in train and test
               'fulloracle':MODE_ORACLE,
               'notracklap':MODE_NOTRACK + MODE_NOLAP,
               'laponly':MODE_ORACLE_LAPONLY,
               'trackonly':MODE_ORACLE_TRACKONLY,
               # features in test
               'fullpred':MODE_PREDTRACK + MODE_PREDPIT,
               'predtrack':MODE_PREDTRACK + MODE_ORACLE_TRACKONLY,
               'predpit':MODE_PREDPIT + MODE_ORACLE_LAPONLY,
               'curtrack':MODE_TESTCURTRACK,
               'zerotrack':MODE_TESTZERO
              },
              'deepAR':{'deepAR':MODE_ORACLE},
              'naive':{'naive':MODE_ORACLE},
              'zero':{'zero':MODE_ORACLE}
             }

    # oracle datasets used by checkret_confusionmat for lap-type labels
    ref_testset = get_ref_oracle_testds(plens, half, train_ratio=train_ratio)

    dfret, dfacc = dotest(config)

    dfret.to_csv(ret_output, float_format='%.3f')
    dfacc.to_csv(acc_output, float_format='%.3f')
    #dfacc[dfacc['type']=='aa']
    return dfacc, dfret
# In[ ]:
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)

    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))

    # cmd argument parser
    usage = 'evaluate-fulltest-fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    parser.add_option("--contextratio", dest="contextratio", default=0.)

    opt, args = parser.parse_args()

    #set global parameters from the command line
    _dataset_id = opt.datasetid
    _test_event = opt.testevent

    # each task selects the target time series column and evaluation mode
    if opt.taskid == 'laptime':
        _task_id = 'laptime' # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
        _exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff' # rank,laptime, the trained model's task
        _run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
        _exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'rank':
        _task_id = 'rank' # rank,laptime, the trained model's task
        _run_ts = COL_RANK #COL_LAPTIME,COL_RANK
        _exp_id='rank' #rank, laptime, laptim2rank, timediff2rank...
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)

    if _dataset_id=='' or _test_event=='':
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)

    # 'pitage' datasets switch the feature mode
    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE

    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
                _task_id)

    init()
    mytest()
| 73,154 | 36.210071 | 310 | py |
rankpredictor | rankpredictor-master/src/indycar/model/quicktest_modules.py | #!/usr/bin/env python
# coding: utf-8
"""
RankNet QuickTest goes through the following steps
makedb laptime
makedb gluonts
train model
evaluate model
draw figures
version 0.4
supported features:
forecast_mode: shortterm, stint
trainmodel : deepAR , deepARW-Oracle, deepAR-multi
testmodel : standard, oracle,pitmodel, joint
Usage: RankNet-QuickTest.py <configfile> [options]
options overwrite the configurations for quick experiments needs, include:
_forecast_mode ;
trainmodel ;
testmodel ;
_joint_train ; False/True
loopcnt ; 100/2
_pitmodel_bias ; 0/2,4
year ; 2018/2019
_test_event ; Indy500-2018, Indy500-2019
"""
import logging
from optparse import OptionParser
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import random
import mxnet as mx
from mxnet import gluon
import pickle
import json
import copy
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from pathlib import Path
import configparser
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWeightEstimator
from indycar.model.deepar import DeepAREstimator
from indycar.model.transformerw import TransformerWeightedEstimator
from indycar.model.transformerf import TransformerFullLossEstimator
from indycar.model.transformerwf import TransformerWeightedFullLossEstimator
from indycar.model.transformerwfm import TransformerWeightedFullLossMaskedEstimator
#import indycar.model.stint_simulator_shortterm_pitmodel as stint
import indycar.model.quicktest_simulator as stint
#from indycar.model.global_variables import _hi
import indycar.model.global_variables as gvar
from gluonts.model.transformer import TransformerEstimator
logger = logging.getLogger(__name__)
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """
    Build the per-(car, lap) frame used downstream.

    input:
        dataset ; raw event records including car_number, completed_laps,
                  elapsed_time, rank, track_status, lap_status and the
                  auxiliary columns dropped below
    return:
        frame with columns ['car_number','completed_laps','time_diff','rank',
        'track_status','lap_status','elapsed_time'], one row per (car, lap);
        time_diff is the per-lap elapsed-time delta (i.e. the lap time)
    """
    # pick up data with valid rank: stable sort by elapsed time, then keep
    # the first record of every (car, lap) pair
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
            "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
            "last_pitted_lap","start_position","laps_led"], axis=1)

    carnumber = set(uni_ds['car_number'])
    print('cars:', carnumber)
    print('#cars=', len(carnumber))

    # uni_ds is already sorted by car_number and lap, so diff() within the
    # frame is the per-lap delta; zero it at every car boundary.
    # bugfix: the original chained assignment (uni_ds['x'][mask] = 0) writes
    # to a temporary under modern pandas (copy-on-write); .loc assigns
    # in place reliably.
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)

    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[mask, 'rank_diff'] = 0

    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[mask, 'time_diff'] = 0

    df = uni_ds[['car_number','completed_laps','time_diff','rank','track_status', 'lap_status','elapsed_time']]
    return df
def make_lapstatus_data(dataset):
    """
    Extract the per-lap track status from one car that finished the race.

    return: frame with columns ['completed_laps', 'track_status']
    """
    last_lap = dataset.completed_laps.max()
    total_laps = last_lap + 1

    # cars present on the final lap are the finishers
    finishers = dataset[dataset.completed_laps == last_lap].car_number.values
    print('count of completed cars:', len(finishers))
    print('completed cars:', finishers)

    # any finisher carries the full flag history; take the first one
    one = dataset[dataset['car_number'] == finishers[0]]
    one = one.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    return one[['completed_laps','track_status']]
def load_data(event, year=0):
    """
    Load one event's completed-laps CSV and derive the working frames.

    NOTE(review): a non-zero *year* must be a string here — it is
    concatenated into the file name; confirm against callers.
    return: (alldata, rankdata, acldata, flagdata)
    """
    if year>0:
        inputfile = '../data/final/C_'+ event +'-' + year + '.csv'
    else:
        inputfile = '../data/final/C_'+ event +'.csv'

    raw = pd.read_csv(inputfile)

    last_lap = max(raw.completed_laps)
    total_laps = last_lap + 1

    # cars present on the final lap finished the race
    finishers = raw[raw.completed_laps == last_lap].car_number.values
    print('count of completed cars:', len(finishers))
    print('completed cars:', finishers)

    #make a copy of the full data, then keep only the finishers
    alldata = raw.copy()
    finished_only = raw[raw['car_number'].isin(finishers)]

    # rank records: stable sort by elapsed time, one row per (car, lap)
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    cldata = make_cl_data(finished_only)
    flagdata = make_lapstatus_data(finished_only)
    acldata = make_cl_data(alldata)

    return alldata, rankdata, acldata, flagdata
def nan_helper(y):
    """Helper to handle indices and logical indices of NaNs.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, logical indices of NaNs
        - index, a function, with signature indices= index(logical_indices),
          to convert logical indices of NaNs to 'equivalent' indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x= nan_helper(y)
        >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
    """
    nan_mask = np.isnan(y)

    def to_positions(logical_mask):
        # translate a boolean mask into integer positions
        return logical_mask.nonzero()[0]

    return nan_mask, to_positions
def get_lap2nextpit(lap_status, maxlap):
    """
    input:
        lap_status ; array of 0/1 indicating pitstops for each lap, nan means incomplete race
        maxlap ; the max lap number of the race
    output:
        lap2nextpit ; array of the lap gap to the next pit for each lap
                      (NaN after the last pit for cars that did not finish)
    """
    pit_laps = list(np.where(lap_status==1)[0])

    # a completed race (no NaN tail) treats the finish line as a final stop,
    # so the last stint also gets a countdown
    if np.sum(np.isnan(lap_status)) == 0:
        pit_laps.append(maxlap)

    gaps = np.full_like(lap_status, np.nan)

    #guard: no pits at all
    if not pit_laps:
        return gaps

    cursor = 0
    for lap in range(len(lap_status)):
        if lap >= pit_laps[cursor]:
            # passed the current stop; count down to the next one
            cursor += 1
            if cursor >= len(pit_laps):
                break
        gaps[lap] = pit_laps[cursor] - lap

    return gaps
def get_lapdata(acldata):
    """
    input:
        acldata['car_number','completed_laps','time_diff','rank','track_status', 'lap_status','elapsed_time']
        timediff: [car_number, completed_laps] -> elapsed time diff to leader
    output:
        lapdata ; ndarray with one row per (car, lap):
            [car_number, lap, time_diff, rank, track_status, lap_status,
             time_behind, elapsed_time]
        time_behind is the gap to the smallest elapsed time on that lap
    """
    COL_ELAPSED_TIME = 6
    maxlap = np.max(acldata['completed_laps'].values)

    #'car_number','completed_laps','time_diff','rank','track_status', 'lap_status','time_behind'
    time_behind = []
    for lap in range(1, maxlap+1):
        this_lap = acldata[acldata['completed_laps']==lap][
                  ['car_number','completed_laps','time_diff','rank',
                   'track_status', 'lap_status','elapsed_time']].values
        if len(this_lap) == 0:
            continue

        # bugfix: np.float was removed in numpy>=1.24; the builtin float is
        # exactly what that alias pointed to
        min_elapsed_time = np.nanmin(this_lap[:,COL_ELAPSED_TIME].astype(float))

        for row in this_lap:
            car_number = int(row[0])
            time_diff = row[2]
            rank = row[3]
            track_status = row[4]
            lap_status = row[5]
            timebehind = float(row[COL_ELAPSED_TIME]) - min_elapsed_time

            time_behind.append([car_number, lap, time_diff, rank, track_status, lap_status,
                                timebehind, float(row[COL_ELAPSED_TIME])])

    lapdata = np.array(time_behind)
    return lapdata
# features: laptime, rank, track_status, lap_status, timediff
# index of each feature row in the per-car data matrix built by
# get_laptime_dataset()
LAPTIME = 0
RANK = 1
TRACK_STATUS = 2          # 1 = yellow flag ('Y') on this lap
LAP_STATUS = 3            # 1 = pit stop ('P') on this lap
TIME_BEHIND = 4           # gap to the minimum elapsed time of the lap
CAUTION_LAPS_INSTINT = 5  # yellow laps since the last pit stop
LAPS_INSTINT = 6          # laps since the last pit stop
ELAPSED_TIME = 7
LAP2NEXTPIT = 8           # laps remaining until the next pit stop
_featureCnt = 9           # number of feature rows allocated per car
def get_laptime_dataset(stagedata, inlap_status = 0):
    """
    Build the per-event feature tensors from the staged dataframes.

    #add caution_laps_instint, laps_instint
    input:
        stagedata ; {event: (alldata, rankdata, acldata, flagdata)}
        inlap_status ; 0 = off; 1 = also mark the lap BEFORE each pit stop
                       as a pit lap; any other non-zero = mark the lap AFTER
    output: laptime & rank data
        [(
        eventid,
        carids : rowid -> carno,
        datalist: #car_number x features x #totallaps (padded by Nan)
            entry: [[laptime, rank, track_status, lap_status,
                     caution_laps_instint, laps_instint]]
        )]
    """
    laptime_data = []
    for event in stagedata.keys():
        print(f'start event: {event}')
        laptime_rec = []
        eventid = gvar.events_id[event]

        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        totalcars = len(carlist)
        #totallaps = len(laplist)
        totallaps = max(laplist) + 1
        print('totallaps:', event, totallaps, len(laplist))

        #carnumber -> carid (and the inverse map)
        carids={key:idx for idx, key in enumerate(carlist)}
        decode_carids={idx:key for idx, key in enumerate(carlist)}

        #init per-car stint counters
        lap_instint = {carids[x]:0 for x in carlist}
        caution_instint = {carids[x]:0 for x in carlist}

        #array: car_number x lap, NaN-padded for incomplete races
        laptime = np.empty((totalcars, totallaps-1))
        rank = np.empty((totalcars, totallaps-1))
        laptime[:] = np.NaN
        rank[:] = np.NaN

        datalist = np.empty((totalcars, _featureCnt, totallaps-1))
        datalist[:] = np.NaN

        #'car_number','completed_laps','time_diff','rank','track_status', 'lap_status','time_behind'
        lapdata = get_lapdata(acldata)

        for row in lapdata:
            #completed_laps: skip the lap-0 record
            if int(row[1]) == 0:
                continue

            #add to data array (lap index is 0-based: completed_laps - 1)
            car_number = carids[int(row[0])]
            completed_laps = int(row[1])-1
            time_diff = float(row[2])
            # NOTE(review): this rebinds the `rank` name, shadowing the
            # NaN array allocated above (which is otherwise unused here)
            rank = int(row[3])
            track_status = 1 if row[4]=='Y' else 0
            lap_status = 1 if row[5]=='P' else 0
            time_behind = float(row[6])

            datalist[car_number, LAPTIME, completed_laps] = time_diff
            datalist[car_number, RANK, completed_laps] = rank
            datalist[car_number, TRACK_STATUS, completed_laps] = track_status
            datalist[car_number, LAP_STATUS, completed_laps] = lap_status
            datalist[car_number, TIME_BEHIND, completed_laps] = time_behind
            datalist[car_number, ELAPSED_TIME, completed_laps] = float(row[7])

            #stint status: count laps (and caution laps) since the last pit
            if track_status == 1:
                caution_instint[car_number] += 1
            lap_instint[car_number] += 1
            if lap_status == 1:
                #new stint: reset both counters at a pit stop
                lap_instint[car_number] = 0
                caution_instint[car_number] = 0

                # add inlap feature into lap_Status
                # set the previous lap to inlap status
                # what does it mean?
                if (inlap_status!=0):
                    if inlap_status == 1:
                        # set the previous lap of 'P'
                        if completed_laps > 0:
                            #datalist[car_number, LAP_STATUS, completed_laps-1] = INLAP_STATUS
                            datalist[car_number, LAP_STATUS, completed_laps-1] = 1
                    else:
                        # set the next lap of 'P'
                        if completed_laps +1 < totallaps:
                            #datalist[car_number, LAP_STATUS, completed_laps-1] = INLAP_STATUS
                            datalist[car_number, LAP_STATUS, completed_laps + 1] = 1

            datalist[car_number, LAPS_INSTINT, completed_laps] = lap_instint[car_number]
            datalist[car_number, CAUTION_LAPS_INSTINT, completed_laps] = caution_instint[car_number]

        #update lap2nextpit in datalist (needs the full LAP_STATUS row)
        for caridx in range(datalist.shape[0]):
            lap_status = datalist[caridx, LAP_STATUS, :]
            #pit status
            lap2nextpit = get_lap2nextpit(lap_status, totallaps-1)
            datalist[caridx, LAP2NEXTPIT, :] = lap2nextpit

        #add one record
        laptime_data.append([eventid, decode_carids, datalist])

        # push this event into stage dataframe
        print('event=%s, records=%s'%(event, datalist.shape))

    return laptime_data
# In[ ]:
def nan_helper(y):
    """Helper to handle indices and logical indices of NaNs.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, logical indices of NaNs
        - index, a function, with signature indices= index(logical_indices),
          to convert logical indices of NaNs to 'equivalent' indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x= nan_helper(y)
        >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
    """
    # np.flatnonzero(mask) is mask.nonzero()[0] for 1-d masks
    return np.isnan(y), (lambda mask: np.flatnonzero(mask))
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
#
# column indexes of the per-event data matrix [car, feature, lap]
# (superset of the maker-side LAPTIME..LAP2NEXTPIT indexes)
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSED_TIME= 7
COL_LAP2NEXTPIT = 8
#_featureCnt = 9
# added new features
COL_LEADER_PITCNT = 9        # pits by cars ranked ahead (see add_leader_cnt)
COL_TOTAL_PITCNT = 10        # total pits in the lap (see add_allpit_cnt)
COL_SHIFT_TRACKSTATUS = 11
COL_SHIFT_LAPSTATUS = 12
COL_SHIFT_LEADER_PITCNT = 13
COL_SHIFT_TOTAL_PITCNT = 14
COL_LASTFEATURE = 14
# dynamically extended space in simulation (scratch copies of the mutable
# feature rows, restored between simulation runs)
COL_TRACKSTATUS_SAVE = COL_LASTFEATURE+1
COL_LAPSTATUS_SAVE = COL_LASTFEATURE+2
COL_CAUTION_LAPS_INSTINT_SAVE = COL_LASTFEATURE+3
COL_LAPS_INSTINT_SAVE= COL_LASTFEATURE+4
COL_ENDPOS = COL_LASTFEATURE+5

# feature-mode bitmask flags (combined with |, tested via test_flag)
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
FEATURE_LEADER_PITCNT = 8
FEATURE_TOTAL_PITCNT = 16
FEATURE_SHIFT_TRACKSTATUS = 32
FEATURE_SHIFT_LAPSTATUS = 64
FEATURE_SHIFT_LEADER_PITCNT = 128
FEATURE_SHIFT_TOTAL_PITCNT = 256

# flag -> (long name, single-letter code) used by decode_feature_mode()
_feature2str= {
        FEATURE_STATUS : ("FEATURE_STATUS",'S'),
        FEATURE_PITAGE : ("FEATURE_PITAGE",'A'),
        FEATURE_LEADER_PITCNT : ("FEATURE_LEADER_PITCNT",'L'),
        FEATURE_TOTAL_PITCNT : ("FEATURE_TOTAL_PITCNT",'T'),
        FEATURE_SHIFT_TRACKSTATUS : ("FEATURE_SHIFT_TRACKSTATUS",'Y'),
        FEATURE_SHIFT_LAPSTATUS : ("FEATURE_SHIFT_LAPSTATUS",'P'),
        FEATURE_SHIFT_LEADER_PITCNT : ("FEATURE_SHIFT_LEADER_PITCNT",'L'),
        FEATURE_SHIFT_TOTAL_PITCNT : ("FEATURE_SHIFT_TOTAL_PITCNT",'T')
        }

# oracle-mode bitmask flags
MODE_ORACLE = 0
MODE_NOLAP = 1
MODE_NOTRACK = 2
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
#MODE_STR={MODE_ORACLE:'oracle', MODE_NOLAP:'nolap',MODE_NOTRACK:'notrack',MODE_TEST:'test'}
#_feature_mode = FEATURE_STATUS
def decode_feature_mode(feature_mode):
    """
    Print the long names of the features enabled in *feature_mode* and
    return the compact single-letter encoding ('0' marks a disabled slot).
    """
    enabled = []
    encoded = []
    for flag, (longname, shortname) in _feature2str.items():
        if test_flag(feature_mode, flag):
            enabled.append(longname)
            encoded.append(shortname)
        else:
            encoded.append('0')

    print(' '.join(enabled))
    return ''.join(encoded)
def add_leader_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS, shift_len = 0,
                  dest_col = COL_LEADER_PITCNT,
                  verbose = False):
    """
    Add a new feature into mat(car, feature, lap): for each car and lap,
    the cumulative pit count of the cars ranked ahead of it, where the
    rank ordering is read `shift_len` laps earlier.

    input:
        selmat : laptime_data array [car, feature, lap]
        rank_col : feature row holding the rank series
        pit_col : feature row holding the pit-stop indicator
        shift_len : lap offset for reading the rank ordering
        dest_col : destination feature row; -1 appends a new row (returns
                   an enlarged copy instead of updating in place)
        verbose : dump intermediate arrays for debugging
    output:
        selmat updated in place, or an enlarged copy when dest_col == -1
    """
    dim1, dim2, dim3 = selmat.shape
    # per-lap ordering of the cars by the rank column
    idx = np.argsort(selmat[:, rank_col,:], axis=0)
    # NOTE(fix): the original computed `true_rank = np.argsort(idx,
    # axis=0).astype(np.float)` here; the result was never used and the
    # `np.float` alias was removed in NumPy 1.24, so the dead line is dropped.
    # get leaderCnt by sorted pits: pits[:, lap] holds the pit flags of lap
    # `lap`, ordered by the ranks of lap `lap - shift_len`
    pits = np.zeros((dim1,dim3))
    for lap in range(shift_len, dim3):
        col = idx[:, lap-shift_len]
        pits[:, lap] = selmat[col, pit_col, lap]
    # running pit count of all better-ranked cars (subtract own pit flag)
    leaderCnt = np.nancumsum(pits, axis=0) - pits
    if verbose:
        print('pits:\n')
        print(pits[:,190:])
        print('leaderCnt raw:\n')
        print(leaderCnt[:,190:])
    #remove nans
    nanidx = np.isnan(leaderCnt)
    leaderCnt[nanidx] = 0
    if verbose:
        print('leaderCnt after remove nan:\n')
        print(leaderCnt[:,190:])
    if dest_col == -1:
        #create a new data block with one extra feature row
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: write into the existing row in place
        newmat = selmat
    # scatter the counts back from rank order to car order
    for lap in range(dim3):
        col = idx[:, lap]
        newmat[col, dest_col, lap] = leaderCnt[:, lap]
    # sync length to rank_col: re-mark each car's trailing-NaN region
    for rec in newmat:
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            #todo, some invalid nan, remove them
            rec[dest_col, -nan_count:] = np.nan
    return newmat
def add_allpit_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS,
                  dest_col = COL_TOTAL_PITCNT,verbose = False):
    """
    Add a new feature into mat(car, feature, lap): the total number of pit
    stops occurring in each lap (summed over all cars), broadcast to every car.

    input:
        selmat : laptime_data array [car, feature, lap]
        rank_col : feature row holding the rank series (used for NaN-tail sync)
        pit_col : feature row holding the pit-stop indicator
        dest_col : destination feature row; -1 appends a new row
        verbose : dump intermediate arrays for debugging
    output:
        selmat updated in place, or an enlarged copy when dest_col == -1
    """
    dim1, dim2, dim3 = selmat.shape
    #calc totalCnt vector: per-lap pit count summed over cars
    totalCnt = np.nansum(selmat[:, pit_col, :], axis=0).reshape((-1))
    if verbose:
        # bugfix: the original printed the undefined name `pits` here, which
        # raised NameError whenever verbose=True; print the pit rows instead.
        print('pits:\n')
        print(selmat[:, pit_col, 190:])
        print('totalCnt raw:\n')
        print(totalCnt[190:])
    #remove nans (np.nansum never yields NaN, so this is a defensive no-op)
    nanidx = np.isnan(totalCnt)
    totalCnt[nanidx] = 0
    if verbose:
        print('totalCnt after remove nan:\n')
        print(totalCnt[190:])
    if dest_col == -1:
        #create a new data block with one extra feature row
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: write into the existing row in place
        newmat = selmat
    # same total for every car
    for car in range(dim1):
        newmat[car, dest_col, :] = totalCnt
    # sync length to rank_col: re-mark each car's trailing-NaN region
    for rec in newmat:
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            #todo, some invalid nan, remove them
            rec[dest_col, -nan_count:] = np.nan
    return newmat
def add_shift_feature(selmat, rank_col=COL_RANK, shift_col=COL_LAPSTATUS, shift_len = 2,
                  dest_col = -1,verbose = False):
    """
    Add a new feature into mat(car, feature, lap): shift_col shifted left
    by shift_len laps (over the car's valid laps only).
    warning: these are oracle features, be careful not to let future rank
    positions leak into the model.

    input:
        selmat : laptime_data array [car, feature, lap]
        rank_col : feature row whose non-NaN span defines the valid laps
        shift_col : feature row to shift
        shift_len : number of laps to shift left
        dest_col : destination feature row; -1 appends a new row
    output:
        selmat updated in place, or an enlarged copy when dest_col == -1
    """
    dim1, dim2, dim3 = selmat.shape
    if dest_col == -1:
        #create a new data block with one extra feature row
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: write into the existing row in place
        newmat = selmat
    for car in range(dim1):
        # set empty status by default
        newmat[car, dest_col, :] = np.nan
        # get valid laps (the non-NaN span of the rank series)
        rec = selmat[car]
        # NOTE(fix): the original also called nan_helper() here and summed
        # the mask; both results were unused, so the dead work is removed.
        recnnz = rec[shift_col, ~np.isnan(rec[rank_col,:])]
        reclen = len(recnnz)
        #shift copy: last shift_len valid laps stay 0
        newmat[car, dest_col, :reclen] = 0
        newmat[car, dest_col, :reclen-shift_len] = recnnz[shift_len:]
    return newmat
def prepare_laptimedata(laptime_data,
                   prediction_length, freq,
                   test_event = 'Indy500-2018',
                   train_ratio=0.8,
                   context_ratio = 0.,
                   shift_len = -1):
    """
    prepare the laptime data for training
    1. remove short ts (shorter than train_len + prediction_length)
    2. rerank the tss (ranks recomputed among the surviving cars)
    3. create new features (leader/total pit counts and shifted columns)

    input:
        laptime_data : list of [eventid, carids, data] where data is an
            ndarray [carnumbers, features, lapnumber]
        shift_len : lap shift for the oracle features; defaults to
            prediction_length when negative
    output:
        data : new representation of laptime_data with the extra features
    """
    _laptime_data = laptime_data.copy()
    test_eventid = gvar.events_id[test_event]
    train_events = gvar._train_events
    run_ts = COL_RANK
    # check shift len
    if shift_len < 0:
        shift_len = prediction_length
    print('prepare_laptimedata shift len:', shift_len)
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    new_data = []
    for _data in _laptime_data:
        test_mode = False
        if _data[0] == test_eventid:
            test_mode = True
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            #use global train_len
            train_len = gvar._train_len if not test_mode else gvar._test_train_len
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10
        print(f'before ====event:{gvar.events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        #rerank due to short ts removed
        if True:
            sel_rows = []
            # use to check the dimension of features: fewer rows than
            # COL_LASTFEATURE+1 means the new features must be appended
            input_feature_cnt = _data[2].shape[1]
            if input_feature_cnt < COL_LASTFEATURE + 1:
                print('create new features mode, feature_cnt:', input_feature_cnt)
            else:
                print('update features mode, feature_cnt:', input_feature_cnt)
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    print(f'rerank a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                else:
                    sel_rows.append(rowid)
            #get selected matrix
            sel_idx = np.array(sel_rows)
            selmat = _data[2][sel_idx]
            # recompute the ranks among the surviving cars only
            mask = np.isnan(selmat[:,COL_RANK,:])
            idx = np.argsort(selmat[:,COL_RANK,:], axis=0)
            # bugfix: `np.float` was removed in NumPy 1.24; builtin float is
            # the documented replacement (same float64 result)
            true_rank = np.argsort(idx, axis=0).astype(float)
            true_rank[mask] = np.nan
            if test_mode:
                #
                # for historical code mismatch, simulation does not run rerank
                # (test-event ranks are kept 1-based)
                #
                _data[2][sel_idx,COL_RANK,:] = true_rank + 1
            else:
                _data[2][sel_idx,COL_RANK,:] = true_rank
            # update the carno dict: new row id -> original car number
            new_carids = {}
            for rowid in range(len(sel_idx)):
                carid = sel_idx[rowid]
                carno = _data[1][carid]
                new_carids[rowid] = carno
            # add new features
            # add leaderPitCnt (verbose dump for the first event only)
            if _data[0]==0:
                verbose = True
            else:
                verbose = False
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_LEADER_PITCNT
            data2_intermediate = add_leader_cnt(_data[2][sel_idx], shift_len = shift_len, dest_col=dest_col, verbose = verbose)
            # add totalPit
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_TOTAL_PITCNT
            data2_intermediate = add_allpit_cnt(data2_intermediate, dest_col=dest_col)
            #
            # add shift features, a fixed order, see the COL_SHIFT_* MACROS
            #
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TRACKSTATUS
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                            shift_col=COL_TRACKSTATUS, shift_len = shift_len)
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LAPSTATUS
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                            shift_col=COL_LAPSTATUS, shift_len = shift_len)
            # leader_pitcnt can not be shift, target leaking, just do not use it
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LEADER_PITCNT
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                            shift_col=COL_LEADER_PITCNT, shift_len = shift_len)
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TOTAL_PITCNT
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                            shift_col=COL_TOTAL_PITCNT, shift_len = shift_len)
            # final
            data2_newfeature = data2_intermediate
        new_data.append([_data[0], new_carids, data2_newfeature])
    return new_data
def get_real_features(feature_mode, rec, endpos):
    """
    Construct the list of real-valued covariate series selected by the
    bit flags in *feature_mode*, each truncated to the first *endpos* laps
    (endpos <= 0 means the whole series).

    The emission order is fixed:
        FEATURE_STATUS            -> COL_TRACKSTATUS, COL_LAPSTATUS
        FEATURE_PITAGE            -> COL_LAPS_INSTINT
        FEATURE_LEADER_PITCNT     -> COL_LEADER_PITCNT
        FEATURE_TOTAL_PITCNT      -> COL_TOTAL_PITCNT
        FEATURE_SHIFT_TRACKSTATUS -> COL_SHIFT_TRACKSTATUS
        FEATURE_SHIFT_LAPSTATUS   -> COL_SHIFT_LAPSTATUS
        FEATURE_SHIFT_LEADER_PITCNT -> COL_SHIFT_LEADER_PITCNT
        FEATURE_SHIFT_TOTAL_PITCNT  -> COL_SHIFT_TOTAL_PITCNT
    """
    #check endpos
    if endpos <= 0:
        endpos = rec.shape[1]
    # (flag, feature rows appended when the flag is set), in emission order
    selector = (
        (FEATURE_STATUS, (COL_TRACKSTATUS, COL_LAPSTATUS)),
        (FEATURE_PITAGE, (COL_LAPS_INSTINT,)),
        (FEATURE_LEADER_PITCNT, (COL_LEADER_PITCNT,)),
        (FEATURE_TOTAL_PITCNT, (COL_TOTAL_PITCNT,)),
        (FEATURE_SHIFT_TRACKSTATUS, (COL_SHIFT_TRACKSTATUS,)),
        (FEATURE_SHIFT_LAPSTATUS, (COL_SHIFT_LAPSTATUS,)),
        (FEATURE_SHIFT_LEADER_PITCNT, (COL_SHIFT_LEADER_PITCNT,)),
        (FEATURE_SHIFT_TOTAL_PITCNT, (COL_SHIFT_TOTAL_PITCNT,)),
    )
    features = []
    for flag, cols in selector:
        if test_flag(feature_mode, flag):
            for col in cols:
                features.append(rec[col, :endpos])
    return features
def make_dataset_byevent(_laptime_data,
                   prediction_length, freq,
                   useeid = False,
                   run_ts=COL_LAPTIME,
                   test_event = 'Indy500-2018',
                   use_global_dict = True,
                   oracle_mode = MODE_ORACLE,
                   half_moving_win = True,
                   train_ratio=0.8,
                   log_transform = False,
                   context_ratio = 0.,
                   dorerank = True,
                   joint_train = 0,
                   test_cars = []
                ):
    """
    split the ts to train and test part by the ratio

    The test event contributes rolling-window test records (one per end
    position, step -1); every other event in gvar._train_events contributes
    whole series to the train set.

    oracle_mode: MODE_NOTRACK / MODE_NOLAP bits zero out the track/lap
        covariates to simulate prediction without that oracle information
    joint_train: when truthy, the target is 2-d (run_ts + COL_LAPSTATUS)
    returns: (train ListDataset, test ListDataset, train list, test list)
    """
    #global setting
    feature_mode = gvar._feature_mode
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []
    totalTSCnt = 0
    totalTSLen = 0
    test_eventid = gvar.events_id[test_event]
    train_events = gvar._train_events
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if _data[0] == test_eventid:
            test_mode = True
        elif _data[0] in train_events:
            test_mode = False
        else:
            #skip this event
            print('skip this event:', _data[0])
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            #use global train_len
            train_len = gvar._train_len if not test_mode else gvar._test_train_len
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10
        print(f'after ====event:{gvar.events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            totalTSCnt += 1
            totalTSLen += totallen
            if ( totallen < train_len + prediction_length):
                print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = gvar.global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            #check carno in test_cars, testmode only
            if len(test_cars)>0 and carno not in test_cars:
                continue
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            if joint_train:
                target_cols = [run_ts, COL_LAPSTATUS]
                target_val = rec[target_cols].copy().astype(np.float32)
            else:
                target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # selection of features: oracle bits blank out covariates
            if test_flag(oracle_mode, MODE_NOTRACK):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            # robustness fix: initialize so the featureCnt report below can
            # never hit a NameError (or a stale value from the previous car)
            # when the rolling-window loop makes zero iterations
            real_features = []
            if not test_mode:
                # all go to train set
                real_features = get_real_features(feature_mode, rec, -1)
                _train.append({'target': target_val,
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': real_features
                                 })
            else:
                # reset train_len
                if context_ratio != 0.:
                    #add [0, context_len] to train set
                    if joint_train:
                        _train.append({'target': target_val[:,:context_len],
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': get_real_features(feature_mode, rec, context_len)
                                 })
                    else:
                        _train.append({'target': target_val[:context_len],
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': get_real_features(feature_mode, rec, context_len)
                                 })
                # testset
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                step = -1
                for endpos in range(totallen, context_len+prediction_length,
                                    step):
                    # NOTE(fix): the original copied the track/lap/pitage
                    # rows here (track_rec/lap_rec/pitage_rec) and never used
                    # them; the dead per-window copies are removed
                    real_features = get_real_features(feature_mode, rec, endpos)
                    if joint_train:
                        _test.append({'target': target_val[:,:endpos],
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': real_features
                                 })
                    else:
                        _test.append({'target': target_val[:endpos],
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': real_features
                                 })
                    test_rec_cnt += 1
            #check feature cnt
            featureCnt = len(real_features)
            #add one ts
            print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt},featureCnt:{featureCnt}')
        train_set.extend(_train)
        test_set.extend(_test)
    print(f'train len:{len(train_set)}, test len:{len(test_set)}, totsl TsCnt:{totalTSCnt}, total ts len:{totalTSLen}')
    train_ds = ListDataset(train_set, freq=freq,one_dim_target= False if joint_train else True)
    test_ds = ListDataset(test_set, freq=freq,one_dim_target= False if joint_train else True)
    return train_ds, test_ds, train_set, test_set
# In[ ]:
def init_estimator(model, gpuid, epochs=100, batch_size = 32,
        target_dim = 3, distr_output = None, use_feat_static = True):
    """Build the GluonTS estimator (or ready-made predictor) named by *model*.

    Parameters:
        model: model identifier; unknown names log an error and sys.exit(-1).
        gpuid: GPU index (int or str); negative selects the CPU context.
        epochs: training epochs for the shared Trainer.
        batch_size: Trainer batch size.
        target_dim: target dimensionality, used by the multivariate models only.
        distr_output: distribution output passed to most univariate estimators.
        use_feat_static: enable the static categorical feature (car id).

    Returns:
        an estimator object, or a predictor for the classic baselines
        ('ets', 'prophet', 'arima', 'naive').

    NOTE(review): `cardinality` is read from enclosing/global scope in the
    use_feat_static branches — it must be defined before this is called;
    verify at the call site. gvar.* supplies prediction/context length, freq,
    learning rate, patience, lag list and weight coefficient.
    """
    # select the MXNet context from the gpu id
    if int(gpuid) < 0:
        ctx = "cpu"
    else:
        ctx = "gpu(%s)"%gpuid
    #global vars
    prediction_length = gvar.prediction_length
    context_length = gvar.context_length
    freq = gvar.freq
    # one shared trainer configuration reused by every trainable model below
    trainer=Trainer(ctx=ctx,
                batch_size = batch_size,
                epochs=epochs,
                learning_rate=gvar.learning_rate,
                patience = gvar.patience,
                #hybridize=False,
                num_batches_per_epoch=100
               )
    # plain DeepAR, no dynamic covariates
    if model == 'deepAR':
        if use_feat_static:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=True,
                cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
        else:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=False,
                #cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
    # DeepAR with the oracle dynamic covariates enabled
    elif model == 'deepAR-Oracle':
        if use_feat_static:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
        else:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
    # weighted-loss DeepAR variant ('RankNet' is an alias)
    elif model == 'deepARW-Oracle' or model == 'RankNet':
        if use_feat_static:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                weight_coef=gvar._weight_coef,
                trainer=trainer
            )
        else:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                weight_coef=gvar._weight_coef,
                trainer=trainer
            )
    # Transformer, no dynamic covariates
    elif model == 'Transformer':
        if use_feat_static:
            estimator = TransformerEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
        else:
            estimator = TransformerEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
    # Transformer with oracle dynamic covariates
    elif model == 'Transformer-Oracle':
        if use_feat_static:
            estimator = TransformerEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
        else:
            estimator = TransformerEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
    # weighted-loss Transformer variant
    elif model == 'TransformerW-Oracle':
        if use_feat_static:
            estimator = TransformerWeightedEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
        else:
            estimator = TransformerWeightedEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
    # weighted full-loss Transformer ('RankNet-Transformer' is an alias)
    elif model == 'TransformerWF-Oracle' or model == 'RankNet-Transformer':
        if use_feat_static:
            estimator = TransformerWeightedFullLossEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
        else:
            estimator = TransformerWeightedFullLossEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
    # masked weighted full-loss Transformer (also takes weight_coef)
    elif model == 'TransformerWFM-Oracle':
        if use_feat_static:
            estimator = TransformerWeightedFullLossMaskedEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                weight_coef=gvar._weight_coef,
                trainer=trainer
            )
        else:
            estimator = TransformerWeightedFullLossMaskedEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                weight_coef=gvar._weight_coef,
                trainer=trainer
            )
    # full-loss Transformer (unweighted)
    elif model == 'TransformerF-Oracle':
        if use_feat_static:
            estimator = TransformerFullLossEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
        else:
            estimator = TransformerFullLossEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
    # multivariate DeepAR over target_dim series
    elif model == 'deepAR-multi':
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=use_feat_static,
            #cardinality=cardinality,
            use_feat_dynamic_real=False,
            freq=freq,
            lags_seq=gvar._lags_seq,
            trainer=trainer,
            distr_output=MultivariateGaussianOutput(dim=target_dim),
        )
    # multivariate weighted DeepAR ('RankNet-Joint' is an alias)
    elif model == 'deepARW-multi' or model == 'RankNet-Joint':
        estimator = DeepARWeightEstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=use_feat_static,
            #cardinality=cardinality,
            use_feat_dynamic_real=False,
            freq=freq,
            lags_seq=gvar._lags_seq,
            weight_coef=gvar._weight_coef,
            trainer=trainer,
            distr_output=MultivariateGaussianOutput(dim=target_dim),
        )
    # simple neural / classic baselines
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            lags_seq=gvar._lags_seq,
            trainer=trainer
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            lags_seq=gvar._lags_seq,
            trainer=trainer
        )
    elif model == 'deepState':
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            lags_seq=gvar._lags_seq,
            trainer=trainer
        )
    # the following four return ready-made predictors, not estimators
    elif model == 'ets':
        estimator = RForecastPredictor(method_name='ets',freq= freq, prediction_length = prediction_length)
    elif model == 'prophet':
        estimator = ProphetPredictor(freq= freq, prediction_length = prediction_length)
    elif model == 'arima':
        estimator = RForecastPredictor(method_name='arima',freq= freq, prediction_length = prediction_length, trunc_length = context_length)
    elif model == 'naive':
        estimator = NaivePredictor(freq= freq, prediction_length = prediction_length)
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)
    return estimator
# In[ ]:
#
# simulation engine general
#
def init_simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
        featuremode = stint.FEATURE_STATUS,
        pitmodel = 0,
        inlapmode=0,
        train_len = 40,test_train_len=40,
        joint_train = False,
        pitmodel_bias= 0, prepared_laptimedata = None):
    """
    Configure the `stint` simulation module for one experiment.

    Sets the stint module's global configuration (dataset/test event,
    feature mode, task/experiment ids, train lengths, ...) and installs
    *prepared_laptimedata* (the output of prepare_laptimedata) as the
    simulation's lap-time data. Returns nothing; all effects are on the
    stint module state.

    input:
        prepared_laptimedata ; preprocessed laptime data (module-global style)
    """
    #
    # configuration
    #
    # model path: <_dataset_id>/<_task_id>-<trainid>/
    #_dataset_id = 'indy2013-2018-nocarid'
    # NOTE: _inlap_status must be assigned before stint.init() runs
    stint._inlap_status = inlapmode
    stint.init(gvar.LAPTIME_DATASET, pitmodel, pitmodel_bias= pitmodel_bias)
    stint._dataset_id = datasetid
    stint._test_event = testevent
    #_test_event = 'Indy500-2019'
    stint._feature_mode = featuremode
    stint._context_ratio = 0.
    stint._task_id = taskid # rank,laptime, the trained model's task
    stint._run_ts = runts #COL_LAPTIME,COL_RANK
    stint._exp_id=expid #rank, laptime, laptim2rank, timediff2rank...
    stint._use_mean = True
    stint._train_len = train_len
    stint._test_train_len = test_train_len
    stint._joint_train = joint_train
    # todo: add into stint code
    #here add new laptime_data with new features
    #
    stint.set_laptimedata(prepared_laptimedata)
def simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
        datamode, loopcnt, featuremode = stint.FEATURE_STATUS,
        pitmodel = 0, model = 'oracle', inlapmode=0, train_len = 40,test_train_len=40,
        forecastmode = 'shortterm', joint_train = False,
        pitmodel_bias= 0, prepared_laptimedata = None,
        epochs = 1000):
    """
    Configure the stint module, load a trained model, and run *loopcnt*
    simulation passes.

    forecastmode selects the engine: 'shortterm' uses
    stint.run_simulation_shortterm (per-run result is a tuple whose [0] is
    the dataframe), 'stint' uses stint.run_simulation_pred (result is the
    dataframe itself); any other value aborts the loop.

    input:
        prepared_laptimedata ; preprocessed laptime data (module-global style)
    output:
        b    : np.array of per-run evaluation results (mean is printed)
        ret2 : dict run index -> raw simulation result
    """
    #
    # configuration (same stint setup pattern as init_simulation)
    #
    # model path: <_dataset_id>/<_task_id>-<trainid>/
    #_dataset_id = 'indy2013-2018-nocarid'
    # NOTE: _inlap_status must be assigned before stint.init() runs
    stint._inlap_status = inlapmode
    stint.init(gvar.LAPTIME_DATASET, pitmodel, pitmodel_bias= pitmodel_bias)
    stint._dataset_id = datasetid
    stint._test_event = testevent
    #_test_event = 'Indy500-2019'
    stint._feature_mode = featuremode
    stint._context_ratio = 0.
    stint._task_id = taskid # rank,laptime, the trained model's task
    stint._run_ts = runts #COL_LAPTIME,COL_RANK
    stint._exp_id=expid #rank, laptime, laptim2rank, timediff2rank...
    stint._use_mean = True
    stint._train_len = train_len
    stint._test_train_len = test_train_len
    stint._joint_train = joint_train
    # stint-level forecasting needs a few extra switches
    if forecastmode == 'stint':
        stint._trim = 0
        stint._debug_carlist=[]
        stint._force_endpit_align = False
        stint._include_endpit = True
    # todo: add into stint code
    #here add new laptime_data with new features
    #
    stint.set_laptimedata(prepared_laptimedata)
    #stint.set_laptimedata(laptime_data)
    predictor = stint.load_model(predictionlen, model,trainid='indy500',epochs = epochs, exproot='./')
    ret2 = {}
    for i in range(loopcnt):
        #df, full_samples, full_tss
        if forecastmode == 'shortterm':
            ret2[i] = stint.run_simulation_shortterm(predictor, predictionlen, stint.freq, datamode=datamode, verbose=False)
        elif forecastmode == 'stint':
            ret2[i] = stint.run_simulation_pred(predictor, predictionlen, stint.freq, datamode=datamode)
        else:
            print('forecastmode not support:', forecastmode)
            break
    # evaluate every completed run and average the scores
    acc = []
    for i in ret2.keys():
        if forecastmode == 'shortterm':
            df = ret2[i][0]
            _x = stint.get_evalret_shortterm(df)
        elif forecastmode == 'stint':
            df = ret2[i]
            _x = stint.get_evalret(df)
        acc.append(_x)
    b = np.array(acc)
    print(np.mean(b, axis=0))
    #save keys
    #stint._pitmodel.save_keys('pitmodel-keys.pickle')
    return b, ret2
def long_predict(predictor, sampleCnt = 100):
    """
    Stitch a long-horizon forecast out of many rolling short-term forecasts,
    keeping only the farthest (last) predicted point of each window.

    input:
        test_ds ; global var — the rolling-window test dataset to predict on
        predictor ; a trained GluonTS predictor
        sampleCnt ; number of sample paths per forecast window
    output:
        (forecast, ts) : a SampleForecast whose samples cover the stitched
        span (NaN where no window contributed), and the first target series.
    """
    def get_start(idx):
        # minutes between forecast idx's start and the series start
        # (closure over `forecasts`/`start_time`, both bound below)
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=predictor,  # predictor
        num_samples=sampleCnt,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # test_ds windows shrink by one lap each, so forecasts[-1] is the
    # earliest window and forecasts[0] the latest — TODO confirm ordering
    first_start = get_start(-1)
    last_start = get_start(0)
    print(first_start, last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # allocate the stitched sample matrix; uncovered laps stay NaN
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start
        #copy only the farthest predicted lap of this window into the block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
    target.samples = newsamples
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target, tss[0]
def get_alldf(dfx, year=2018, forecast_mode = 'shortterm'):
    """
    Concatenate the per-run result dataframes held in *dfx* into one frame.

    dfx maps a run id to either a tuple whose first element is the result
    dataframe ('shortterm' mode) or to the dataframe itself (any other
    mode). With a single run, that run's dataframe is returned unchanged.
    (*year* is accepted for interface compatibility and not used here.)
    """
    if forecast_mode == 'shortterm':
        frames = [dfx[run_id][0] for run_id in dfx.keys()]
    else:
        frames = [dfx[run_id] for run_id in dfx.keys()]
    return pd.concat(frames) if len(frames) > 1 else frames[0]
def get_alldf_mode(dfx, year=2018,mode=0, forecast_mode = 'shortterm'):
    """
    Collapse multiple runs into one prediction per (carno, startlap) by
    aggregating pred_endrank across runs, then rederiving pred_diff/pred_sign.

    mode:
        0; mode (most frequent value, scipy.stats.mode)
        1; mean
        2; median
    forecast_mode: forwarded to get_alldf; also selects the output columns
        ('shortterm' has no endlap/pred_endlap columns).
    """
    dfall = get_alldf(dfx, year=year, forecast_mode = forecast_mode)
    cars = set(dfall.carno.values)
    # all start laps seen for each car
    startlaps = {}
    for car in cars:
        startlaps[car] = set(dfall[dfall['carno']==car].startlap.values)
    retdf = []
    for car in cars:
        for startlap in startlaps[car]:
            # all runs' rows for this (car, startlap) pair
            dfrec = dfall[(dfall['carno']==car) & (dfall['startlap']==startlap)]
            #get mode
            if mode == 0:
                pred_endrank = stats.mode(dfrec.pred_endrank.values).mode[0]
                #pred_endlap = stats.mode(dfrec.pred_endlap.values).mode[0]
            elif mode == 1:
                #use mean
                pred_endrank = np.mean(dfrec.pred_endrank.values)
                #pred_endlap = np.mean(dfrec.pred_endlap.values)
            elif mode == 2:
                #use median
                pred_endrank = np.median(dfrec.pred_endrank.values)
                #pred_endlap = np.median(dfrec.pred_endlap.values)
            # keep the first row and overwrite its prediction columns:
            # positional layout is [carno, startlap, startrank, endrank,
            # diff, sign, pred_endrank, pred_diff, pred_sign, ...]
            firstrec = dfrec.to_numpy()[0,:]
            firstrec[6] = pred_endrank                 # pred_endrank
            firstrec[7] = pred_endrank - firstrec[2]   # pred_diff = pred_endrank - startrank
            if firstrec[7] == 0:                       # pred_sign from pred_diff
                firstrec[8] = 0
            elif firstrec[7] > 0:
                firstrec[8] = 1
            else:
                firstrec[8] = -1
            #endlap, pred_endlap
            retdf.append(firstrec)
    #dfout = pd.concat(retdf)
    if forecast_mode == 'shortterm':
        dfout = pd.DataFrame(retdf, columns =['carno', 'startlap', 'startrank',
                    'endrank', 'diff', 'sign',
                    'pred_endrank', 'pred_diff', 'pred_sign',
                    #'endlap','pred_endlap'
                    ])
    else:
        dfout = pd.DataFrame(retdf, columns =['carno', 'startlap', 'startrank',
                    'endrank', 'diff', 'sign',
                    'pred_endrank', 'pred_diff', 'pred_sign',
                    'endlap','pred_endlap'
                    ])
    print('df size:', len(dfout))
    return dfout
def get_allsamples(dfx, year=2018):
    """
    Collapse the multi-run simulation output *dfx* into one sample matrix
    per car: row r of full_samples[carno] is the per-lap mean forecast of
    run r. Returns (full_samples, full_tss) where full_tss is the target
    series dict of the first run. (*year* is unused, kept for interface
    compatibility.)
    """
    run_ids = list(dfx.keys())
    first_run = dfx[run_ids[0]]
    full_tss = first_run[2]
    carlist = list(full_tss.keys())
    samplecnt, lapcnt = first_run[1][carlist[0]].shape
    print('sacmplecnt:', samplecnt, 'lapcnt:', lapcnt, 'runcnt:', len(run_ids))
    # one (runcnt x lapcnt) matrix per car, filled below
    full_samples = {carno: np.zeros((len(run_ids), lapcnt)) for carno in carlist}
    for run_id in run_ids:
        per_car_forecast = dfx[run_id][1]
        for carno in carlist:
            # collapse this run's sample paths to their per-lap mean
            full_samples[carno][run_id, :] = np.nanmean(per_car_forecast[carno], axis=0)
    return full_samples, full_tss
#straight implementation of prisk
def quantile_loss(target, quantile_forecast, q):
    """Doubled pinball (quantile) loss of *quantile_forecast* against
    *target* at quantile level *q*, NaN entries ignored."""
    diff = quantile_forecast - target
    weight = (target <= quantile_forecast) - q
    return 2.0 * np.nansum(np.abs(diff * weight))
def abs_target_sum(target):
    """Sum of the absolute target values, ignoring NaN entries."""
    total = np.nansum(np.abs(target))
    return total
def prisk(full_samples, full_tss, verbose = False):
    """
    Compute weighted quantile losses (P-risk at 0.1/0.5/0.9) with the
    GluonTS Evaluator, skipping the first 12 laps of every series.

    input:
        full_samples : dict carno -> (samples x laps) forecast matrix
        full_tss : dict carno -> target series (array-like per lap)
    output:
        agg_metrics : the Evaluator's aggregated metrics dict
    """
    carlist = full_tss.keys()
    tss = []
    forecasts = []
    forecasts_mean = []
    freq = '1min'
    start = pd.Timestamp("01-01-2019", freq=freq)
    for car in carlist:
        testcar = car
        # wrap the sample matrix (from lap 12 on) as a SampleForecast
        # NOTE(review): `start + 12` relies on the deprecated
        # freq-carrying Timestamp + int arithmetic — verify on newer pandas
        fc = SampleForecast(samples = full_samples[testcar][:, 12:], freq=freq, start_date=start + 12)
        # single mean path variant (built but not evaluated below)
        samples = np.mean(full_samples[testcar][:, 12:], axis =0, keepdims=True)
        fc_mean = SampleForecast(samples = samples, freq=freq, start_date=start + 12)
        # target as a minute-indexed dataframe covering the full series
        index = pd.date_range(start='2019-01-01 00:00:00', freq = 'T', periods = len(full_tss[testcar]))
        ts = pd.DataFrame(index = index, data = full_tss[testcar])
        tss.append(ts)
        forecasts.append(fc)
        forecasts_mean.append(fc_mean)
    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(tss))
    if verbose:
        print(json.dumps(agg_metrics, indent=4))
    print(agg_metrics["wQuantileLoss[0.1]"], agg_metrics["wQuantileLoss[0.5]"],agg_metrics["wQuantileLoss[0.9]"])
    return agg_metrics
def prisk_direct_bysamples2(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    Compute aggregated weighted quantile losses (P-risk) directly from
    forecast samples, skipping the first *startid* laps of every series.

    target: endrank
    forecast: pred_endrank
    item_id: <carno, startlap>
    """
    carlist = full_tss.keys()
    n_cars = len(carlist)
    n_q = len(quantiles)
    prisk = np.zeros((n_cars, n_q))
    target_sum = np.zeros((n_cars))
    aggrisk = np.zeros((n_q))
    for carid, carno in enumerate(carlist):
        # per-car forecast sample matrix and target series
        forecast = full_samples[carno]
        target = full_tss[carno]
        # quantile curves: len(quantiles) x laps
        quantile_forecasts = np.quantile(forecast, quantiles, axis=0)
        for idx, q in enumerate(quantiles):
            prisk[carid, idx] = quantile_loss(target[startid:], quantile_forecasts[idx][startid:], q)
        target_sum[carid] = abs_target_sum(target[startid:])
        if verbose==True and carno==3:
            # debug dump for car #3 (last quantile's curve, as before)
            print('target:', target[startid:])
            print('forecast:', quantile_forecasts[-1][startid:])
            print('target_sum:', target_sum[carid])
            print('quantile_forecasts:', quantile_forecasts[:,startid:])
    # aggregate: sum of losses normalized by the total absolute target
    prisk_sum = np.nansum(prisk, axis=0)
    if verbose==True:
        print('prisk:',prisk)
        print('prisk_sum:',prisk_sum)
        print('target_sum:',target_sum)
    denom = np.sum(target_sum)
    for idx, q in enumerate(quantiles):
        aggrisk[idx] = np.divide(prisk_sum[idx], denom)
    agg_metrics = {f'wQuantileLoss[{q}]': aggrisk[idx] for idx, q in enumerate(quantiles)}
    print(agg_metrics.values())
    return agg_metrics, aggrisk
# In[ ]:
def prisk_direct_bysamples(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    Calculate the weighted quantile losses directly from <samples, tss>
    (numerically equal to the gluonts Evaluator implementation).

    full_samples ; {carno: 2-D array [sample, lap]} forecast samples
    full_tss     ; {carno: 1-D array [lap]} ground-truth series
    startid      ; laps before this index are excluded from the evaluation
    Returns (agg_metrics dict keyed 'wQuantileLoss[q]', aggrisk array).
    """
    cars = list(full_tss.keys())
    n_quantiles = len(quantiles)
    per_car_loss = np.zeros((len(cars), n_quantiles))
    per_car_target = np.zeros(len(cars))
    aggrisk = np.zeros(n_quantiles)
    for row, carno in enumerate(cars):
        # forecast samples and ground truth for this car
        samples = full_samples[carno]
        truth = full_tss[carno]
        # quantiles over the sample axis -> shape (n_quantiles, laps)
        q_forecasts = np.quantile(samples, quantiles, axis=0)
        for col, q in enumerate(quantiles):
            per_car_loss[row, col] = quantile_loss(truth[startid:], q_forecasts[col][startid:], q)
        per_car_target[row] = abs_target_sum(truth[startid:])
        if verbose==True and carno==3:
            print('target:', truth[startid:])
            print('forecast:', q_forecasts[n_quantiles - 1][startid:])
            print('target_sum:', per_car_target[row])
            print('quantile_forecasts:', q_forecasts[:,startid:])
    # aggregate: total loss over cars, normalized by the total |target|
    loss_totals = np.nansum(per_car_loss, axis=0)
    if verbose==True:
        print('prisk:', per_car_loss)
        print('prisk_sum:', loss_totals)
        print('target_sum:', per_car_target)
    denom = np.sum(per_car_target)
    for col in range(n_quantiles):
        aggrisk[col] = np.divide(loss_totals[col], denom)
    agg_metrics = {f'wQuantileLoss[{q}]': aggrisk[col] for col, q in enumerate(quantiles)}
    print(agg_metrics.values())
    return agg_metrics, aggrisk
def clear_samples(full_samples, full_tss, clearidx):
    """
    Return deep copies of <samples, tss> with the laps listed in clearidx
    masked to NaN (so they are ignored by the NaN-aware evaluation).
    The input dicts are left untouched.
    """
    import copy
    masked_samples = copy.deepcopy(full_samples)
    masked_tss = copy.deepcopy(full_tss)
    for carno in full_tss.keys():
        # mask the selected laps in every sample row and in the target
        masked_samples[carno][:, clearidx] = np.nan
        masked_tss[carno][clearidx] = np.nan
    return masked_samples, masked_tss
def do_rerank(dfout, short=True):
    """
    Re-sort the predicted end ranks globally within each startlap.

    dfout columns: carno, startlap, startrank, endrank, diff, sign,
    pred_endrank, pred_diff, pred_sign[, endlap, pred_endlap].
    The model may emit float `pred_endrank` scores; within each startlap
    the cars are re-ranked by a double argsort so `pred_endrank` becomes a
    dense integer rank, and `pred_diff`/`pred_sign` are recomputed against
    the true `endrank`.

    short=True expects/returns the 9-column frame (without the two endlap
    columns). Returns a new all-int DataFrame.

    Fixes over the previous version: blocks are collected in a list and
    stacked once (the old incremental np.vstack was O(n^2)), and the
    comprehension no longer shadows the builtin `id`.
    """
    cols = ['carno','startlap','startrank','endrank','diff','sign',
            'pred_endrank','pred_diff','pred_sign','endlap','pred_endlap']
    colid = {name: pos for pos, name in enumerate(cols)}
    print('rerank...')
    laps = set(dfout.startlap.values)
    blocks = []
    for lap in laps:
        df = dfout[dfout['startlap'] == lap].to_numpy()
        # double argsort turns (possibly float) scores into dense integer ranks
        order = np.argsort(df[:, colid['pred_endrank']], axis=0)
        df[:, colid['pred_endrank']] = np.argsort(order, axis=0)
        # recompute prediction diff/sign against the ground-truth endrank
        diffs = df[:, colid['pred_endrank']] - df[:, colid['endrank']]
        df[:, colid['pred_diff']] = diffs
        df[:, colid['pred_sign']] = np.where(diffs > 0, 1, np.where(diffs < 0, -1, 0))
        blocks.append(df)
    data = np.vstack(blocks)  # single stack instead of quadratic incremental vstack
    if short:
        dfret = pd.DataFrame(data.astype(int), columns=cols[:-2])
    else:
        dfret = pd.DataFrame(data.astype(int), columns=cols)
    return dfret
# In[ ]:
def long_predict_bymloutput_multirun(output, dfin, test_ds, predictor, sampleCnt=100):
    """
    Build one long-term forecast object by splicing multi-run ml-model
    predictions (column `pred_endrank` of dfin) into the sample matrix of a
    gluonts forecast; the gluonts predictor is only used to lay out the
    forecast grid (start dates / prediction length).

    input:
        output   ; label string (used only by the commented-out plotting call)
        dfin     ; prediction df with `startlap` and `pred_endrank`; for each
                   startlap `pred_endrank` is expected to carry one value per
                   run — presumably sampleCnt of them (confirm with caller)
        test_ds  ; gluonts test dataset, one entry per rolling start position
        predictor; gluonts predictor
    return: (forecast object with replaced samples, observed series tss[0])
    """
    def get_start(idx):
        # lap offset of forecast idx from the series start; the synthetic
        # time axis steps 1 minute per lap, so minutes == laps
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds, # test dataset
        predictor = predictor, # predictor
        num_samples=sampleCnt, # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # forecasts are ordered latest-first: [-1] is the earliest start, [0] the latest
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # one wide NaN-filled sample matrix covering the whole race span
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
        # get prediction from ml output
        # pos = laps
        # 1 ... 10 | 11 <- start pos in forecasts
        # 0 ... 9 | 10 <- 9 is the startlap
        #
        # NOTE(review): offset is -2 here but -1 in long_predict_bydf — confirm intended
        startlap = start_pos - 2
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values
            #pred_val = _rec.pred_endrank.values
            #make sure shape match, 100 samples
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)
    target.samples = newsamples
    print('multirun target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bymloutput(output, dfin, test_ds, predictor):
    """
    Single-run variant of long_predict_bymloutput_multirun: splice the single
    ml-model prediction per startlap (`pred_endrank.values[0]`) into every
    sample row of a gluonts forecast grid.

    input:
        output   ; label string (used only by the commented-out plotting call)
        dfin     ; prediction df with `startlap` and `pred_endrank`
        test_ds  ; gluonts test dataset, one entry per rolling start position
        predictor; gluonts predictor (used only to lay out the forecast grid)
    return: (forecast object with replaced samples, observed series tss[0])
    """
    def get_start(idx):
        # lap offset of forecast idx from the series start (1 minute == 1 lap)
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds, # test dataset
        predictor = predictor, # predictor
        num_samples=100, # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # forecasts are ordered latest-first: [-1] is the earliest start, [0] the latest
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # one wide NaN-filled sample matrix covering the whole race span
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
        # get prediction from ml output
        # pos = laps
        # 1 ... 10 | 11 <- start pos in forecasts
        # 0 ... 9 | 10 <- 9 is the startlap
        #
        # NOTE(review): offset is -2 here but -1 in long_predict_bydf — confirm intended
        startlap = start_pos - 2
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values[0]
            #pred_val = _rec.pred_endrank.values
            #make sure shape match, 100 samples
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)
    target.samples = newsamples
    print('target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bysamples(output, samples, tss, test_ds, predictor):
    """
    use the farest samples only

    Build the long-term forecast directly from a pre-assembled sample matrix
    (e.g. the output of df2samples / runs2samples) instead of a prediction df.

    input:
        samples ; 2-D array [sample, lap] for one car; +1 is added below to
                  make ranks start from 1 for visualization
        tss     ; NOTE(review): this parameter is overwritten by the series
                  from test_ds below — the incoming value is unused
    return: (forecast object with replaced samples, observed series tss[0])
    """
    def get_start(idx):
        # lap offset of forecast idx from the series start (1 minute == 1 lap)
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds, # test dataset
        predictor= predictor, # predictor
        num_samples=100, # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # forecasts are ordered latest-first: [-1] is the earliest start, [0] the latest
    first_start = get_start(-1)
    last_start = get_start(0)
    print(first_start, last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    #sample array size: last_start - first_start + npredict
    arraysize = last_start - first_start + npredict
    #error here
    #target.samples = samples[:,-len(forecasts)-1:] + 1
    #target.samples = samples[:, 10 + npredict:] + 1
    # slice the race-wide sample matrix down to the forecast window; +1 shifts
    # ranks to start from 1 for visualization
    target.samples = samples[:, first_start:first_start + arraysize] + 1
    print('long_predict_bysamples==>target samples shape:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target, tss[0]
#
# different idx format to bymloutput
#
def long_predict_bydf(output, dfin, test_ds, predictor):
    """
    Same as long_predict_bymloutput but with a different startlap index
    convention (start_pos - 1 rather than - 2): splice the single prediction
    per startlap from dfin into a gluonts forecast grid.

    input:
        output   ; label string (used only by the commented-out plotting call)
        dfin     ; prediction df with `startlap` and `pred_endrank`
        test_ds  ; gluonts test dataset, one entry per rolling start position
        predictor; gluonts predictor (used only to lay out the forecast grid)
    return: (forecast object with replaced samples, observed series tss[0])
    """
    def get_start(idx):
        # lap offset of forecast idx from the series start (1 minute == 1 lap)
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds, # test dataset
        predictor = predictor, # predictor
        num_samples=100, # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # forecasts are ordered latest-first: [-1] is the earliest start, [0] the latest
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # one wide NaN-filled sample matrix covering the whole race span
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
        # get prediction from ml output
        # pos = laps
        # 1 ... 10 | 11 <- start pos in forecasts
        # 0 ... 9 | 10 <- 9 is the startlap
        #
        # NOTE(review): offset is -1 here but -2 in long_predict_bymloutput — confirm intended
        startlap = start_pos - 1
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values[0]
            #pred_val = _rec.pred_endrank.values
            #make sure shape match, 100 samples
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)
    target.samples = newsamples
    print('target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def get_ranknet_multirun(retdata, testcar, test_ds, predictor, sampleCnt=100):
    """
    Collect one car's RankNet predictions across all simulation runs and
    build the corresponding long-term multirun forecast.

    retdata ; {runid: (result df, ...)} per-run outputs
    Returns (target forecast, observed series) from
    long_predict_bymloutput_multirun.
    """
    # one prediction frame per run, restricted to the target car
    frames = []
    for runid in retdata.keys():
        run_df = retdata[runid][0]
        frames.append(run_df[run_df['carno'] == testcar])
    dfin_ranknet = pd.concat(frames)
    print('dfin_ranknet size:', len(dfin_ranknet))
    # shift to the 0-based indexing used by the ml-model output format
    dfin_ranknet['startlap'] = dfin_ranknet['startlap'] - 1
    dfin_ranknet['startrank'] = dfin_ranknet['startrank'] - 1
    dfin_ranknet['endrank'] = dfin_ranknet['endrank'] - 1
    target_ranknet, tss_ranknet = long_predict_bymloutput_multirun(
        'ranknet-rank', dfin_ranknet, test_ds, predictor, sampleCnt=sampleCnt)
    return target_ranknet, tss_ranknet
# In[ ]:
def ploth(ts_entry, forecast_entry, pits,caution, pitstop,outputfile,
          colors = ['r','g','m'],
          plabels= ['observed','svr','arima','ranknet'],
          ylabel = 'RANK'):
    """
    Stacked per-predictor plot for one car: observed series, a 2-lap-shifted
    "CurRank" baseline, the probabilistic forecast, pit-stop markers, and a
    caution/pit status strip. Saves <outputfile>.pdf.

    ts_entry / forecast_entry ; equal-length lists (one entry per predictor)
                                of observed DataFrame and forecast object
    pits                      ; rows [completed_laps, rank, laptime, ...] at pit laps
    caution, pitstop          ; per-lap 0/1 status arrays (see get_racestatus)
    ylabel                    ; 'RANK' switches axis limits / marker column
                                versus laptime mode
    """
    #plot_length = int(forecast_entry[0].samples.shape[1] *1.2)
    #plot_length = forecast_entry[0].samples.shape[1] + 10
    #prediction_intervals = (50.0, 90.0)
    prediction_intervals = [90.0]
    #legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    figcnt = len(forecast_entry)
    #fig, axs = plt.subplots(figcnt,1, figsize=(8,6))
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    #colors = ['r','g','m']
    #plabels = ['observed','svr','arima','ranknet']
    for idx in range(figcnt):
        ax = plt.subplot(figcnt, 1, idx+1)
        #ax = plt.subplot(1, figcnt, idx+1)
        #ts_entry.iloc[-plot_length:,0].plot(ax=axs, linewidth=1) # plot the time series
        #ts_entry.iloc[-plot_length:,0].plot(ax=axs[idx], linewidth=1) # plot the time series
        #plot_length = int(forecast_entry[idx].samples.shape[1] *1.2)
        # observed ground truth
        ts_entry[idx].iloc[:,0].plot(linewidth=1, color='b',
                marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # currank
        # naive baseline: the observed series shifted 2 laps forward in time
        sv = ts_entry[idx].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq='1min') + 2
        date_index = pd.date_range(start, periods = len(sv)-2, freq='1min')
        df2 = pd.DataFrame(sv[:-2], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
                marker='+', alpha=0.7, zorder=-1, label='CurRank')
        #for idx in range(len(forecast_entry)):
        #    forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='g')
        forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals,
                color=colors[idx],label=plabels[idx+1], zorder=10)
        #forecast_entry[1].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='b')
        #forecast_entry[2].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='r')
        #add mean line, compare with median
        #if forecast_entry[idx].samples.shape[0] > 1:
        # overlay the sample mean for the later (multi-run) panels only
        if idx>3:
            mean_forecast = copy.deepcopy(forecast_entry[idx])
            mean_forecast.samples = np.mean(mean_forecast.samples, axis=0).reshape((1,-1))
            mean_forecast.copy_dim(0).plot(prediction_intervals=prediction_intervals,
                    color='g',label='use-mean', zorder=10)
        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        #if idx==0:
        ax.set_ylabel(ylabel)
        if idx==0:
            plt.title(outputfile)
        # relabel the x axis from synthetic timestamps to lap numbers
        locs, labels = plt.xticks()
        #plt.xticks(locs, range(len(locs)))
        start_loc = locs[0]
        #offset = range(0, 200, 5)
        offset = range(0, gvar.maxlap, 5)
        #new_locs = range(start_loc , start_loc+200, 10)
        new_locs = [start_loc + x for x in offset]
        #new_labels = [str(x-start_loc + 1) for x in new_locs]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[idx])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            # NOTE(review): this branch reuses `xl` from the previous iteration;
            # with figcnt > 1 that is the prior panel's left limit — confirm intended
            xlim_h = len(ts_entry[idx])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        #ax.set_xlim((80,110))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)
        #add racestatus
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcar(carno):
    """
    Plot the first five rank forecasts for one car.

    Reads the module-level globals `alldata` (per-car (series, forecasts)
    lists) and `gvar.rankdata`; delegates rendering to ploth().
    """
    series, forecasts = alldata[carno]
    pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, gvar.rankdata)
    print(np.where(pitstop == 1))
    ploth(series[:5], forecasts[:5], pits, caution, pitstop,
          'ranknet-rf-rank-forecast-%d' % carno,
          colors=['y','c','g','m','r'],
          plabels=['observed','SVR','RF','Arima','RrankNet-Oracle','RrankNet-MLP'])
def plotcar_laptime(carno):
    """
    Plot laptime forecasts for one car.

    Reads the module-level globals `alldata` and `gvar.rankdata`; delegates
    rendering to ploth() in LapTime mode.
    """
    series, forecasts = alldata[carno]
    pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, gvar.rankdata)
    print(np.where(pitstop == 1))
    ploth(series, forecasts, pits, caution, pitstop,
          'ranknet-oracle-laptime-forecast-%d' % carno,
          colors=['m','r'],
          plabels=['observed','RrankNet-Oracle','RrankNet-MLP'],
          ylabel='LapTime')
def plotrank(outputfile, mode='RANK' ):
    """
    Plot the raw observed rank (or laptime) trace for every car in the
    module-level `alldata`, one stacked subplot per car, with pit markers
    and a caution/pit status strip. Saves <outputfile>.pdf.

    mode ; 'RANK' plots rank (ylim -5..35); any other value plots laptime
    input:
       alldata, rankdata; global data
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        #target_svr, target_rf,target_arima, target_oracle, target_ranknet_1run = savedata[carno]
        #target_oracle(by longpredict), tss_oracle_multirun,tss_ranknet_multirun
        # forecasts are unpacked but unused here — only raw status/rank is drawn
        tsss, targets = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, gvar.rankdata)
        print(np.where(pitstop==1))
        ax = plt.subplot(figcnt, 1, idx+1)
        if mode == 'RANK':
            ax.plot(ranks, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='Rank')
            ax.set_ylim((-5,+35))
            ax.plot(pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            #add racestatus
            add_status(ax,0, caution, pitstop)
        else:
            ax.plot(laptimes, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='LapTime')
            ax.set_ylim((30,140))
            ax.plot(pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            #add racestatus
            add_status(ax,0, caution, pitstop,y=32, height=5)
        #ax.set_xlim((0,200))
        ax.set_xlim((0,gvar.maxlap))
        ax.set_ylabel('car-%d'%carno)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcarx(carno):
    """
    Like plotcar() but splices the weighted-oracle forecast (slot 1 of the
    module-level `oracledata`) into slot 2 of this car's entry.

    NOTE: mutates the lists stored in alldata[carno] in place.
    """
    series, forecasts = alldata[carno]
    oracle_series, oracle_forecasts = oracledata[carno]
    series[2] = oracle_series[1]
    forecasts[2] = oracle_forecasts[1]
    pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, gvar.rankdata)
    print(np.where(pitstop == 1))
    ploth(series[:5], forecasts[:5], pits, caution, pitstop,
          'ranknet-rf-rank-forecast-%d' % carno,
          colors=['y','c','g','m','r'],
          plabels=['observed','SVR','RF','Weighted-Oracle','RrankNet-Oracle','RrankNet-MLP'])
def plotoracle(alldata, carno, destdir):
    """
    Plot the oracle long-term forecast variants for one car into destdir.

    alldata ; {carno: (series list, forecast list)}
    destdir ; output directory prefix for the pdf file name
    """
    outputfile = destdir + 'ranknet-oracle-forecast-%d' % carno
    series, forecasts = alldata[carno]
    pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, gvar.rankdata)
    print(np.where(pitstop == 1))
    ploth(series, forecasts, pits, caution, pitstop,
          outputfile,
          colors=['y','c','g','m','r'],
          plabels=['observed','1run-samples','1run-df','multimean','norerank-multimean','mrun-samples'])
def plotallcars(alldata, outputfile, drawid = 0,
        colors = ['g','c','m','r','y'],
        plabels= ['observed','1run-samples','1run-df','multimean','norerank-multimean','mrun-samples'],
        ylabel='RANK'):
    """
    plot a single fig for all cars

    One stacked subplot per car showing the observed series, the
    prediction-length-shifted "CurRank" baseline, and ONE selected forecast
    (index `drawid`) per car, plus pit markers and the status strip.
    Saves <outputfile>.pdf.

    input:
       prediction_length,freq ; global var (via gvar)
       alldata, rankdata; global data
       drawid : long prediction result index in alldata[carno] to draw
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    prediction_intervals = [90.0]
    #legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    font = {'family': 'serif',
        'color': 'darkred',
        'weight': 'normal',
        'size': 12,
        }
    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        #target_svr, target_rf,target_arima, target_oracle, target_ranknet_1run = savedata[carno]
        #target_oracle(by longpredict), tss_oracle_multirun,tss_ranknet_multirun
        ts_entry, forecast_entry = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, gvar.rankdata)
        print(np.where(pitstop==1))
        ax = plt.subplot(figcnt, 1, idx+1)
        # observed
        ts_entry[drawid].iloc[:,0].plot(linewidth=1, color='b',
                marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # currank
        # baseline: the observed series shifted forward by the prediction length
        sv = ts_entry[drawid].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq=gvar.freq) + gvar.prediction_length
        date_index = pd.date_range(start, periods = len(sv) - gvar.prediction_length, freq = gvar.freq)
        df2 = pd.DataFrame(sv[:- gvar.prediction_length], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
                marker='+', alpha=0.7, zorder=-1, label='CurRank')
        #forecast
        forecast_entry[drawid].copy_dim(0).plot(prediction_intervals=prediction_intervals,
                color=colors[drawid],label=plabels[drawid+1], zorder=10)
        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        ax.set_ylabel(ylabel)
        # relabel the x axis from synthetic timestamps to lap numbers
        locs, labels = plt.xticks()
        #plt.xticks(locs, range(len(locs)))
        start_loc = locs[0]
        #offset = range(0, 200, 5)
        offset = range(0, gvar.maxlap, 5)
        #new_locs = range(start_loc , start_loc+200, 10)
        new_locs = [start_loc + x for x in offset]
        #new_labels = [str(x-start_loc + 1) for x in new_locs]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[drawid])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            # NOTE(review): this branch reuses `xl` from the previous iteration's
            # panel — confirm intended
            xlim_h = len(ts_entry[drawid])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        #plt.title(outputfile)
        plt.text(xl + xlim_h - 15, 35, f'car-{carno}',fontdict=font)
        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        #ax.set_xlim((80,110))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)
        #add racestatus
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def get_racestatus_all(rankdata):
    """
    Return (pitlaps, cautionlaps): sorted unique `completed_laps` values on
    which any car pitted (lap_status 'P') or a caution was active
    (track_status 'Y').
    """
    frame = rankdata
    data = frame[['completed_laps','rank','last_laptime','time_behind_leader']].values
    # per-row 0/1 flags derived from the status characters
    pit_flags = np.array([1 if s == 'P' else 0 for s in frame['lap_status'].values])
    caution_flags = np.array([1 if s == 'Y' else 0 for s in frame['track_status'].values])
    pits = data[np.where(pit_flags == 1)]
    pitlaps = sorted(set(list(pits[:, 0].astype(int))))
    cautions = data[np.where(caution_flags == 1)]
    cautionlaps = sorted(set(list(cautions[:, 0].astype(int))))
    return pitlaps, cautionlaps
def get_racestatus(carno, rankdata):
    """
    Extract race-status arrays for one car.

    completed_laps starts from 0; the [1:] slices below drop that first row
    so array index 0 corresponds to completed_laps == 1.
    Returns (pits, cautions, caution[1:], pitstop[1:], ranks[1:], laptimes[1:])
    where pits/cautions are the [completed_laps, rank, last_laptime,
    time_behind_leader] rows on pit / caution laps and the status arrays
    are per-lap 0/1 flags.
    """
    car_df = rankdata[rankdata['car_number'] == carno]
    data = car_df[['completed_laps','rank','last_laptime','time_behind_leader']].values
    pitstop = np.array([1 if s == 'P' else 0 for s in car_df['lap_status'].values])
    caution = np.array([1 if s == 'Y' else 0 for s in car_df['track_status'].values])
    pits = data[np.where(pitstop == 1)]
    cautions = data[np.where(caution == 1)]
    ranks = car_df[['rank']].values
    laptimes = car_df[['last_laptime']].values
    return pits, cautions, caution[1:], pitstop[1:], ranks[1:], laptimes[1:]
# status-strip colors used by add_status(): green = normal running,
# yellow = caution, red = pit stop (hex alternatives left commented out)
#red = '#ff8080'
red = 'red'
#yellow = '#8080ff'
yellow = 'yellow'
#green = '#80ff80'
green = 'green'
def add_status(axs, xl, caution, pitstop, y=-4, height=2):
    """
    Draw a per-lap race-status strip of 1-lap-wide rectangles on axis `axs`:
    green = normal, yellow = caution, red = pit stop (pit wins over caution).

    xl        ; left x-limit offset of the axis (laps are drawn at lap + xl)
    y, height ; vertical position and thickness of the strip
    caution, pitstop ; per-lap 0/1 arrays; the shorter one bounds the strip
    """
    for lap, (is_caution, is_pit) in enumerate(zip(caution, pitstop)):
        if is_pit == 1:
            face = red
        elif is_caution == 1:
            face = yellow
        else:
            face = green
        patch = plt.Rectangle((lap + xl - 0.5, y), 1, height, fc=face, ec=face)
        axs.add_patch(patch)
# In[ ]:
#
# stint evaluation
#
def eval_bydf(testdf, bydf, forcematch=True, force2int=False):
    """
    Evaluate testdf restricted to the <carno, startlap> records present in bydf.

    forcematch ; drop records whose startrank (col 2) / endrank (col 3)
                 disagree between the two frames
    force2int  ; cast the 11 output columns to int
    Returns (dfout sorted by carno/startlap, accret from stint.get_evalret).
    """
    cars = set(bydf.carno.values)
    laps_bycar = {car: set(bydf[bydf['carno'] == car].startlap.values) for car in cars}
    kept = []
    for car in cars:
        for lap in laps_bycar[car]:
            dfrec = testdf[(testdf['carno'] == car) & (testdf['startlap'] == lap)]
            if forcematch:
                a = testdf[(testdf['carno'] == car) & (testdf['startlap'] == lap)].to_numpy().astype(int)
                b = bydf[(bydf['carno'] == car) & (bydf['startlap'] == lap)].to_numpy().astype(int)
                if len(a) != 0 and len(b) != 0:
                    # startrank and endrank must agree, otherwise skip the record
                    if not (a[0][2] == b[0][2] and a[0][3] == b[0][3]):
                        continue
            kept.append(dfrec)
    dfout = pd.concat(kept)
    if force2int:
        dfout = pd.DataFrame(dfout.to_numpy().astype(int),
                             columns=['carno', 'startlap', 'startrank',
                                      'endrank', 'diff', 'sign',
                                      'pred_endrank', 'pred_diff', 'pred_sign',
                                      'endlap', 'pred_endlap'])
    dfout = dfout.sort_values(by=['carno','startlap'])
    print('df size:', len(dfout))
    accret = stint.get_evalret(dfout)[0]
    return dfout, accret
def eval_sync(testdf, errlist, force2int=False):
    """
    Evaluate testdf after removing the <carno, startlap> records listed in
    errlist (as detected by cmp_df).

    force2int ; cast the 11 output columns to int
    Returns (dfout sorted by carno/startlap, accret from stint.get_evalret).
    """
    cars = set(testdf.carno.values)
    laps_bycar = {car: set(testdf[testdf['carno'] == car].startlap.values) for car in cars}
    kept = []
    for car in cars:
        for lap in laps_bycar[car]:
            # skip records flagged as mismatched
            if [car, lap] in errlist:
                continue
            kept.append(testdf[(testdf['carno'] == car) & (testdf['startlap'] == lap)])
    dfout = pd.concat(kept)
    if force2int:
        dfout = pd.DataFrame(dfout.to_numpy().astype(int),
                             columns=['carno', 'startlap', 'startrank',
                                      'endrank', 'diff', 'sign',
                                      'pred_endrank', 'pred_diff', 'pred_sign',
                                      'endlap', 'pred_endlap'])
    dfout = dfout.sort_values(by=['carno','startlap'])
    print('df size:', len(dfout))
    accret = stint.get_evalret(dfout)[0]
    return dfout, accret
def cmp_df(testdf, bydf):
    """
    Cross-check two result frames record by record; the frames can differ
    slightly (e.g. RankNet drops short time series).

    A <carno, startlap> record mismatches when it is missing from either
    frame or when startrank (col 2) / endrank (col 3) disagree.
    Returns (errcnt, err_list of [car, startlap]).
    """
    cars = set(bydf.carno.values)
    laps_bycar = {car: set(bydf[bydf['carno'] == car].startlap.values) for car in cars}
    err_list = []
    errcnt = 0
    for car in cars:
        for lap in laps_bycar[car]:
            a = testdf[(testdf['carno'] == car) & (testdf['startlap'] == lap)].to_numpy().astype(int)
            b = bydf[(bydf['carno'] == car) & (bydf['startlap'] == lap)].to_numpy().astype(int)
            if len(a) == 0 or len(b) == 0:
                errcnt += 1
                print('mismatch empty:', a, b)
                err_list.append([car, lap])
            elif not (a[0][2] == b[0][2] and a[0][3] == b[0][3]):
                print('mismatch:', a, b)
                errcnt += 1
                err_list.append([car, lap])
    print('errcnt:', errcnt)
    return errcnt, err_list
def df2samples(dfall, prediction_len=2, samplecnt=1):
    """
    Convert a result df into the <samples, tss> format.

    This version works for ml-model outputs that carry a single prediction
    per record: the one `pred_endrank` value is replicated into all
    samplecnt sample rows at lap = startlap + prediction_len.
    Returns ({carno: (samplecnt, gvar.maxlap) array}, {carno: (gvar.maxlap,) array}).
    """
    carlist = set(dfall.carno.values)
    startlaps = {car: set(dfall[dfall['carno'] == car].startlap.values) for car in carlist}
    full_samples = {}
    full_tss = {}
    for carno in carlist:
        tss = np.full(gvar.maxlap, np.nan)
        samples = np.full((samplecnt, gvar.maxlap), np.nan)
        for startlap in startlaps[carno]:
            rec = dfall[(dfall['carno'] == carno) & (dfall['startlap'] == startlap)]
            curlap = int(rec.startlap.values[0] + prediction_len)
            # broadcast the single prediction into every sample row
            samples[:, curlap] = rec.pred_endrank.values[0]
            tss[curlap] = rec.endrank.values[0]
        full_samples[carno] = samples
        full_tss[carno] = tss
    return full_samples, full_tss
def df2samples_ex(dfall, samplecnt=100, errlist=[]):
    """
    Stint-results variant of df2samples: build <samples, tss> with the
    predictions stored at the END lap of each stint, one sample per run.

    dfall   ; concatenated result df <carno,startlap,...,pred_endrank,...,endlap,...>
    errlist ; [car, startlap] records to skip
    Returns ({carno: (samplecnt, gvar.maxlap) array}, {carno: (gvar.maxlap,) array}).
    """
    carlist = set(dfall.carno.values)
    startlaps = {car: set(dfall[dfall['carno'] == car].startlap.values) for car in carlist}
    full_samples = {}
    full_tss = {}
    for carno in carlist:
        tss = np.full(gvar.maxlap, np.nan)
        samples = np.full((samplecnt, gvar.maxlap), np.nan)
        for startlap in startlaps[carno]:
            if [carno, startlap] in errlist:
                continue
            rec = dfall[(dfall['carno'] == carno) & (dfall['startlap'] == startlap)]
            # store at the end lap of the stint (not the start lap)
            curlap = int(rec.endlap.values[0])
            samples[:, curlap] = rec.pred_endrank.to_numpy()
            tss[curlap] = rec.endrank.values[0]
        full_samples[carno] = samples
        full_tss[carno] = tss
    return full_samples, full_tss
def runs2samples(runret, errlist):
    """
    Stint-results variant: build <samples, tss> from multiple run dfs;
    sample row i at the start lap comes from run i's `pred_endrank`.

    runret  ; list of result dfs <carno,startlap,startrank,endrank,diff,sign,
              pred_endrank,pred_diff,pred_sign,endlap,pred_endlap>
    errlist ; [car, startlap] records to skip
    Returns ({carno: (len(runret), gvar.maxlap) array}, {carno: (gvar.maxlap,) array}).
    """
    samplecnt = len(runret)
    carlist = set(runret[0].carno.values)
    # concat all per-run frames so one <carno, startlap> slice spans all runs
    dfall = pd.concat(runret)
    startlaps = {car: set(dfall[dfall['carno'] == car].startlap.values) for car in carlist}
    full_samples = {}
    full_tss = {}
    for carno in carlist:
        tss = np.full(gvar.maxlap, np.nan)
        samples = np.full((samplecnt, gvar.maxlap), np.nan)
        for startlap in startlaps[carno]:
            if [carno, startlap] in errlist:
                continue
            rec = dfall[(dfall['carno'] == carno) & (dfall['startlap'] == startlap)]
            curlap = int(rec.startlap.values[0])
            samples[:, curlap] = rec.pred_endrank.to_numpy()
            tss[curlap] = rec.endrank.values[0]
        full_samples[carno] = samples
        full_tss[carno] = tss
    return full_samples, full_tss
# In[ ]:
def get_config():
    """
    Snapshot the module-level experiment settings as a flat ordered list
    (e.g. for saving alongside results). Every name below is a module global
    defined elsewhere in this file.
    """
    config = [
        _savedata,
        _skip_overwrite,
        _inlap_status,
        _feature_mode,
        _featureCnt,
        freq ,
        _train_len,
        prediction_length,
        context_ratio,
        context_length,
        contextlen,
        dataset,
        epochs,
        gpuid,
        _use_weighted_model,
        trainmodel,
        _use_cate_feature,
        use_feat_static,
        distroutput,
        batch_size,
        loopcnt,
        _test_event,
        testmodel,
        pitmodel,
        year
    ]
    return config
def test_global():
    # smoke test: verify the module-level state object `gvar` is mutable here
    gvar._hi += 200
def get_event_info(event):
    """Return the race info for `event` from the gvar._race_info lookup table."""
    #eid = event.split('-')[0]
    return gvar._race_info[event]
| 102,604 | 32.640984 | 194 | py |
rankpredictor | rankpredictor-master/src/indycar/model/stint_simulator_shortterm_pitmodel_savedata.py | #!/usr/bin/env python
# coding: utf-8
# ### stint simulator
# based on: Stint-Predictor-Fastrun
#
#
# long term predictor by continuously regressive forecasting at each pitstop
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random, math
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from indycar.model.pitmodel import PitModelSimple, PitModelMLP
# In[2]:
import os
random.seed()
os.getcwd()
#GPUID = 1
# ### global constants
# In[3]:
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# row indices into the per-car feature matrix rec[feature, lap]
# (see make_dataset_byevent)
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8
# share the memory
#COL_LAPSTATUS_PRED = 8 # for dynamic lapstatus predictions
#LAPSTATUS SAVED in forecast_et
# slot indices reused when saving predictions into forecast_et records
COL_LAPSTATUS_SAVE = 0 #laptime no use
COL_CAUTION_LAPS_INSTINT_SAVE=7
COL_LAPS_INSTINT_SAVE= 8
# feature-set selector: track/lap status only, or status plus pit age
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
# oracle mode -- bit flags combined into oracle_mode and decoded via test_flag()
MODE_ORACLE = 1024  # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2
# oracle mode for training (drop one covariate)
MODE_NOLAP = 1
MODE_NOTRACK = 2
# predicting mode (how future covariates are filled in the test set)
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32
# disturbe analysis (perturb oracle covariates)
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256
# flag bit -> human-readable name, used by get_modestr()
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
             MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
             MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
             MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
             MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
             MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
# In[ ]:
# In[4]:
def load_data(event, year=0):
    """Load one race's lap-by-lap CSV and derive per-lap rank tables.

    Args:
        event: event name, e.g. 'Indy500' (used to build the file path).
        year: optional int year; when >0 the file 'C_<event>-<year>.csv' is
            read, otherwise 'C_<event>.csv'.

    Returns:
        (alldata, rankdata, acldata):
            alldata  -- raw dataframe with every record of every car
            rankdata -- one row per (car, lap), ordered by elapsed time
            acldata  -- completed-laps table for all cars (see make_cl_data)
    """
    if year > 0:
        # bug fix: `year` is an int, so the original str+int concatenation
        # raised TypeError; convert explicitly.
        inputfile = '../data/final/C_' + event + '-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'

    dataset = pd.read_csv(inputfile)

    final_lap = max(dataset.completed_laps)

    # cars that completed the final lap of the race
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values

    # keep a full copy; restrict `dataset` to finishers only
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]

    # order every record globally by elapsed time (stable via the original
    # row index) and keep the first record per (car, lap)
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # note: the original also computed make_cl_data(dataset) into an unused
    # local; only the all-cars table is returned.
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-(car, lap) completed-laps table.

    For every (car_number, completed_laps) pair the first record in global
    elapsed-time order is kept, then per-car first-difference columns are
    added.

    Args:
        dataset: raw race dataframe; must contain car_number, completed_laps,
            rank, elapsed_time, current_status, track_status, lap_status plus
            the bookkeeping columns dropped below.

    Returns:
        DataFrame with columns [car_number, completed_laps, rank,
        elapsed_time, rank_diff, time_diff, current_status, track_status,
        lap_status]; rank_diff/time_diff are 0 on each car's first row.
    """
    # pick up data with valid rank: order all records globally by elapsed
    # time (stable via the original row index) and keep the first record
    # for every (car, lap)
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
                          "last_pitted_lap","start_position","laps_led"], axis=1)

    # per-car first differences; rows where the car number changes (each
    # car's first row) have no predecessor and are zeroed.
    # bug fix: the original used chained assignment (uni_ds['col'][mask] = 0)
    # which is unreliable under pandas copy-on-write -- use .loc instead.
    new_car = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[new_car, 'rank_diff'] = 0

    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[new_car, 'time_diff'] = 0

    df = uni_ds[['car_number','completed_laps','rank','elapsed_time',
                 'rank_diff','time_diff',"current_status", "track_status", "lap_status"]]
    return df
# In[5]:
def nan_helper(y):
    """Locate NaNs in a 1-d array.

    Args:
        y: 1-d numpy array, possibly containing NaNs.

    Returns:
        (nan_mask, to_indices): a boolean mask of the NaN positions, and a
        function converting any logical mask into integer indices, e.g.::

            nans, x = nan_helper(y)
            y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    nan_mask = np.isnan(y)

    def to_indices(logical):
        return np.nonzero(logical)[0]

    return nan_mask, to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
def get_modestr(a):
    """Render the mode bits set in *a* as a comma-terminated string of flag names."""
    parts = ['%s,' % _mode_map[flag] for flag in _mode_map if test_flag(a, flag)]
    return ''.join(parts)
# caches keyed by split position (endpos) -> track-status vector of
# length prediction_length (prediction and matching ground truth)
_track_pred = {}
_track_true = {}
def init_track_model():
    """Reset the cached track-status predictions and their ground truths."""
    global _track_pred, _track_true
    _track_pred, _track_true = {}, {}
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """
    Predict the track (yellow-flag) status for the forecast window.

    Results are cached per split point: the first call for a given ``endpos``
    samples a caution-phase length and stores both the prediction and the
    ground truth; later calls return the cached prediction unchanged.

    input:
        track_rec ; track-status sequence up to ``endpos`` (1 = yellow flag)
        endpos ; split position, used as the cache key
        prediction_length ; number of future laps to fill in
        context_len ; how many laps before the split to scan for an
            ongoing caution phase
    return:
        vector of length ``prediction_length`` with the predicted status
    """
    global _track_pred,_track_true

    # this is the perfect track model for Indy500 2018
    # (empirical caution-phase lengths, in laps)
    track_model = [6,4,4,5,6,6,4]

    if endpos in _track_pred:
        return _track_pred[endpos]
    else:
        #get yflag lap count from the start pred point
        # count consecutive yellow laps immediately before the split
        yflaplen = 0
        for i in range(1, context_len):
            if track_rec[- prediction_length - i] == 1:
                yflaplen += 1
            else:
                break

        #laps remain, fill into the future
        trackpred = np.array([0 for x in range(prediction_length)])
        # sample a total caution length; if the ongoing caution is shorter,
        # extend it into the forecast window for the remaining laps
        yflap_pred = random.choice(track_model)
        if yflaplen > 0 and yflap_pred > yflaplen:
            trackpred[:(yflap_pred - yflaplen)] = 1
        _track_pred[endpos] = trackpred
        # keep the ground truth alongside for later error analysis
        _track_true[endpos] = track_rec[- prediction_length:].copy()

        return trackpred
# cache keyed by split position (endpos) -> adjusted track-status vector
_track_adjust = {}
def init_adjust_track_model():
    """Reset the cached track-status adjustments."""
    global _track_adjust
    _track_adjust = dict()
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """
    Randomly perturb the end of a caution phase inside the forecast window.

    With equal probability the last yellow lap at ``tailpos`` is cleared
    (shortening the caution), left unchanged, or moved one lap later when
    that still falls inside the window. Results are cached per ``endpos``.

    input:
        track_rec ; track-status sequence up to ``endpos``
        endpos ; split position, used as the cache key
        prediction_length ; length of the forecast window
        tailpos ; <0 end pos of 1 (negative index of the last yellow lap)
    return:
        the adjusted track-status vector of length ``prediction_length``
    """
    global _track_adjust

    # this is the perfect track model for Indy500 2018
    # shift applied to the caution end: -1 shorten, 0 keep, +1 move later
    track_model = [-1,0,1]

    if endpos in _track_adjust:
        return _track_adjust[endpos]
    else:
        yflap_adjust = random.choice(track_model)

        #laps remain, fill into the future
        trackadjust = track_rec[-prediction_length:].copy()
        if yflap_adjust == -1:
            trackadjust[tailpos] = 0
        elif yflap_adjust == 1:
            # move the last yellow lap one position later, but only when the
            # shifted position is still inside the window
            trackadjust[tailpos] = 0
            if (tailpos + 1) <= -1:
                trackadjust[tailpos+1] = 1

        _track_adjust[endpos] = trackadjust

        return trackadjust
# carno -> adjusted lap_status vector, cached per car
_lap_adjust = {}
# pit-lap shift offset -> count of times it was applied (statistics)
_empirical_model = {}
def init_adjust_pitmodel():
    """Reset the per-car pit-adjustment cache and its shift statistics.

    Bug fix: ``_empirical_model`` was missing from the ``global``
    declaration, so the original assignment created a dead local and never
    cleared the module-level statistics dict.
    """
    global _lap_adjust, _empirical_model
    _lap_adjust = {}
    _empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """
    Build (and cache) a randomly perturbed lap-status vector for one car.

    Each pit lap (status 1) is shifted by an offset drawn from the module
    ``_adjust_model`` distribution; with ``force`` the draw is repeated until
    the shifted position lands inside the sequence. Applied shifts are tallied
    into ``_empirical_model``. The first result per car is cached in
    ``_lap_adjust`` and returned on later calls.

    input:
        carno ; car number, used as the cache key
        lapstatus ; the true lap-status sequence (1 = pit lap)
        force ; True -> retry until a valid shift is found
    return:
        the adjusted lap-status vector for this car (from the cache)
    """
    if carno not in _lap_adjust:
        #adjust it
        lapadjust = lapstatus.copy()
        for pos in range(0, len(lapstatus)):
            if lapadjust[pos] == 1:
                success = False
                while(not success):
                    # adjust this pit lap position
                    pos_adjust = get_random_choice(_adjust_model)

                    new_pos = pos + pos_adjust

                    if new_pos >= 0 and new_pos < len(lapstatus):
                        #valid
                        lapadjust[pos] = 0
                        lapadjust[new_pos] = 1
                        success = True

                        #add statistics
                        if pos_adjust not in _empirical_model:
                            _empirical_model[pos_adjust] = 1
                        else:
                            _empirical_model[pos_adjust] += 1

                    if force==False:
                        break

        _lap_adjust[carno] = lapadjust

    return _lap_adjust[carno]
def build_random_model(modeldict):
    """Turn a {value: weight} dict into a CDF table.

    Args:
        modeldict: mapping from value to (possibly unnormalized) probability
            mass.

    Returns:
        ndarray of shape (len(modeldict), 2): column 0 holds the values in
        ascending order, column 1 the normalized cumulative probabilities
        (last entry 1.0).
    """
    values = sorted(modeldict.keys())
    model = np.zeros((len(values), 2))
    running = 0.0
    for row, val in enumerate(values):
        running += modeldict[val]
        model[row, 0] = val
        model[row, 1] = running
    # normalize the cumulative column so the final entry is exactly 1.0
    if len(values) > 0:
        model[:, 1] /= running
    return model
def print_model(model, iscdf=True):
    """Pretty-print a [value, cdf-or-weight] model as a list of 'val:prob'.

    Args:
        model: ndarray of rows [value, cdf] (or [value, weight]).
        iscdf: True when column 1 holds cumulative probabilities; False when
            it holds raw weights that must be normalized by their sum.
    """
    ordered = model[np.argsort(model[:, 0])]
    total = 1. if iscdf else np.sum(ordered[:, 1])
    pairs = []
    prev_cdf = 0
    for value, mass in ordered:
        # cdf input: probability is the difference to the previous entry;
        # weight input: probability is the normalized weight itself
        pairs.append((value, (mass - prev_cdf) / total))
        if iscdf:
            prev_cdf = mass
    print(['%d:%.3f' % (v, p) for v, p in pairs])
def get_random_choice(model):
"""
input:
model ; [val, cdf]
return:
val according to its probability
"""
target = np.random.rand()
idx = np.sum(model[:,1] < target)
return int(model[idx,0])
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
# pit-lap shift distribution: offset -> probability weight (normalized by
# build_random_model). The mass at 0 was lowered from the commented 0.4 to
# 0.05, so most pit laps get moved by 1-2 laps.
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
# CDF table [value, cdf] consumed by get_random_choice
_adjust_model = build_random_model(_modeldict)
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """
    Randomly shift every pit lap inside the forecast window.

    Each pit lap (status 1) in the trailing ``prediction_length`` entries is
    moved by an offset drawn from the module ``_adjust_model`` distribution;
    with ``force`` the draw repeats until the move lands inside the window.

    input:
        lap_rec ; lap-status sequence; only the last ``prediction_length``
            entries are used
        prediction_length ; length of the forecast window
        force ; True -> retry until a valid shift is found
    return:
        the adjusted lap-status vector (a copy of the window)
    """
    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            success = False
            while(not success):
                # adjust this pit lap position
                pos_adjust = get_random_choice(_adjust_model)

                new_pos = pos + pos_adjust

                if new_pos >= 0 and new_pos < prediction_length:
                    #valid
                    lapadjust[pos] = 0
                    lapadjust[new_pos] = 1
                    success = True

                if force==False:
                    break

    return lapadjust
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
"""
input:
tailpos ; <0 end pos of 1
return the predicted lap status
"""
adjust_model = [-1,0,1]
lap_adjust = random.choice(adjust_model)
#laps remain, fill into the future
lapadjust = lap_rec[-prediction_length:].copy()
for pos in range(0, prediction_length):
if lapadjust[pos] == 1:
# adjust this pit lap position
pos_adjust = random.choice(adjust_model)
if pos_adjust == -1:
if (pos - 1 >= 0):
lapadjust[pos] = 0
lapadjust[pos - 1] = 1
elif pos_adjust == 1:
if (pos + 1 < prediction_length):
lapadjust[pos] = 0
lapadjust[pos + 1] = 1
return lapadjust
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
"""
return the predicted pit status
"""
# this is the perfect empirical pit model for Indy500 2018
pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
[45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
[46, 45, 43, 48, 46, 45, 45, 43]]
pit_model = pit_model_all
if cuation_laps_instint>10:
#use low model
pred_pit_laps = random.choice(pit_model[0])
else:
pred_pit_laps = random.choice(pit_model[1])
#laps remain, fill into the future
pitpred = np.array([0 for x in range(prediction_length)])
if (pred_pit_laps > laps_instint) and (pred_pit_laps <= laps_instint + prediction_length):
pitpred[pred_pit_laps - laps_instint - 1] = 1
return pitpred
def make_dataset_byevent(runs, prediction_length, freq,
                    useeid = False,
                    run_ts= COL_LAPTIME,
                    test_event = 'Indy500-2018',
                    test_cars = [],
                    use_global_dict = True,
                    oracle_mode = MODE_ORACLE,
                    half_moving_win = 0,
                    train_ratio=0.8,
                    log_transform = False,
                    context_ratio = 0.,
                    verbose = False
                ):
    """
    Split the per-car time series into gluonts train/test sets.

    Only the event matching the module-level ``_test_event`` is processed
    (other events hit the early ``continue`` below); since that event is the
    test event, ``train_set`` stays empty in this variant. Test records are
    produced with a rolling split point and the future covariates filled
    according to the ``oracle_mode`` bit flags.

    input:
        runs ; index into ``laptime_data`` (>=0 selects one event, <0 all)
        prediction_length ; forecast horizon in laps
        freq ; pandas frequency string for the synthetic timestamps
        useeid ; include the event id as a second static category
        run_ts ; target feature row (shadowed by global ``_run_ts`` below)
        test_event ; held-out event (shadowed by global ``_test_event``)
        test_cars ; unused here. NOTE(review): mutable default argument
        use_global_dict ; map car numbers through ``global_carids``
        oracle_mode: false to simulate prediction in real by
            set the covariates of track and lap status as nan in the testset
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
        train_ratio ; fraction of the longest series used as history
        log_transform ; apply log(1+x) to the target
        context_ratio ; when nonzero, overrides the context length
        verbose ; print per-series diagnostics
    return:
        (train_ds, test_ds, train_set, test_set) ; gluonts ListDatasets plus
        the underlying record lists
    """
    # parameters are shadowed by module-level globals on purpose
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode

    init_track_model()
    init_adjust_track_model()

    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series

    train_set = []
    test_set = []

    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()

    #add statistics for adjust test
    # accumulated covariate error introduced by the adjustments:
    # mae[0] = trackstatus, mae[1] = lapstatus
    mae = [0,0]

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []

        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)

        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)

            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)

            # selection of features: zero out the covariate row(s) excluded
            # by the oracle/training mode flags
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                        'start': start,
                        'feat_static_cat': static_cat,
                        'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                               rec[COL_LAPSTATUS,:]]
                         }
                      )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len

                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1

                #bug fix, fixed the split point for all cars/ts
                for endpos in range(max_len, context_len+prediction_length,
                                    step):

                    #check if enough for this ts
                    if endpos > totallen:
                        continue

                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    # stint state at the lap just before the forecast window
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0

                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0

                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0

                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                                                     laps_instint,prediction_length)

                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0

                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec

                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec

                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]

                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))

                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                              )
                    test_rec_cnt += 1

            #add one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

        train_set.extend(_train)
        test_set.extend(_test)

    print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')

    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)

    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile, freq, prediction_length, cardinality, train_ds, test_ds):
    """Pickle a dataset bundle to *datafile*.

    The payload is the list [freq, prediction_length, cardinality, train_ds,
    test_ds], written with the highest pickle protocol available.
    """
    payload = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as fout:
        pickle.dump(payload, fout, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[6]:
def predict(test_ds, predictor):
    """Run a gluonts predictor over *test_ds* and materialize the results.

    Args:
        test_ds: gluonts dataset to forecast on.
        predictor: a trained gluonts Predictor.

    Returns:
        (tss, forecasts): lists of ground-truth series and sample forecasts
        (100 sample paths per series).
    """
    forecast_iter, ts_iter = make_evaluation_predictions(
        dataset=test_ds,
        predictor=predictor,
        num_samples=100,
    )
    forecasts = list(forecast_iter)
    tss = list(ts_iter)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name, trainid):
    """Load the named model and forecast *test_ds* with it.

    Args:
        test_ds: gluonts dataset to predict on.
        prediction_length: forecast horizon (also encoded in the model path).
        model_name: one of the serialized deepAR variants ('curtrack',
            'zerotrack', 'oracle', 'oracle-laponly', 'oracle-trackonly',
            'deepAR') or a baseline ('naive', 'zero', 'arima').
        trainid: training-run id used to build the model directory.

    Returns:
        [tss, forecasts] on success, [] for an unknown model name.
    """
    # NOTE(review): GPU id is hard-coded here -- confirm mx.gpu(7) is intended.
    with mx.Context(mx.gpu(7)):
        pred_ret = []

        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'

        # all serialized deepAR variants share the same load-and-predict
        # flow; only the model directory name differs (deduplicated from the
        # original copy-pasted branches).
        serialized = {
            'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
            'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
            'oracle': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
            'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
            'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
            'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
        }

        if model_name in serialized:
            modeldir = rootdir + serialized[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]

        # naive: repeat the last observed value
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq=freq, prediction_length=prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]

        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq=freq, prediction_length=prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]

        # arima (via the R forecast package)
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq=freq,
                                           prediction_length=prediction_length, trunc_length=60)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]

        else:
            print(f'error: model {model_name} not support yet!')

        return pred_ret
def load_model(prediction_length, model_name, trainid):
    """Load a serialized deepAR variant or construct a baseline predictor.

    Args:
        prediction_length: forecast horizon (also encoded in the model path).
        model_name: one of the serialized deepAR variants ('curtrack',
            'zerotrack', 'oracle', 'oracle-laponly', 'oracle-trackonly',
            'deepAR') or a baseline ('naive', 'zero', 'arima').
        trainid: training-run id used to build the model directory.

    Returns:
        The loaded/constructed predictor.

    Raises:
        ValueError: for an unsupported model_name. (Bug fix: the original
        fell through to ``return predictor`` and raised UnboundLocalError.)
    """
    # NOTE(review): GPU id is hard-coded here -- confirm mx.gpu(7) is intended.
    with mx.Context(mx.gpu(7)):
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'

        # all serialized deepAR variants share the same deserialize flow;
        # only the model directory name differs (deduplicated from the
        # original copy-pasted branches).
        serialized = {
            'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
            'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
            'oracle': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
            'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
            'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
            'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
        }

        if model_name in serialized:
            modeldir = rootdir + serialized[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')

        # naive: repeat the last observed value
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq=freq, prediction_length=prediction_length)

        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq=freq, prediction_length=prediction_length)

        # arima (via the R forecast package)
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq=freq,
                                           prediction_length=prediction_length, trunc_length=60)

        else:
            print(f'error: model {model_name} not support yet!')
            raise ValueError(f'model {model_name} not supported')

        return predictor
# In[7]:
#calc rank
def eval_rank_bytimediff(test_ds,tss,forecasts,prediction_length):
    """
    Derive true/predicted ranks from time-difference forecasts.

    Works for one event only (car numbers must be unique across the dataset).
    Ranks are obtained with a double argsort over the per-car time-diff
    matrix at each split point.

    input:
        test_ds ; the test dataset the forecasts were produced from
        tss, forecasts ; output of make_evaluation_predictions
        prediction_length ; expected forecast horizon
    return:
        rank_ret ; list of [lap, time_diff, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [true_td, pred_td]}}
        (on a horizon mismatch, [] alone is returned instead -- callers
        unpacking two values should be aware)
    """
    carlist = []

    # completed_laps -> {carno -> [true_timediff, predicted_timediff]}
    forecasts_et = dict()

    ds_iter =  iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # decode the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: the message was a plain string; use an f-string so the
            # lengths are actually interpolated
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []

        # point forecast: per-lap median over the sample paths
        #forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))

        timediff_array = tss[idx].values.copy()

        #save the prediction, keyed by the split point
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                                              forecast_laptime_mean.copy()]

    # calc rank per split point
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}

        #fill in data; note prediction_len carries over from the loop above
        time_diff = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        #calculate rank: argsort of argsort turns values into 0-based ranks
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)

        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, time_diff, true_rank, pred_rank])

    return rank_ret,forecasts_et
#calc rank
def eval_laptime(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    Collect true and predicted lap times per car and split point.

    Despite the name inherited from eval_rank, this variant returns lap-time
    matrices rather than ranks (the rank computation is commented out below).

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                  contain features to identify the eventid
        start_offset[]; elapsed time for lap0 (unused in this variant)
        tss,forecasts ; forecast result
        prediction_length ; expected forecast horizon
    return:
        rank_ret ; list of [lap, lap_time, true_laptime, pred_laptime]
        forecasts_et ; {completed_laps: {carno: [true_lt, pred_lt]}}
        (on a horizon mismatch, [] alone is returned instead)
    """
    carlist = []

    # completed_laps -> {carno -> [true_laptime, predicted_laptime]}
    forecasts_et = dict()

    ds_iter =  iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # decode the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: the message was a plain string; use an f-string so the
            # lengths are actually interpolated
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []

        # point forecast: per-lap mean over the sample paths
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        #forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))

        laptime_array = tss[idx].values.copy()

        # truth with the forecast window overwritten by the prediction
        laptime_array_hat = tss[idx].values.copy()
        laptime_array_hat[-prediction_len:] = forecast_laptime_mean

        #save the prediction, keyed by the split point
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
                                              laptime_array_hat[-prediction_len:].copy()]

    # assemble per-split-point matrices
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}

        #fill in data; note prediction_len carries over from the loop above
        lap_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        # rank computation intentionally disabled; return lap times directly
        #idx = np.argsort(elapsed_time[0], axis=0)
        #true_rank = np.argsort(idx, axis=0)
        true_laptime = lap_time[0]
        #idx = np.argsort(elapsed_time[1], axis=0)
        #pred_rank = np.argsort(idx, axis=0)
        pred_laptime = lap_time[1]

        rank_ret.append([lap, lap_time, true_laptime, pred_laptime])

    return rank_ret,forecasts_et
#calc rank
def eval_rank(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                  contain features to identify the eventid
        start_offset ; a pd.DataFrame with the elapsed time for lap0 of one specific
                  event (laptime -> elapsed-time rank mode); anything else ranks the
                  forecast target directly
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
    """
    carlist = []

    # carno-lap# -> elapsed_time[] array
    forecasts_et = dict()

    # hoisted: the same isinstance test was evaluated twice per record
    laptime2rank_mode = isinstance(start_offset, pd.core.frame.DataFrame)

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # decode_carids is a module-level carid -> carno mapping
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        if laptime2rank_mode:
            # start_offset is indexed by car_number; elapsed time at lap 0
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]

        # sanity check: every forecast must carry the configured horizon
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: this was a plain string literal, so the {placeholders}
            # were printed verbatim instead of being substituted
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []

        # collapse the sample paths into a single point forecast
        if _use_mean:
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        else:
            forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))

        if laptime2rank_mode:
            # laptime -> elapsed time via cumulative sum, then rank on elapsed time
            laptime_array = tss[idx].values.copy()
            elapsed_time = np.cumsum(laptime_array) + offset

            laptime_array = tss[idx].values.copy()
            laptime_array[-prediction_len:] = forecast_laptime_mean
            elapsed_time_hat = np.cumsum(laptime_array) + offset
        else:
            # rank directly on the forecast target
            elapsed_time = tss[idx].values.copy()
            elapsed_time_hat = tss[idx].values.copy()
            elapsed_time_hat[-prediction_len:] = forecast_laptime_mean

        # save the prediction, keyed by the lap where the forecast window starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]

    # calc rank per start lap, across all cars observed at that lap
    rank_ret = []
    for lap in forecasts_et.keys():
        # get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}

        # fill in data; row 0 = ground truth, row 1 = prediction
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        # calculate rank: argsort of argsort yields each car's rank per lap
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)

        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])

    return rank_ret,forecasts_et
def get_acc(rank_ret,prediction_length, verbose = False):
    """
    Aggregate rank-prediction accuracy metrics over a list of rank records.

    input:
        rank_ret: [lap, elapsed_time, true_rank, pred_rank], use [2][3] columns
    return:
        ((metrics...)
        (record count...))
        the result can be used to calculate micro/macro metrics
    """
    # running sums over all records
    top1acc = top1acc_farmost = 0
    top5acc = top5acc_farmost = 0
    tau = 0
    rmse = 0.
    mae = 0.

    for rec in rank_ret:
        rank_true, rank_pred = rec[2], rec[3]

        # top-1 hits over the whole horizon, and at the farthest lap only
        top1acc += np.sum((rank_true == 0) & (rank_pred == 0))
        top1acc_farmost += np.sum((rank_true[:, -1] == 0) & (rank_pred[:, -1] == 0))

        # top-5 hits
        top5acc += np.sum((rank_true < 5) & (rank_pred < 5))
        top5acc_farmost += np.sum((rank_true[:, -1] < 5) & (rank_pred[:, -1] < 5))

        # kendall's tau between the two rankings
        ktau, _ = stats.kendalltau(rank_true, rank_pred)
        tau += ktau

        rmse += mean_squared_error(rank_pred, rank_true)
        mae += np.sum(np.abs(rank_pred - rank_true))

    recnt = len(rank_ret)
    if recnt > 0:
        # normalize counts into rates / averages
        top1acc = top1acc * 1.0 / (recnt * prediction_length)
        top1acc_farmost = top1acc_farmost * 1.0 / recnt
        top5acc = top5acc * 1.0 / (5 * recnt * prediction_length)
        top5acc_farmost = top5acc_farmost * 1.0 / (5 * recnt)
        tau = tau / recnt
        rmse = rmse / recnt
        mae = mae / recnt

    # debug only: when forecasting lap status, report mae in the tau slot
    if _run_ts == COL_LAPSTATUS:
        tau = mae

    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', top1acc,
              'top1acc_farmost=', top1acc_farmost,
              'top5acc=', top5acc,
              'top5acc_farmost=', top5acc_farmost,
              )
        print('tau = ', tau,
              'rmse = ', rmse,
              'mae = ', mae)

    return ((top1acc,top1acc_farmost,top5acc,top5acc_farmost,tau,rmse),
            (recnt*prediction_length,recnt,5*recnt*prediction_length,5*recnt,recnt,recnt))
# In[ ]:
# In[8]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc, datamode='', models=[]):
    """
    Run `testfunc` (or run_exp) over every (halfmode, plen, trainid) combination,
    repeat `runs` times, and average the resulting metrics.

    input:
        plens=[2,5,10]
        half=[False]
        #trainids = ["indy500-r0.2","indy500-r0.4","indy500"]
        trainids = ["r0.5"]
        runs = 5
        train_ratio=0.5
        testfunc ; run_exp_predpit, run_exp_predtrack, run_exp ... — either a
                   callable, or a string naming the test (then run_exp is used
                   together with `datamode` and `models`)
    return:
        dfret ; average result of multiple runs
            dataframe['model' , 'prediction_length', 'halfmode','datamode','trainid',
            'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse',
            'top1acc_std','top1acc_farmost_std','top5acc_std','top5acc_farmost_std','tau_std','rmse_std']
        alldata_ret ; for debug
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
            pred_ret[model] -> [tss, forecasts]
            test_ds[model] -> test_ds
            rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
            forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)

    NOTE(review): `models=[]` is a mutable default argument; it is only read here,
    but callers should not rely on mutating it.
    """
    if plens == [] or half == [] or trainids == []:
        print("error with empty settings")
        return

    # testfunc or (datamode & models): the string form requires both extras
    if isinstance(testfunc,str) and (datamode == '' or models == []):
        print("error with testfunc")
        return

    allret = []
    alldata_ret = []
    for runid in range(runs):
        exp_data = []
        exp_result = []
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('='*10)
                    # a callable testfunc takes precedence over the string form
                    if not isinstance(testfunc,str):
                        pred_ret, test_ds, rank_ret, metric_ret = testfunc(plen, halfmode,
                                                                           train_ratio=train_ratio,
                                                                           trainid=trainid)
                    else:
                        pred_ret, test_ds, rank_ret, metric_ret = run_exp(plen, halfmode,
                                                                          train_ratio=train_ratio,
                                                                          trainid=trainid,
                                                                          datamode=datamode,
                                                                          models=models)
                    #save
                    exp_data.append((pred_ret, test_ds, rank_ret))
                    exp_result.extend(metric_ret)

        #save result of this run
        result = pd.DataFrame(exp_result, columns = ['model' , 'prediction_length', 'halfmode',
                                                     'datamode','trainid',
                                                     'top1acc','top1acc_farmost','top5acc',
                                                     'top5acc_farmost','tau','rmse'])
        #result['runid'] = [runid for x in range(len(result))]
        allret.append(result)
        alldata_ret.append(exp_data)

    #final: stack per-run metric matrices into (runs, rows, 6) and reduce over runs
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :,:] = ret[['top1acc','top1acc_farmost','top5acc',
                                   'top5acc_farmost','tau','rmse']].values

    #average over runs; std for error bars
    averagemat = np.mean(metrics[:,:,:], axis=0)
    stdmat = np.std(metrics[:,:,:], axis=0)

    dfhead = allret[0][['model' , 'prediction_length', 'halfmode', 'datamode','trainid']]
    dfaverage = pd.DataFrame(averagemat, columns = ['top1acc','top1acc_farmost','top5acc',
                                                    'top5acc_farmost','tau','rmse'])
    dfstd = pd.DataFrame(stdmat, columns = ['top1acc_std','top1acc_farmost_std','top5acc_std',
                                            'top5acc_farmost_std','tau_std','rmse_std'])

    dfret = pd.concat([dfhead, dfaverage, dfstd], axis=1)

    #if exp_id != '':
    #    dfret.to_csv(f'laptime2rank-evaluate-indy500-{exp_id}-result.csv', float_format='%.3f')
    return dfret, alldata_ret
def checkret_status(dataret, runid = 0, idx = 0,model='oracle'):
    """
    Count yellow-flag and pit laps inside the prediction window of one test set.

    alldata_ret ; for debug
        [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
        pred_ret[model] -> [tss, forecasts]
        test_ds[model] -> test_ds
        rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
        forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    entry = dataret[runid][idx]
    # horizon length comes from the first forecast's sample matrix
    _, plen = entry[0][model][1][0].samples.shape
    test_ds = entry[1][model]

    yfcnt = 0
    pitcnt = 0
    for test_rec in test_ds:
        # decode the car number (kept for parity with the original lookup)
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        track_rec, lap_rec = test_rec['feat_dynamic_real']
        # tally flags inside the last plen laps (the prediction window)
        yfcnt += np.sum(track_rec[-plen:])
        pitcnt += np.sum(lap_rec[-plen:])

    print('yfcnt:', yfcnt, 'pitcnt:', pitcnt)
def get_ref_oracle_testds(plens, halfs, train_ratio=0.8, test_cars=None):
    """
    Build oracle-mode reference test datasets for every
    (prediction_length, half_moving_win) combination.

    input:
        plens  ; list of prediction lengths
        halfs  ; list of half_moving_win settings
        test_cars ; optional list of car numbers to restrict the test set
    return:
        testset ; dict '%d-%d' % (prediction_length, half_moving_win) -> test_ds

    bugfix: replaced the mutable default argument `test_cars=[]` with a None
    sentinel (behavior unchanged for all callers).
    """
    if test_cars is None:
        test_cars = []

    testset = {}
    for prediction_length in plens:
        for half_moving_win in halfs:
            train_ds, test_ds,_,_ = make_dataset_byevent(events_id[_test_event], prediction_length,freq,
                                                         oracle_mode=MODE_ORACLE,
                                                         run_ts = _run_ts,
                                                         test_cars=test_cars,
                                                         half_moving_win= half_moving_win,
                                                         train_ratio=train_ratio)
            # key encodes the combination, e.g. '2-0'
            key = '%d-%d'%(prediction_length,half_moving_win)
            testset[key] = test_ds
    return testset
def checkret_confusionmat(dataret, ref_testset, runid= 0, testid = '', model='oracle'):
    """
    output the 4x4 confusion matrix split by track and lap status

    Records are bucketed by whether their prediction window contains any
    yellow-flag laps and/or any pit laps ('00','10','01','11'), plus an 'aa'
    row covering all records.

    input:
        ref_testset ; dict key '%d-%d'%(plen,0) -> oracle test ds (see
                      get_ref_oracle_testds); used as the ground truth for
                      track/lap status
    return:
        dfacc ; DataFrame['testid','plen','type','reccnt', metrics...]
    """
    plen_length = len(dataret[runid])
    dflist = []
    for idx in range(plen_length):
        # horizon length from the first forecast's sample matrix
        _, plen = dataret[runid][idx][0][model][1][0].samples.shape
        test_ds = dataret[runid][idx][1][model]
        rank_ret = dataret[runid][idx][2][model][0]

        key = '%d-%d'%(plen,0)
        if key not in ref_testset:
            print(f'error, {key} not found in ref_testset')
            continue
        ref_oracle_testds = ref_testset[key]
        if len(ref_oracle_testds) != len(test_ds):
            print('error, size of testds mismatch', len(ref_oracle_testds), len(test_ds))
            continue

        # confusion matrix for <trackstatus, lapstatus> type: 00,01,10,11
        # lap(start lap of prediction) -> accumulated (yellow-flag count, pit count)
        lapmap = {}
        ds_iter = iter(ref_oracle_testds)
        for recid in range(len(ref_oracle_testds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            track_rec,lap_rec = test_rec['feat_dynamic_real']
            # counts within the prediction window only
            yfcnt = np.sum(track_rec[-plen:])
            pitcnt = np.sum(lap_rec[-plen:])
            #laptype = ('0' if yfcnt==0 else '1') + ('0' if pitcnt==0 else '1')
            lap = len(track_rec) - plen + 1
            if lap not in lapmap:
                #lapmap[lap] = laptype
                lapmap[lap] = (yfcnt, pitcnt)
            else:
                # same start lap seen for several cars: accumulate the counts
                oldtype = lapmap[lap]
                lapmap[lap] = (yfcnt + oldtype[0], pitcnt + oldtype[1])

        # split the rank_ret by laptype and score each bucket separately
        types=['00','10','01','11']
        acc_ret = []
        for laptype in types:
            check_ret = []
            for item in rank_ret:
                typecnt = lapmap[item[0]]
                # derive the bucket label from the accumulated counts
                thetype = ('0' if typecnt[0]==0 else '1') + ('0' if typecnt[1]==0 else '1')
                if thetype == laptype:
                    check_ret.append(item)
            # get acc for this bucket
            metrics = get_acc(check_ret,plen)
            recret = [testid, plen, laptype, len(check_ret)]
            recret.extend(metrics[0])
            acc_ret.append(recret)

        #add all test ('aa' = every record regardless of type)
        metrics = get_acc(rank_ret,plen)
        recret = [testid, plen, 'aa', len(rank_ret)]
        recret.extend(metrics[0])
        acc_ret.append(recret)

        _dfacc = pd.DataFrame(acc_ret, columns = ['testid','plen',
                                                  'type','reccnt','top1acc','top1acc_farmost','top5acc',
                                                  'top5acc_farmost','tau','rmse'])
        dflist.append(_dfacc)

    dfacc = pd.concat(dflist, axis=0)
    return dfacc
# In[9]:
def check_testds(datamode, test_cars=[]):
    """
    report mae, etc

    NOTE(review): the constructed datasets are discarded, so apart from any
    printing done inside make_dataset_byevent this function has no effect —
    it looks like an unfinished stub; confirm before extending.
    """
    # plens, half, freq, train_ratio, _test_event, _run_ts are module globals
    for prediction_length in plens:
        for half_moving_win in half:
            train_ds, test_ds,_,_ = make_dataset_byevent(events_id[_test_event], prediction_length,freq,
                                                         oracle_mode=datamode,
                                                         run_ts = _run_ts,
                                                         test_cars=test_cars,
                                                         half_moving_win= half_moving_win,
                                                         train_ratio=train_ratio)
def dotest(config):
    """
    Run every (model, test) combination described by *config* and collect both
    the averaged run metrics and the per-type confusion-matrix accuracy.

    config: {model_name: {testfunc_name: datamode}}
    return: (dfret, dfacc) — concatenated DataFrames over all combinations
    """
    acc_frames = []
    result_frames = []
    for model, conf in config.items():
        for teststr, datamode in conf.items():
            df, dataret = run_test(runs, plens, half, trainids,
                                   train_ratio, teststr, datamode=datamode, models=[model])
            # score the same run against the oracle reference test sets
            acc = checkret_confusionmat(dataret, ref_testset,
                                        testid=teststr, model=model)
            result_frames.append(df)
            acc_frames.append(acc)

    dfret = pd.concat(result_frames, axis=0)
    dfacc = pd.concat(acc_frames, axis=0)
    return dfret, dfacc
#
# simulator
#
def get_pitlaps(verbose = True, prediction_length=2):
    """
    collect pitlaps info from COL_LAPSTATUS

    input:
        laptime_data ;
        _test_event ;
        events
        _train_len ; minimum laps for a ts(otherwise, discard)
        global_carids ; carno-> carid mapping
    return:
        ret_pitlaps ; sorted, de-duplicated laps at which any car pits
        all_pitlaps ; dict carno -> list of that car's pit laps
        max_lap     ; longest ts length in the test event

    cleanup: removed unused locals (carid/static_cat/nan_count and the
    nan_helper statistics) and replaced the index-comprehension inlap filter
    with equivalent strided slicing.
    """
    run_ts = _run_ts

    all_pitlaps = {}  # carno -> pitstops
    max_lap = 0
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue

        # statistics on the ts length
        ts_len = [_entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status, timediff]
            rec = _data[2][rowid].copy()

            # remove nan (only tails) of the target series
            rec = rec[:, ~np.isnan(rec[run_ts, :])]

            # remove short ts
            totallen = rec.shape[1]
            if totallen < _train_len + prediction_length:
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            carno = _data[1][rowid]

            # laps flagged as pit stops
            pitstops = np.where(rec[COL_LAPSTATUS, :] == 1)[0]

            # filter out inlaps (when _inlap_status > 0)
            if _inlap_status == 1:
                # inlap recorded before each pit stop -> keep odd positions
                pitstops = pitstops[1::2]
            elif _inlap_status == 2:
                # inlap recorded after each pit stop -> keep even positions
                pitstops = pitstops[0::2]

            all_pitlaps[carno] = list(pitstops)

            # append the end lap
            if _include_endpit:
                all_pitlaps[carno].append(totallen - 1)

    # flatten, dedupe and sort across all cars
    allset = []
    for laps in all_pitlaps.values():
        allset.extend(laps)
    ret_pitlaps = sorted(set(allset))

    return ret_pitlaps, all_pitlaps, max_lap
def get_nextpit_raw(pitlaps, startlap):
    """
    Find each car's first pit stop strictly after *startlap*.

    input:
        pitlaps ; dict carno -> sorted list of that car's pit-stop laps
        startlap ;
    return:
        nextpit_map ; carno -> next pit lap (cars without a later pit are omitted)
        maxpit ; the latest next-pit lap over all cars; -1 when no car pits again

    robustness fix: the original called max() on an empty list (ValueError)
    when *pitlaps* was empty; now returns -1 in that case.
    """
    nextpit = []
    nextpit_map = {}
    for carno, laps in pitlaps.items():
        # first pit strictly after startlap, if any
        nxt = next((lap for lap in laps if lap > startlap), None)
        if nxt is None:
            # sentinel keeps "no pit" cars represented in the max() below
            nextpit.append(-1)
        else:
            nextpit.append(nxt)
            nextpit_map[carno] = nxt

    return nextpit_map, (max(nextpit) if nextpit else -1)
def get_nextpit(pitlaps, startlap):
    """
    Find each car's next pit stop after *startlap*.

    input:
        pitlaps ; dict carno -> list of that car's pit-stop laps
        startlap ;
    return:
        nextpit_map ; carno -> first pit lap strictly after startlap
        maxpit ; latest next-pit lap among the cars that pitted exactly at
                 startlap; -1 when none of those cars pit again
    """
    # cars whose pit stop falls exactly on startlap
    hit_cars = [c for c, laps in pitlaps.items() if startlap in laps]

    # per car: the first pit strictly after startlap
    nextpit_map = {}
    for c, laps in pitlaps.items():
        for lap in laps:
            if lap > startlap:
                nextpit_map[c] = lap
                break

    # maxpit considers only the cars that just pitted at startlap
    maxpit = -1
    for c in hit_cars:
        if c in nextpit_map:
            maxpit = max(nextpit_map[c], maxpit)

    return nextpit_map, maxpit
def sim_init():
    """
    Snapshot the pit-model related features before a simulation run.

    Copies COL_LAPSTATUS / COL_CAUTION_LAPS_INSTINT / COL_LAPS_INSTINT of every
    series of the test event into their *_SAVE twin rows inside laptime_data,
    so the live rows can be overwritten during simulation and restored later
    (see update_onets).

    cleanup: removed the unused run_ts/ts_len/max_lap locals.
    """
    for _data in laptime_data:
        # only the configured test event is simulated
        if events[_data[0]] != _test_event:
            continue

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
            # save pit model related features
            rec[COL_LAPSTATUS_SAVE,:] = rec[COL_LAPSTATUS, :]
            rec[COL_CAUTION_LAPS_INSTINT_SAVE,:] = rec[COL_CAUTION_LAPS_INSTINT, :]
            rec[COL_LAPS_INSTINT_SAVE, :] = rec[COL_LAPS_INSTINT, :]
def update_lapstatus(startlap):
    """
    Re-simulate the lap status of every car in the test event from *startlap* on.

    Delegates the per-series work to update_onets, which rewrites
    COL_LAPSTATUS / COL_CAUTION_LAPS_INSTINT / COL_LAPS_INSTINT in place.

    cleanup: removed the unused run_ts/ts_len/max_lap locals.
    """
    for _data in laptime_data:
        # only the configured test event is simulated
        if events[_data[0]] != _test_event:
            continue

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
            carno = _data[1][rowid]
            update_onets(rec, startlap, carno)
_pitmodel = None
def update_onets(rec, startlap, carno):
    """
    update lapstatus after startlap based on rec by the pit prediction model

    Restores the saved (ground-truth) status columns up to startlap, then
    repeatedly samples the pit model to place simulated pit stops until the
    end of the series, maintaining the stint counters as it goes.

    input:
        rec ; a ts with multiple features COL_XXX (modified in place)
        startlap ; lap from which the future is re-simulated
        carno ; car number, used for debug reporting only
    return:
        None — rec is updated in place for COL_LAPSTATUS,
        COL_CAUTION_LAPS_INSTINT, COL_LAPS_INSTINT
    """
    # loop from startlap
    nans, x= nan_helper(rec[_run_ts,:])
    nan_count = np.sum(nans)
    # effective series length excludes the nan tail of the target
    recx = rec[:, ~np.isnan(rec[_run_ts,:])]
    # remove short ts
    totallen = recx.shape[1]
    if startlap >= totallen:
        return

    #totallen = tsrec.shape[1]
    #ipdb.set_trace()

    # reset status: ground truth kept through startlap, future cleared
    endpos = startlap + 1
    rec[COL_LAPSTATUS,:] = 0
    rec[COL_LAPSTATUS,:endpos] = rec[COL_LAPSTATUS_SAVE, :endpos]
    rec[COL_CAUTION_LAPS_INSTINT,:endpos] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :endpos]
    rec[COL_LAPS_INSTINT, :endpos] = rec[COL_LAPS_INSTINT_SAVE, :endpos]
    #rec[COL_LAPSTATUS,:] = rec[COL_LAPSTATUS_SAVE, :]
    #rec[COL_CAUTION_LAPS_INSTINT,:] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :]
    #rec[COL_LAPS_INSTINT, :] = rec[COL_LAPS_INSTINT_SAVE, :]

    debug_report('start update_onets', rec[COL_LAPSTATUS], startlap, carno)

    # loop on predicted nextpit pos
    curpos = startlap
    while True:
        caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, curpos])
        laps_instint = int(rec[COL_LAPS_INSTINT, curpos])

        # pit model samples the total stint length; convert to an absolute lap
        pred_pit_laps = _pitmodel.predict(caution_laps_instint, laps_instint)
        nextpos = curpos + pred_pit_laps - laps_instint

        #debug
        #if carno == 12:
        #    print('pitmodel: startlap={}, laps_instint={}, cuation_laps={}, \
        #            nextpos={}'.format(curpos, laps_instint, caution_laps_instint, nextpos))

        if nextpos >= totallen:
            # predicted pit falls beyond the race end: extend the current stint
            # to the last lap and stop
            nextpos = totallen - 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos+1] = caution_laps_instint
            for _pos in range(curpos+1, nextpos+1):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            break
        else:
            #if (pred_pit_laps > laps_instint) and (nextpos < totallen):
            # a valid pit
            rec[COL_LAPSTATUS, nextpos] = 1
            if _inlap_status != 0:
                # inlap is 'P': mark the neighbouring lap as pit too
                if _inlap_status == 1 :
                    #rec[COL_LAPSTATUS, nextpos-1] = _inlap_status
                    rec[COL_LAPSTATUS, nextpos-1] = 1
                else:
                    #todo: no boudary check
                    #rec[COL_LAPSTATUS, nextpos+1] = _inlap_status
                    rec[COL_LAPSTATUS, nextpos+1] = 1

            # carry the caution count through the stint; reset counters at the pit
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos] = caution_laps_instint
            rec[COL_CAUTION_LAPS_INSTINT, nextpos] = 0
            for _pos in range(curpos+1, nextpos):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            rec[COL_LAPS_INSTINT, nextpos] = 0

            #go forward
            curpos = nextpos

    debug_report('after update_onets', rec[COL_LAPSTATUS], startlap, carno)
    return
def debug_pitmodel(startlap, carno, laps_instint, caution_laps_instint, samplecnt=1000):
    """
    Sample the pit model repeatedly to inspect its next-pit distribution.

    return:
        list of predicted next-pit laps (length samplecnt)
    """
    # the model predicts the total stint length; shift it to an absolute lap
    base = startlap - laps_instint
    return [base + _pitmodel.predict(caution_laps_instint, laps_instint)
            for _ in range(samplecnt)]
#debug tracking status
#status matrix : laps x ( endCol x 5 features)
#features: target, lapstatus, lap_instint, caution_instint, trackstatus
_status_mat = {} # stepid -> status matrix
def debug_report_mat(startlap, maxnext):
    """
    output the status of the simulation

    NOTE(review): this is an unfinished stub — the inner loop only binds `rec`
    and prints nothing; fixedWidth/endCol/max_lap are prepared but never used,
    and the startlap/maxnext parameters are ignored. Confirm intent before use.
    """
    fixedWidth = 5
    endCol = 4

    run_ts = _run_ts
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))

        #header carno | lap#...
        #fixed width
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
_test_carlist = []
_debug_carlist = []
#_debug_carlist = [12]
def debug_report_ts(msg, rec, startlap, carno, col= COL_LAPSTATUS):
    """Print one feature row of *rec* split at *startlap*, for watched cars only."""
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        head, tail = rec[col, :startlap + 1], rec[col, startlap + 1:]
        print(head)
        print('=' * 10)
        print(tail)
def debug_report(msg, rec, startlap, carno):
    """Print *rec* split at *startlap* when *carno* is in the debug watch list."""
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[:startlap + 1])
        print('=' * 10)
        print(rec[startlap + 1:])
def debug_print(msg):
    """Print *msg* only when at least one car is being debugged."""
    if _debug_carlist:
        print(msg)
# works on predicted lap status
def sim_onestep_pred(predictor, prediction_length, freq,
                     startlap, endlap,
                     oracle_mode = MODE_ORACLE,
                     sample_cnt = 100,
                     verbose = False
                     ):
    """
    Run one regressive simulation pass over the predicted lap status: forecast
    the target from startlap to endlap in chunks of prediction_length, feeding
    each chunk's point forecast back into the series before predicting the
    next chunk.

    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
        forecast_samples; save the samples, the farest samples
            {}, carno -> samplecnt of the target
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio

    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series

    test_set = []
    forecasts_et = {}
    forecasts_samples = {}

    _laptime_data = laptime_data.copy()

    # the forecast window ends at endpos and slides forward by
    # prediction_length per iteration (regressive multi-step forecasting)
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + prediction_length + 1):
    while(endpos <= endlap + prediction_length):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue

            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))

            #ipdb.set_trace()
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()

                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]

                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue

                # skip series that end before the current window
                if endpos > totallen:
                    continue

                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]

                #save data for car in the debug list only
                # NOTE(review): with the module-level `_test_carlist = []` this
                # filter skips every car — presumably _test_carlist is populated
                # elsewhere before this runs; confirm before relying on output.
                if not carno in _test_carlist:
                    continue

                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()

                # <3, totallen> ; initialize the per-car output matrix once
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS_SAVE, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                    # for p-risk
                    forecasts_samples[carno] = np.zeros((sample_cnt))

                # forecasts_et will be updated by forecasts
                target_val = forecasts_et[carno][2,:]

                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0

                test_rec_cnt = 0

                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                # stint state at the last observed lap before the window
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                # test mode
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #                laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage

                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec]
                                  }
                                 )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                  }
                                 )
                test_rec_cnt += 1

                #debug
                #debug_report('simu_onestep', rec, startlap, carno, col= _run_ts)
                #debug_report(f'simu_onestep: {startlap}-{endlap}, endpos={endpos}', target_val[:endpos], startlap, carno)

        # end of for each ts
        # RUN Prediction here
        test_ds = ListDataset(_test, freq=freq)

        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=sample_cnt,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)

        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]

            if _use_mean:
                forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            else:
                forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_length))

            #update the forecasts , ready to use in the next prediction(regresive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()

            #save the samples, the farest samples
            forecasts_samples[carno][:] = forecasts[idx].samples[:,-1].reshape(-1)

        #go forward
        endpos += prediction_length

    return forecasts_et, forecasts_samples
# works on lapstatus ground truth
def sim_onestep_ex(predictor, prediction_length, freq,
                   startlap, endlap,
                   oracle_mode = MODE_ORACLE,
                   verbose = False
                   ):
    """
    Run one regressive simulation pass using the ground-truth lap status:
    forecast the target from startlap to endlap in chunks of
    prediction_length, feeding each chunk's mean forecast back into the
    series before predicting the next chunk.

    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio

    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series

    test_set = []
    forecasts_et = {}

    _laptime_data = laptime_data.copy()

    # sliding forecast window, advanced by prediction_length per iteration
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + 1):
    while(endpos <= endlap + prediction_length + 1):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue

            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))

            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()

                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]

                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue

                # skip series that end before the current window
                if endpos > totallen:
                    continue

                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]

                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()

                # <3, totallen> ; initialize the per-car output matrix once
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                    #forecasts_et[carno][2,:endpos] = rec[run_ts,:endpos].copy().astype(np.float32)

                # forecasts_et will be updated by forecasts
                target_val = forecasts_et[carno][2,:]

                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0

                test_rec_cnt = 0

                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                # stint state at the last observed lap before the window
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                # test mode
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #                laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage

                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec]
                                  }
                                 )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                  }
                                 )
                test_rec_cnt += 1

        # end of for each ts
        # RUN Prediction here
        test_ds = ListDataset(_test, freq=freq)

        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=100,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)

        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            #update the forecasts , ready to use in the next prediction(regresive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()

        #go forward
        endpos += prediction_length

    #clear the unpred part beyond endlap
    for carno in forecasts_et.keys():
        forecasts_et[carno][2, endlap+1:] = np.nan

    return forecasts_et
def sim_onestep(predictor, prediction_length, freq,
                startlap, endlap,
                oracle_mode = MODE_ORACLE,
                verbose = False
                ):
    """
    Regressive one-step simulation over the configured test event.

    Walks forward from ``startlap`` to ``endlap`` in windows of
    ``prediction_length`` laps.  Each window is predicted with the gluonts
    ``predictor`` and the predicted target is written back into the series,
    so later windows are conditioned on earlier predictions (autoregressive
    roll-out), not on ground truth.

    input:
        predictor ; trained gluonts predictor
        prediction_length ; forecast horizon per step, in laps
        freq ; pandas frequency string for the synthetic timestamps
        startlap ; first predicted window covers
                   [startlap, startlap + prediction_length)
        endlap ; simulation stops once the window end reaches this lap
        oracle_mode ; bit flags (MODE_*) controlling how the covariates
                      (track status, lap status, pit age) of the forecast
                      window are filled
        verbose ; print per-car bookkeeping
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target (true values outside the simulated span)
            3, -> placeholder
            4, -> placeholder
    """
    # experiment configuration comes from module-level globals
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _test = []
        if events[_data[0]] != test_event:
            #jump out; only the configured test event is simulated
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        # process for each ts (one ts per car)
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            # keep pristine copies of the covariates; rec itself is
            # overwritten with predictions as the roll-out advances
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()
            # <3, totallen>
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if True:
                #bug fix, fixed the split point for all cars/ts
                #for endpos in range(max_len, context_len+prediction_length,step):
                #step = prediction_length
                #for endpos in range(startlap + prediction_length, endlap, step):
                endpos = startlap + prediction_length
                while(endpos < endlap and endpos < totallen):
                    # RUN Prediction for single record
                    _test = []
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    # stint statistics at the last observed lap (just before
                    # the forecast window)
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #todo
                        #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                        #        laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                # no pit: pit age keeps increasing
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit resets the pit age counter
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    # build the single-record test set for this window
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                              )
                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)
                    #get prediction result: mean over the sample paths
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
                    #update target_val so the next window is conditioned on
                    #this prediction (autoregressive roll-out)
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
                    #save forecast
                    #save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    #print('car no:', carno, 'completed_laps:', completed_laps)
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
                    test_rec_cnt += 1
                    #go forward
                    endpos += prediction_length
            #one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
    return forecasts_et
# pred pit differs to true pit
def get_acc_onestint_pred(forecasts, startlap, nextpit, nextpit_pred, trim=2, currank = False):
    """
    Rank-change accuracy for one stint when the predicted next pit lap may
    differ from the true next pit lap.

    input:
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; only stints that begin with a pit at this lap are evaluated
        nextpit ; {carno: lap} true next pitstop for each car
        nextpit_pred ; {carno: lap} predicted next pitstop for each car
        trim ; steady lap of the rank (before pit_inlap, pit_outlap);
               ranks are read ``trim`` laps before the pit lap
        currank ; baseline model that predicts "rank does not change"
    output:
        list of rows: carno, startlap, startrank, endrank, diff, sign,
        pred_endrank, pred_diff, pred_sign, pitpos, pitpos_pred
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]
        #lap status condition
        # NOTE(review): lapstatus_cont is only assigned for _inlap_status in
        # {0, 1, 2}; any other value would raise NameError below — confirm
        # the module guarantees this range.  Mode 2 also reads startlap+1,
        # which presumably stays in bounds — verify against callers.
        if _inlap_status == 0:
            lapstatus_cont = (forecasts[carno][0, startlap] == 1)
        elif _inlap_status == 1:
            # require the previous lap to be a pit lap too (in-lap counted)
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap-1] == 1))
        elif _inlap_status == 2:
            # require the following lap to be a pit lap too (out-lap counted)
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap+1] == 1))
        if carno in _debug_carlist:
            _debug_msg = 'startlap=%d, total=%d, pitstop status = %s, nextpit=%s, nextpit_pred=%s'%(startlap, lapnum, lapstatus_cont,
                    'none' if (carno not in nextpit) else nextpit[carno],
                    'none' if (carno not in nextpit_pred) else nextpit_pred[carno],
                    )
            debug_print(_debug_msg)
        # check the lap status
        #if ((startlap < lapnum) and (forecasts[carno][0, startlap] == 1)):
        if ((startlap < lapnum) and (lapstatus_cont == True)):
            startrank = true_rank[startlap-trim]
            if not carno in nextpit:
                continue
            pitpos = nextpit[carno]
            if np.isnan(pitpos):
                continue
            #todo, use the true prediction that longer than maxlap
            if _force_endpit_align:
                # lenient mode: fall back to the true pit lap when no usable
                # prediction exists, so the car still contributes a row
                if not carno in nextpit_pred:
                    #continue
                    pitpos_pred = pitpos
                else:
                    pitpos_pred = nextpit_pred[carno]
                    if np.isnan(pitpos_pred):
                        pitpos_pred = pitpos
            else:
                # strict mode: skip cars without a valid pit prediction
                if not carno in nextpit_pred:
                    continue
                pitpos_pred = nextpit_pred[carno]
                if np.isnan(pitpos_pred):
                    #set prediction to the end
                    continue
            endrank = true_rank[pitpos-trim]
            #endrank_pred = true_rank[pitpos_pred-trim]
            diff = endrank - startrank
            sign = get_sign(diff)
            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                # predicted end rank is read at the *predicted* pit lap
                pred_endrank = pred_rank[pitpos_pred-trim]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            rankret.append([carno, startlap, startrank,
                        endrank, diff, sign,
                        pred_endrank, pred_diff, pred_sign,
                        pitpos, pitpos_pred
                       ])
    return rankret
# pred pit differs to true pit
def get_acc_onestep_shortterm(forecasts, startlap, endlap, trim=0, currank = False):
    """
    Collect per-car rank-change records between two laps for short-term
    evaluation.

    input:
        forecasts; carno -> [5,totallen]
            3; true_rank
            4; pred_rank
        startlap ; lap where the evaluation window starts
        endlap ; lap where the evaluation window ends (nan -> skip)
        trim ; read ranks ``trim`` laps before the given lap
        currank ; baseline model that predicts "rank does not change"
    output:
        list of rows: carno, startlap, startrank, endrank, diff, sign,
        pred_endrank, pred_diff, pred_sign
    """
    records = []
    for car, mat in forecasts.items():
        total_laps = len(mat[1, :])
        # skip cars whose series ends before the window starts
        if startlap >= total_laps:
            continue
        if np.isnan(endlap):
            continue
        rank_true = mat[3, :]
        rank_pred = mat[4, :]
        rank_at_start = rank_true[startlap - trim]
        rank_at_end = rank_true[endlap - trim]
        delta = rank_at_end - rank_at_start
        # currank baseline: predicted end rank equals the start rank
        est_end = rank_at_start if currank else rank_pred[endlap - trim]
        est_delta = est_end - rank_at_start
        records.append([car, startlap, rank_at_start,
                        rank_at_end, delta, get_sign(delta),
                        est_end, est_delta, get_sign(est_delta)])
    return records
# works when pred pitstop == true pitstop
def get_acc_onestint(forecasts, startlap, nextpit, trim=2, currank = False):
    """
    Rank-change accuracy for one stint when the predicted pitstop equals
    the true pitstop.

    input:
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; only stints that begin with a pit at this lap are evaluated
        nextpit ; {carno: lap} next pitstop for each car
        trim ; read ranks ``trim`` laps before the pit lap
        currank ; baseline model that predicts "rank does not change"
    output:
        list of rows: carno, startlap, startrank, endrank, diff, sign,
        pred_endrank, pred_diff, pred_sign
    """
    rows = []
    for car in forecasts:
        mat = forecasts[car]
        n_laps = len(mat[1, :])
        # keep only cars that actually pit at startlap
        if not (startlap < n_laps and mat[0, startlap] == 1):
            continue
        if car not in nextpit:
            continue
        pit_lap = nextpit[car]
        if np.isnan(pit_lap):
            continue
        truth = mat[3, :]
        prediction = mat[4, :]
        begin_rank = truth[startlap - trim]
        final_rank = truth[pit_lap - trim]
        change = final_rank - begin_rank
        if currank:
            # baseline: assume the rank stays where it started
            est_final = begin_rank
        else:
            est_final = prediction[pit_lap - trim]
        est_change = est_final - begin_rank
        rows.append([car, startlap, begin_rank,
                     final_rank, change, get_sign(change),
                     est_final, est_change, get_sign(est_change)])
    return rows
#
# simulation
#
def run_simulation_stint(predictor, prediction_length, freq,
        carno, stintid, loopcnt,
        datamode = MODE_ORACLE):
    """
    Repeated simulation of a single stint for a single car.

    input:
        predictor ; trained gluonts predictor
        prediction_length ; forecast horizon per step, in laps
        freq ; pandas frequency string
        carno ; car to evaluate
        stintid ; index into the car's pit-lap list (which stint to simulate)
        loopcnt ; number of independent simulation repetitions
        datamode ; MODE_* bit flags forwarded to sim_onestep_pred
    step:
        1. init the lap status model
        2. loop loopcnt times on the chosen pit lap
            1. onestep simulation
            2. eval stint performance
    return:
        (df, full_samples, full_tss, maxnext_pred) where df has one row per
        evaluated car/run with true and predicted rank changes
    """
    rankret = []
    # the ground truth pit laps
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    #init samples array
    full_samples = {}
    full_tss = {}
    #here, test only one stint for carno and stintid
    pitlap = pitmat[carno][stintid]
    for runid in range(loopcnt):
    #for pitlap in allpits:
        #1. update lap status (skip when the oracle pit model is configured)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        #2. get maxnext: recompute pit laps after the status update, then
        #   look up the next pit both in truth and in the prediction
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        #only for one car
        maxnext = nextpit[carno]
        maxnext_pred = nextpit_pred[carno]
        #debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                #print('nextpit:', nextpit[12], nextpit_pred[12], 'maxnext:', maxnext, maxnext_pred)
                #debugstr = f'nextpit: {nextpit[]}, {nextpit_pred[12]}, maxnext: {maxnext}, {maxnext_pred}'
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)
        #run one step sim from pitlap to maxnext
        #to get the forecast_sample, set max = mexnext_pred only,
        #rather than max(maxnext,maxnext_pred)
        #
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                      pitlap, maxnext_pred,
                      oracle_mode = datamode,
                      sample_cnt = 100
                      )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            return
        ## evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        #add endlap
        #_ = [x.append(maxnext_pred) for x in ret]
        rankret.extend(ret)
        ## add to full_samples
        #eval_full_samples(maxnext_pred,
        #        forecast_samples, forecast,
        #        full_samples, full_tss)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                 'endrank', 'diff', 'sign',
                                 'pred_endrank', 'pred_diff', 'pred_sign',
                                 'endlap','pred_endlap'
                                 ])
    return df, full_samples, full_tss, maxnext_pred
def run_simulation_pred(predictor, prediction_length, freq,
        datamode = MODE_ORACLE):
    """
    Full-race stint simulation using the predicted pit model.

    input:
        predictor ; trained gluonts predictor
        prediction_length ; forecast horizon per step, in laps
        freq ; pandas frequency string
        datamode ; MODE_* bit flags forwarded to sim_onestep_pred
    step:
        1. init the lap status model
        2. loop on each true pit lap
            1. onestep simulation
            2. eval stint performance
    return:
        DataFrame with one row per evaluated car/stint (true and predicted
        rank changes plus the true/predicted end laps)
    """
    rankret = []
    # the ground truth pit laps
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    for pitlap in allpits:
        #1. update lap status (skip when the oracle pit model is configured)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        #2. get maxnext: recompute pit laps after the status update, then
        #   look up the next pit both in truth and in the prediction
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        #debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                #print('nextpit:', nextpit[12], nextpit_pred[12], 'maxnext:', maxnext, maxnext_pred)
                #debugstr = f'nextpit: {nextpit[]}, {nextpit_pred[12]}, maxnext: {maxnext}, {maxnext_pred}'
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)
        #run one step sim from pitlap to maxnext; simulate far enough to
        #cover both the true and the predicted next pit
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                      pitlap, max(maxnext, maxnext_pred),
                      oracle_mode = datamode,
                      sample_cnt = 100
                      )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        rankret.extend(ret)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                 'endrank', 'diff', 'sign',
                                 'pred_endrank', 'pred_diff', 'pred_sign',
                                 'endlap','pred_endlap'
                                 ])
    return df
#prediction of short-term windows + predicted pit model
def run_simulation_shortterm(predictor, prediction_length, freq,
        datamode = MODE_ORACLE,
        sample_cnt = 1):
    """
    Short-term prediction combined with the predicted pit model: slide a
    window of ``prediction_length`` laps over the race (from lap 10) and
    evaluate rank changes across each window.

    input:
        predictor ; trained gluonts predictor
        prediction_length ; forecast horizon per step, in laps
        freq ; pandas frequency string
        datamode ; MODE_* bit flags forwarded to sim_onestep_pred
        sample_cnt ; number of sample paths kept per forecast
    step:
        1. init the lap status model
        2. loop on each start lap
            1. onestep simulation
            2. eval short-term performance
    return:
        (df, full_samples, full_tss); df has one row per car/window,
        full_samples/full_tss accumulate per-car rank samples per lap
    """
    rankret = []
    # the ground truth pit laps (maxlap bounds the sliding window)
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    #init samples array
    full_samples = {}
    full_tss = {}
    for pitlap in range(10, maxlap-prediction_length):
        #1. update lap status (skip when the oracle pit model is configured)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        #run one step sim from pitlap to maxnext
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                      pitlap, pitlap + prediction_length,
                      oracle_mode = datamode,
                      sample_cnt = sample_cnt
                      )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this stint
        #ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred)
        ret = get_acc_onestep_shortterm(forecasts_et, pitlap, pitlap+prediction_length)
        rankret.extend(ret)
        # add to full_samples
        eval_full_samples(pitlap + prediction_length,
                forecast_samples, forecast,
                full_samples, full_tss)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                 'endrank', 'diff', 'sign',
                                 'pred_endrank', 'pred_diff', 'pred_sign',
                                 ])
    return df, full_samples, full_tss
# oracle sim
def run_simulation(predictor, prediction_length, freq,
        datamode = MODE_ORACLE):
    """
    Oracle-mode race simulation: for every true pit lap, simulate forward
    to the farthest next pit and score per-stint rank changes.

    input:
        predictor ; trained gluonts predictor
        prediction_length ; forecast horizon per step, in laps
        freq ; pandas frequency string
        datamode ; MODE_* bit flags forwarded to sim_onestep_ex
    return:
        DataFrame with one row per evaluated car/stint
    """
    all_rows = []
    pit_laps, pit_matrix, _maxlap = get_pitlaps()
    for lap in pit_laps:
        print(f'start pitlap: {lap}')
        next_pits, horizon = get_nextpit(pit_matrix, lap)
        # simulate from this pit lap up to the farthest next pit
        result = sim_onestep_ex(predictor, prediction_length, freq,
                                lap, horizon,
                                oracle_mode = datamode
                                )
        print(f'simulation done: {len(result)}')
        # convert the simulated targets to per-lap ranks
        if _exp_id in ('rank', 'timediff2rank'):
            ranked = eval_stint_direct(result, 2)
        elif _exp_id == 'laptime2rank':
            ranked = eval_stint_bylaptime(result, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        all_rows.extend(get_acc_onestint(ranked, lap, next_pits))
    return pd.DataFrame(all_rows, columns=['carno', 'startlap', 'startrank',
                                           'endrank', 'diff', 'sign',
                                           'pred_endrank', 'pred_diff', 'pred_sign',
                                           ])
#
# ------------
#
def longterm_predict(predictor, runs, prediction_length, freq,
                   useeid = False,
                   run_ts= COL_LAPTIME,
                   test_event = 'Indy500-2018',
                   test_cars = [],
                   use_global_dict = True,
                   oracle_mode = MODE_ORACLE,
                   half_moving_win = 0,
                   train_ratio=0.8,
                   log_transform = False,
                   verbose = False
                ):
    """
    Long-term rolling prediction over the test event.

    Splits each car's time series at a fixed context point and then rolls a
    prediction window forward; predictions are written back into the series
    so later windows are conditioned on earlier predictions.  Windows that
    would straddle a pitstop are re-anchored just after the pit.

    input:
        predictor ; trained gluonts predictor
        runs ; index into laptime_data to restrict the run, or <0 for all
        prediction_length ; forecast horizon per window, in laps
        freq ; pandas frequency string
        useeid ; include the event id in feat_static_cat
        run_ts ; target feature row (overridden by module global _run_ts)
        test_event ; overridden by module global _test_event
        test_cars ; unused here (NOTE(review): mutable default — harmless as
                    it is never mutated, but confirm before relying on it)
        use_global_dict ; map car numbers through global_carids
        oracle_mode: false to simulate prediction in real by
            set the covariates of track and lap status as nan in the testset
        half_moving_win ; window step: 1 -> prediction_length/2,
            2 -> prediction_length, otherwise 1
        train_ratio ; fraction of max ts length used as the training span
        log_transform ; apply log(1+x) to the target
        verbose ; print per-event/per-car bookkeeping
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
    """
    # experiment configuration comes from module-level globals, overriding
    # the corresponding keyword arguments
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    init_track_model()
    init_adjust_track_model()
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []
    forecasts_et = {}
    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out; only the test event is processed
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10
        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process for each ts (one ts per car)
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            # keep pristine copies; rec is overwritten during the roll-out
            # and restored from these at the start of every window
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()
            # <3, totallen>
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                        'start': start,
                        'feat_static_cat': static_cat,
                        'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                               rec[COL_LAPSTATUS,:]]
                         }
                      )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = int(prediction_length/2)
                elif half_moving_win == 2:
                    step = prediction_length
                else:
                    step = 1
                #bug fix, fixed the split point for all cars/ts
                #for endpos in range(max_len, context_len+prediction_length,step):
                for endpos in range(context_len+prediction_length, max_len, step):
                    #check if enough for this ts
                    if endpos > totallen:
                        break
                    # RUN Prediction for single record
                    _test = []
                    # check pitstop(stint) in the last prediction
                    # use ground truth of target before the last pitstop
                    if np.sum(lap_status[endpos-2*prediction_length:endpos-prediction_length]) > 0:
                        # pit found
                        # adjust endpos so the window starts just after the pit
                        pitpos = np.where(lap_status[endpos-2*prediction_length:endpos-prediction_length] == 1)
                        endpos = endpos-2*prediction_length + pitpos[0][0] + prediction_length + 1
                        #print('endpos:',endpos,pitpos)
                        #check if enough for this ts
                        if endpos > totallen:
                            break
                    #reset target, status to ground truth before this window
                    target_val = rec[run_ts,:].copy().astype(np.float32)
                    rec[COL_LAPSTATUS, :] = lap_status
                    rec[COL_TRACKSTATUS, :] = track_status
                    rec[COL_LAPS_INSTINT, :] = pitage_status
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    # stint statistics at the last observed lap
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                # no pit: pit age keeps increasing
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit resets the pit age counter
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec
                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec
                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec — accumulate the MAE of
                    # the (possibly adjusted) covariates against ground truth
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                              )
                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)
                    #get prediction result: mean over the sample paths
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
                    #update target_val so later windows see this prediction
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
                    #save forecast
                    #save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    #print('car no:', carno, 'completed_laps:', completed_laps)
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
                    test_rec_cnt += 1
            #one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
    #train_set.extend(_train)
    #test_set.extend(_test)
    #print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    #train_ds = ListDataset(train_set, freq=freq)
    #test_ds = ListDataset(test_set, freq=freq)
    return forecasts_et
# In[12]:
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset):
    """
    Evaluate stint rank by laptime forecasting: convert true and predicted
    laptimes to cumulative elapsed time and derive per-lap ranks.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target (laptime)
            2,: -> pred target (laptime)
            3, -> placeholder (filled with true rank)
            4, -> placeholder (filled with pred rank)
        prediction_length ; unused here, kept for interface compatibility
        start_offset ; DataFrame with 'car_number' and 'elapsed_time'
            columns giving each car's elapsed time at lap 0; any non-DataFrame
            value means "no offset"
    return:
        forecasts_et, mutated in place with rows 3 (true rank) and
        4 (predicted rank) filled
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #todo, Indy500 - > 200 max laps
    maxlap = 200
    elapsed_time = np.full((2, len(carlist), maxlap), np.nan)
    for carno in forecasts_et.keys():
        # Bug fix: 'offset' was previously assigned only inside the
        # isinstance branch, so a non-DataFrame start_offset raised
        # NameError on first use. Default to no offset instead.
        offset = 0
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        lapnum = len(forecasts_et[carno][1,:])
        # true laptimes -> cumulative elapsed time
        elapsed = np.cumsum(forecasts_et[carno][1,:]) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed
        # predicted laptimes -> cumulative elapsed time
        elapsed = np.cumsum(forecasts_et[carno][2,:]) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed
    # double argsort converts elapsed times into 0-based ranks per lap
    # (nan entries sort last, so short series do not disturb the ranking)
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]
    return forecasts_et
#
def eval_full_samples(lap, forecast_samples, forecast, full_samples, full_tss, maxlap=200):
    """
    Fold one lap's forecast samples into the running per-car rank history.

    input:
        lap ; lap number being recorded
        forecast_samples; {} carno -> sample array of the predicted target
        forecast ; {}, carno -> 5 x totallen matrix (row 1 = true target)
        full_samples ; {} carno -> (samplecnt, maxlap) predicted-rank history,
            created on first use and updated in place
        full_tss ; {} carno -> (maxlap,) true-rank history, updated in place
        maxlap ; width of the history arrays
    return:
        None (full_samples / full_tss are mutated)
    """
    cars = list(forecast.keys())
    pos_of = {c: i for i, c in enumerate(cars)}
    n_samples = len(forecast_samples[cars[0]])
    truth = np.full((len(cars), maxlap), np.nan)
    sampled = np.full((len(cars), n_samples), np.nan)
    for c in cars:
        span = len(forecast[c][1, :])
        truth[pos_of[c], :span] = forecast[c][1, :]
        sampled[pos_of[c], :] = forecast_samples[c]
    # double argsort turns column values into 0-based ranks (nan sorts last)
    rank_true = np.argsort(np.argsort(truth, axis=0), axis=0)
    rank_pred = np.argsort(np.argsort(sampled, axis=0), axis=0)
    for c in cars:
        if c not in full_tss:
            # lazily initialize this car's history buffers
            full_tss[c] = np.full(maxlap, np.nan)
            full_samples[c] = np.full((n_samples, maxlap), np.nan)
        row = pos_of[c]
        full_tss[c][:lap] = rank_true[row][:lap]
        full_tss[c][lap] = rank_true[row][lap]
        full_samples[c][:, lap] = rank_pred[row, :]
    return
def eval_stint_direct(forecasts_et, prediction_length):
    """
    evaluate rank by timediff forecasting
    input:
        forecasts_et ; {carno -> 5 x totallen matrix}
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3,  -> placeholder (overwritten here with the true rank)
            4,  -> placeholder (overwritten here with the pred rank)
        prediction_length ; unused here; kept for a uniform interface
            with the sibling eval_stint_* functions
    return:
        forecasts_et, updated in place with ranks in rows 3 and 4
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}

    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    maxlap = 200
    # diff_time[0] holds the true series, diff_time[1] the predictions;
    # unused tail laps stay NaN
    diff_time = np.zeros((2, len(carlist), maxlap))
    diff_time[:,:] = np.nan

    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])

        timediff_array = forecasts_et[carno][1,:]
        diff_time[0, caridmap[carno],:lapnum] = timediff_array

        timediff_array = forecasts_et[carno][2,:]
        diff_time[1, caridmap[carno],:lapnum] = timediff_array

        #maxlap = max(maxlap, len(forecasts_et[carno][1,:]))

    #calculate rank, support nan
    # double argsort turns time differences into rank positions; NaNs sort last
    idx = np.argsort(diff_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(diff_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)

    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]

    return forecasts_et
#calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    Cumulative-sums the true and predicted lap times (plus each car's
    lap-0 elapsed-time offset) to reconstruct elapsed time, then ranks
    the cars on every lap and writes the ranks back into the input dict.

    input:
        forecasts_et ; {carno -> 5 x totallen matrix}; row 1 holds the
            true lap times, row 2 the predictions; rows 3/4 are
            overwritten with the true/pred ranks
        prediction_length ; unused here; kept for a uniform interface
            with the sibling eval_stint_* functions
        start_offset ; DataFrame['car_number','elapsed_time'] giving each
            car's elapsed time at lap 0; any other value now means
            "no offset" (0.0)
    return:
        forecasts_et, updated in place
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    caridmap = {key: idx for idx, key in enumerate(carlist)}

    # Indy500 runs at most 200 laps; unused tail laps stay NaN
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:, :] = np.nan

    for carno in forecasts_et.keys():
        # bug fix: 'offset' used to be assigned only inside the
        # isinstance() branch, raising UnboundLocalError for any other
        # start_offset type; default to 0 so ranks are still computed
        offset = 0
        #start_offset is global var
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]

        lapnum = len(forecasts_et[carno][1,:])
        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed

        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed

    #calculate rank, support nan
    # double argsort turns elapsed time into rank positions; NaNs sort
    # last, so absent cars never displace real ranks
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)

    # save the rank back into rows 3 (true) and 4 (predicted)
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]

    return forecasts_et
# In[13]:
def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
            test_event='Indy500-2018', test_cars = [],
            datamode = MODE_ORACLE,model = 'oracle'):
    """
    dependency: test_event, test on one event only

    Load a trained predictor and run long-term forecasting over the
    module-level test event.

    input:
        prediction_length ; forecast horizon in laps
        half_moving_win   ; moving-window mode passed to longterm_predict
        train_ratio       ; train split ratio
        trainid           ; id of the trained model to load
        test_event        ; NOTE(review): this parameter is not used in
                            the body; the module-level _test_event is what
                            is actually forecast
        test_cars         ; optional subset of car numbers to test
                            (NOTE(review): mutable default argument,
                            shared across calls — safe only if
                            longterm_predict never mutates it)
        datamode          ; oracle/prediction MODE_* bit flags
        model             ; model name understood by load_model
    return:
        forecasts dict produced by longterm_predict
    """
    retdf = []
    pred_ret = {}
    ds_ret = {}
    rank_result = {}
    predictor = {}
    #for model in models:
    print('exp:',inspect.stack()[0][3],'model:', model,
          'datamode:', get_modestr(datamode),'eval:', _exp_id )

    predictor[model] = load_model(prediction_length, model,
                                  trainid=trainid)

    ### create test dataset
    forecasts = longterm_predict(predictor[model],
                                 events_id[_test_event], prediction_length,freq,
                                 oracle_mode=datamode,
                                 run_ts = _run_ts,
                                 test_cars=test_cars,
                                 half_moving_win= half_moving_win,
                                 train_ratio=train_ratio
                                 )

    #forecasts = eval_stint_rank(forecasts_et, prediction_length,
    #        global_start_offset[test_event])

    return forecasts
# In[14]:
def get_sign(diff):
    """Return the sign of *diff*: 1 if positive, -1 if negative, 0 otherwise."""
    return 1 if diff > 0 else (-1 if diff < 0 else 0)
def get_stint_acc(forecasts, trim=2, currank = False):
    """
    Build a per-stint rank-change table from rank forecasts.

    A stint ends `trim` laps before each pit stop (to skip the unstable
    pit in/out laps); the tail of the race after the last pit is a final
    stint.  Bug fix: a car with no pit stops used to crash on
    pitpos_list[-1] (IndexError on an empty array) — its whole race is
    now treated as a single stint.

    input:
        trim     ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; {carno -> [5, totallen] matrix}
            0; lap_status (1 on pit laps)
            3; true_rank
            4; pred_rank
        currank  ; True forces the naive "no change" prediction
    output:
        DataFrame with columns
        carno, stintid, startrank, endrank, diff, sign,
        pred_endrank, pred_diff, pred_sign
    """
    def _sign(v):
        # same contract as the module-level get_sign(): 1, -1 or 0
        return 1 if v > 0 else (-1 if v < 0 else 0)

    def _record(carno, stintid, startrank, endrank, pred_endrank):
        # one output row; under 'currank' the model predicts "no change"
        if currank:
            pred_endrank = startrank
        diff = endrank - startrank
        pred_diff = pred_endrank - startrank
        return [carno, stintid, startrank,
                endrank, diff, _sign(diff),
                pred_endrank, pred_diff, _sign(pred_diff)]

    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1, :])
        true_rank = forecasts[carno][3, :]
        pred_rank = forecasts[carno][4, :]
        pitpos_list = np.where(forecasts[carno][0, :] == 1)[0]

        stintid = 0
        startrank = true_rank[0]
        for pitpos in pitpos_list:
            # stint ends 'trim' laps before the pit lap
            rankret.append(_record(carno, stintid, startrank,
                                   true_rank[pitpos - trim],
                                   pred_rank[pitpos - trim]))
            stintid += 1
            startrank = true_rank[pitpos - trim]

        # final stint after the last pit (or the whole race when the car
        # never pitted)
        if len(pitpos_list) == 0 or pitpos_list[-1] < lapnum - 1:
            rankret.append(_record(carno, stintid, startrank,
                                   true_rank[-1], pred_rank[-1]))

    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'stintid', 'startrank',
                                         'endrank', 'diff', 'sign',
                                         'pred_endrank', 'pred_diff', 'pred_sign',
                                         ])
    return df
#
# configuration
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'
_test_event = 'Indy500-2018'
#_test_event = 'Indy500-2019'

_train_len = 40
_feature_mode = FEATURE_STATUS
_context_ratio = 0.

#_task_id = 'timediff' # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
#
#_task_id = 'lapstatus' # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus' #rank, laptime, laptim2rank, timediff2rank...

# active task: forecast lap times, then derive ranks from them
_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...

# inlap handling / pit alignment switches for the dataset build
_inlap_status = 1
_force_endpit_align = False
_include_endpit = False
_use_mean = False # mean or median to get prediction from samples

# In[16]:

# module-level state populated by init()
global_start_offset = {}  # event -> DataFrame[car_number, elapsed_time] at lap 0
global_carids = {}
laptime_data = None
freq = "1min"
decode_carids = {}

years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
def init(pitmodel = ''):
    """
    Load per-event race data, the pickled laptime dataset and construct
    the pit model.

    input:
        pitmodel ; a non-string (e.g. int) -> PitModelSimple
                       (top8=True only when pitmodel == 0);
                   'oracle' -> the string itself is kept as the model;
                   any other string -> PitModelMLP(modelfile=pitmodel)
    side effects:
        populates global_start_offset, global_carids, laptime_data,
        decode_carids, _pitmodel and refreshes dbid
    """
    global global_carids, laptime_data, global_start_offset, decode_carids,_pitmodel
    global dbid, _inlap_status

    dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'

    stagedata = {}
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)

        alldata, rankdata, acldata = stagedata[event]

        #offset: elapsed time of every car at lap 0; used later to
        #rebuild elapsed time from forecast lap times
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]

    # start from here
    import pickle
    #with open('laptime_rank_timediff_fulltest-oracle-%s.pickle'%year, 'rb') as f:
    laptimefile = f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'

    with open(laptimefile, 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')

    decode_carids={carid:carno for carno, carid in global_carids.items()}
    print(f'init: load dataset {laptimefile} with {len(laptime_data)} races, {len(global_carids)} cars')

    if not isinstance(pitmodel, str):
        _pitmodel = PitModelSimple(top8=(True if pitmodel==0 else False))
        print(f'init pitmodel as PitModelSimple')
    elif pitmodel=='oracle':
        _pitmodel = pitmodel
    else:
        # NOTE(review): the default pitmodel='' also lands here, i.e.
        # PitModelMLP(modelfile='') — confirm that is intended
        _pitmodel = PitModelMLP(modelfile = pitmodel)
        print(f'init pitmodel as PitModelMLP(pitmodel)')
def runtest(modelname, model, datamode, naivemode, trainid= "2018"):
    """
    Run one long-term forecast configuration and score the stint-level
    rank-change predictions.

    input:
        modelname ; label of this configuration (not used in the body)
        model     ; model name passed through to run_exp/load_model
        datamode  ; MODE_* bit flags controlling oracle/prediction inputs
        naivemode ; True forces the current-rank (no-change) baseline
        trainid   ; id of the trained model to load
    return:
        (acc, mae, rmse, r2) on the predicted rank changes;
        (0, 0, 0, 0) when the current _exp_id is not supported
    """
    forecast = run_exp(2,2, train_ratio =0.1 , trainid = trainid,
                       datamode=datamode, model=model)

    # convert the raw forecasts into rank series according to the task
    if _exp_id=='rank' or _exp_id=='timediff2rank':
        forecasts_et = eval_stint_direct(forecast, 2)
    elif _exp_id=='laptime2rank':
        forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
    else:
        print(f'Error, {_exp_id} evaluation not support yet')
        return 0,0, 0,0

    df = get_stint_acc(forecasts_et, currank = naivemode, trim= _trim)

    # sign accuracy: fraction of stints where the predicted direction of
    # the rank change matches the truth
    correct = df[df['sign']==df['pred_sign']]
    acc = len(correct)/len(df)

    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values))/len(df)
    # NOTE(review): unlike get_evalret, 'rmse' here is the raw MSE
    # (no sqrt applied) — confirm which is intended downstream
    rmse = mean_squared_error(df['pred_diff'].values , df['diff'].values)
    mae = mean_absolute_error(df['pred_diff'].values , df['diff'].values)
    r2 = r2_score(df['pred_diff'].values , df['diff'].values)
    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}')

    return acc, mae, rmse, r2
def get_evalret(df):
    """Score stint-level rank-change predictions in *df* and print them
    next to the naive "rank stays the same" baseline.

    input:
        df ; DataFrame with columns startrank, endrank, diff, sign,
             pred_diff, pred_sign
    return:
        2x4 array: row 0 = model [acc, mae, rmse, r2],
                   row 1 = naive baseline
    """
    n = len(df)
    true_diff = df['diff'].values
    model_diff = df['pred_diff'].values

    # model metrics on the predicted rank change
    acc = len(df[df['sign']==df['pred_sign']]) / n
    mae1 = np.sum(np.abs(model_diff - true_diff)) / n
    rmse = math.sqrt(mean_squared_error(model_diff, true_diff))
    mae = mean_absolute_error(model_diff, true_diff)
    r2 = r2_score(model_diff, true_diff)

    # naive baseline: predict no rank change at all
    start = df['startrank'].values
    end = df['endrank'].values
    acc_naive = len(df[df['startrank']==df['endrank']]) / n
    mae_naive = np.mean(np.abs(true_diff))
    rmse_naive = math.sqrt(mean_squared_error(start, end))
    r2_naive = r2_score(start, end)

    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, \n \
           naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2,
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
    )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])

#print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
#return acc, mae, rmse, r2
def get_evalret_shortterm(df):
    """
    Score short-term (per-lap) rank predictions in *df* against the
    naive "rank stays the same" baseline.

    'acc' here is leader accuracy: among laps where the model predicts a
    leader (pred_endrank == 0), the fraction where the true leader matches.

    input:
        df ; DataFrame with columns startlap, startrank, endrank,
             pred_endrank, diff (endrank - startrank)
    return:
        2x4 array: row 0 = model [acc, mae, rmse, r2],
                   row 1 = naive baseline
    """
    maxlap = np.max(df['startlap'].values)
    minlap = np.min(df['startlap'].values)

    # leader prediction accuracy
    top1 = df[df['endrank']==0]
    top1_pred = df[df['pred_endrank']==0]
    correct = top1_pred[top1_pred['pred_endrank']==top1_pred['endrank']]
    #acc = len(correct)/len(top1_pred)
    # epsilon guards against a model that never predicts a leader
    acc = len(correct)/(len(top1_pred) + 1e-10)

    rmse = math.sqrt(mean_squared_error(df['pred_endrank'].values , df['endrank'].values))
    mae = mean_absolute_error(df['pred_endrank'].values , df['endrank'].values)
    r2 = r2_score(df['pred_endrank'].values , df['endrank'].values)

    # absolute error normalized by the number of laps covered
    mae1 = np.sum(np.abs(df['pred_endrank'].values - df['endrank'].values))
    mae1 = mae1/ (maxlap -minlap +1)

    #naive result: predict endrank == startrank
    top1_naive = df[df['startrank']==0]
    n_correct = top1_naive[top1_naive['startrank']==top1_naive['endrank']]
    # bug fix: use the same epsilon as 'acc' above so an input with no
    # startrank==0 rows no longer raises ZeroDivisionError
    acc_naive = len(n_correct)/(len(top1_naive) + 1e-10)
    mae_naive = np.mean(np.abs(df['diff'].values))
    mae_naive1 = np.sum(np.abs(df['diff'].values))
    mae_naive1 = mae_naive1 / (maxlap - minlap + 1)
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)

    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, {%d}\n \
           naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2, len(top1_pred),
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
    )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
#
# In[20]:
def mytest():
    """
    Run the full evaluation matrix (oracle variants, deepAR, naive
    baseline) for the current module-level task/dataset configuration
    and save the per-run metrics to a CSV file.  Skips the whole run if
    the result file already exists.
    """
    savefile = f'stint-evluate-{_exp_id}-d{_dataset_id}-t{_test_event}-c{_context_ratio}_trim{_trim}.csv'
    if os.path.exists(savefile):
        print(f'{savefile} already exists, bye')
        retdf = pd.read_csv(savefile)
        # NOTE(review): the loaded dataframe is discarded; this early
        # exit returns None, unlike the normal path which returns retd
        return

    # runid -> [model name, MODE_* data-mode flags, naive (currank) mode]
    config = {'fulloracle':['oracle',MODE_ORACLE,False],
              'laponly':['oracle',MODE_ORACLE_LAPONLY,False],
              'notracklap':['oracle',MODE_NOTRACK + MODE_NOLAP,False],
              'fullpred':['oracle',MODE_PREDTRACK + MODE_PREDPIT,False],
              'curtrack':['oracle',MODE_TESTCURTRACK,False],
              'zerotrack':['oracle',MODE_TESTZERO,False],
              'predtrack':['oracle',MODE_PREDTRACK + MODE_ORACLE_TRACKONLY,False],
              'predpit':['oracle',MODE_PREDPIT + MODE_ORACLE_LAPONLY,False],
              'deepAR':['deepAR',MODE_ORACLE,False],
              'naive':['zero',MODE_ORACLE, True],
              }
    cols = ['runid','acc','mae', 'rmse', 'r2']
    result = []
    for modelname in config.keys():
        acc, mae, rmse, r2 = runtest(modelname, config[modelname][0],
                                     config[modelname][1],config[modelname][2])
        result.append([modelname, acc, mae, rmse, r2])

    retd = pd.DataFrame(result,columns=cols)
    # NOTE(review): the output file name lacks the _trim{_trim} suffix
    # used in the existence check above, so re-runs are not actually
    # skipped by this file — confirm which name is intended
    retd.to_csv(f'stint-evluate-{_exp_id}-d{_dataset_id}-t{_test_event}-c{_context_ratio}.csv', float_format='%.3f')
    return retd
# In[ ]:
if __name__ == '__main__':
    # script entry: parse CLI options, set the module-level task/dataset
    # globals, then run init() + mytest()
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)

    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))

    # cmd argument parser
    usage = 'stint_predictor_fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    parser.add_option("--contextratio", dest="contextratio", default=0.)
    parser.add_option("--trim", dest="trim", type=int, default=0)

    opt, args = parser.parse_args()

    #set global parameters
    # these module-level settings steer init()/mytest()
    _dataset_id = opt.datasetid
    _test_event = opt.testevent
    _trim = opt.trim

    # map the task id onto (trained-model task, target column, evaluation id)
    if opt.taskid == 'laptime':
        _task_id = 'laptime' # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
        _exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff' # rank,laptime, the trained model's task
        _run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
        _exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'rank':
        _task_id = 'rank' # rank,laptime, the trained model's task
        _run_ts = COL_RANK #COL_LAPTIME,COL_RANK
        _exp_id='rank' #rank, laptime, laptim2rank, timediff2rank...
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)

    if _dataset_id=='' or _test_event=='':
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)

    # 'pitage' datasets carry the pit-age feature in addition to status
    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE

    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
                _task_id)

    init()
    mytest()
| 154,718 | 36.290672 | 310 | py |
rankpredictor | rankpredictor-master/src/indycar/model/quicktest_simulator_beforesharegvar.py | #!/usr/bin/env python
# coding: utf-8
# ### stint simulator
# based on: stint_simulator_shortterm_pitmodel.py
#
#
# long term predictor by continuously regressive forecasting at each pitstop
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random, math
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from indycar.model.pitmodel import PitModelSimple, PitModelMLP
from indycar.model.deeparw import DeepARWeightEstimator
# In[2]:
import os
random.seed()
os.getcwd()
#GPUID = 1
# ### global constants
# In[3]:
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
#
# --- feature-row indices into laptime_data[event][2][car, feature, lap] ---
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8
COL_TARGET_PREDICTED = 8

# added new features
COL_LEADER_PITCNT = 9
COL_TOTAL_PITCNT = 10
COL_SHIFT_TRACKSTATUS = 11
COL_SHIFT_LAPSTATUS = 12
COL_SHIFT_LEADER_PITCNT = 13
COL_SHIFT_TOTAL_PITCNT = 14
COL_LASTFEATURE = 14

# dynamically extended space in simulation
COL_TRACKSTATUS_SAVE = COL_LASTFEATURE+1
COL_LAPSTATUS_SAVE = COL_LASTFEATURE+2
COL_CAUTION_LAPS_INSTINT_SAVE = COL_LASTFEATURE+3
COL_LAPS_INSTINT_SAVE= COL_LASTFEATURE+4
COL_ENDPOS = COL_LASTFEATURE+5

# --- FEATURE_* bit flags; combined with | and tested via test_flag() ---
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
FEATURE_LEADER_PITCNT = 8
FEATURE_TOTAL_PITCNT = 16
FEATURE_SHIFT_TRACKSTATUS = 32
FEATURE_SHIFT_LAPSTATUS = 64
FEATURE_SHIFT_LEADER_PITCNT = 128
FEATURE_SHIFT_TOTAL_PITCNT = 256

# flag -> (long name, one-letter code) used by decode_feature_mode()
_feature2str= {
        FEATURE_STATUS : ("FEATURE_STATUS",'S'),
        FEATURE_PITAGE : ("FEATURE_PITAGE",'A'),
        FEATURE_LEADER_PITCNT : ("FEATURE_LEADER_PITCNT",'L'),
        FEATURE_TOTAL_PITCNT : ("FEATURE_TOTAL_PITCNT",'T'),
        FEATURE_SHIFT_TRACKSTATUS : ("FEATURE_SHIFT_TRACKSTATUS",'Y'),
        FEATURE_SHIFT_LAPSTATUS : ("FEATURE_SHIFT_LAPSTATUS",'P'),
        FEATURE_SHIFT_LEADER_PITCNT : ("FEATURE_SHIFT_LEADER_PITCNT",'L'),
        FEATURE_SHIFT_TOTAL_PITCNT : ("FEATURE_SHIFT_TOTAL_PITCNT",'T')
        }

# --- oracle / prediction MODE_* bit flags ---
# oracle mode
MODE_ORACLE = 1024 # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2

# oracle mode for training
# NOTE(review): these reuse the values of the ORACLE_*ONLY flags above
MODE_NOLAP = 1
MODE_NOTRACK = 2

# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32

# disturbe analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256

# numeric mode -> name, used when printing the active configuration
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
        MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
        MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
        MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
        MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
        MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
def nan_helper(y):
    """Locate NaNs in a 1-d array.

    Returns the boolean NaN mask together with a converter that maps a
    logical index array to positional indices, e.g. for interpolation:

        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)
    to_positions = lambda flags: flags.nonzero()[0]
    return mask, to_positions
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#_feature_mode = FEATURE_STATUS
def decode_feature_mode(feature_mode):
    """Print the long names of the FEATURE_* flags set in *feature_mode*
    and return the compact per-flag code string ('0' for unset flags)."""
    longnames = []
    codes = []
    for flag, (longname, code) in _feature2str.items():
        if test_flag(feature_mode, flag):
            longnames.append(longname)
            codes.append(code)
        else:
            codes.append('0')
    print(' '.join(longnames))
    return ''.join(codes)
def add_leader_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS, shift_len = 0,
                   dest_col = COL_LEADER_PITCNT,
                   verbose = False):
    """
    add a new feature into mat(car, feature, lap):
    for each car and lap, the number of pits taken on that lap by cars
    ranked ahead of it; the rank order is taken from shift_len laps
    earlier so the future rank is not leaked at prediction time.

    input:
        selmat   : laptime_data array [car, feature, lap]
        rank_col : feature row holding the rank series used for ordering
        pit_col  : feature row holding the per-car pit indicator (0/1)
        shift_len: lap offset of the rank order used for sorting
        dest_col : feature row to write into; -1 appends a new row
    return:
        selmat updated in place (or a widened copy when dest_col == -1)
    """
    dim1, dim2, dim3 = selmat.shape

    # rerank by the rank_col; idx[:, lap] lists the car rows in rank order
    idx = np.argsort(selmat[:, rank_col,:], axis=0)
    # fix: removed the unused true_rank computation, which also relied on
    # the long-deprecated np.float alias (AttributeError on NumPy >= 1.24)

    # get leaderCnt by sorted pits: reorder the pit flags into rank order
    # (rank taken shift_len laps back), then count the pits of the cars
    # ahead as cumulative sum minus the car's own pit flag
    pits = np.zeros((dim1,dim3))
    for lap in range(shift_len, dim3):
        col = idx[:, lap-shift_len]
        pits[:, lap] = selmat[col, pit_col, lap]
    leaderCnt = np.nancumsum(pits, axis=0) - pits
    if verbose:
        print('pits:\n')
        print(pits[:,190:])
        print('leaderCnt raw:\n')
        print(leaderCnt[:,190:])

    #remove nans
    nanidx = np.isnan(leaderCnt)
    leaderCnt[nanidx] = 0
    if verbose:
        print('leaderCnt after remove nan:\n')
        print(leaderCnt[:,190:])

    if dest_col == -1:
        #create a new data: widen the feature axis by one row
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: overwrite the existing destination row in place
        newmat = selmat

    # scatter the rank-ordered counts back to the original car rows
    # NOTE(review): counts were built on idx[:, lap-shift_len] but are
    # scattered with idx[:, lap] — confirm the asymmetry is intended
    for lap in range(dim3):
        col = idx[:, lap]
        newmat[col, dest_col, lap] = leaderCnt[:, lap]

    # sync length to COL_RANK: restore the NaN tail of each car's series
    for rec in newmat:
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            #todo, some invalid nan, remove them
            #rec[dim2, np.isnan(rec[dim2,:])] = 0
            rec[dest_col, -nan_count:] = np.nan

    return newmat
def add_allpit_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS,
                   dest_col = COL_TOTAL_PITCNT,verbose = False):
    """
    add a new feature into mat(car, feature, lap):
    the total number of cars pitting on each lap (same value broadcast
    to every car).

    input:
        selmat   : laptime_data array [car, feature, lap]
        rank_col : feature row whose NaN tail defines each car's valid laps
        pit_col  : feature row holding the per-car pit indicator (0/1)
        dest_col : feature row to write into; -1 appends a new row
    return:
        selmat updated in place (or a widened copy when dest_col == -1)
    """
    dim1, dim2, dim3 = selmat.shape

    #calc totalCnt vector: pit count per lap summed over cars, NaNs ignored
    totalCnt = np.nansum(selmat[:, pit_col, :], axis=0).reshape((-1))
    if verbose:
        # bug fix: this branch printed the undefined name 'pits'
        # (copy-paste from add_leader_cnt) and raised NameError when enabled
        print('pits:\n')
        print(selmat[:, pit_col, 190:])
        print('totalCnt raw:\n')
        print(totalCnt[190:])

    #remove nans
    nanidx = np.isnan(totalCnt)
    totalCnt[nanidx] = 0
    if verbose:
        print('totalCnt after remove nan:\n')
        print(totalCnt[190:])

    if dest_col == -1:
        #create a new data: widen the feature axis by one row
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: overwrite the existing destination row in place
        newmat = selmat

    # the per-lap total is identical for every car
    for car in range(dim1):
        newmat[car, dest_col, :] = totalCnt

    # sync length to COL_RANK: restore the NaN tail of each car's series
    for rec in newmat:
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            #todo, some invalid nan, remove them
            #rec[dim2, np.isnan(rec[dim2,:])] = 0
            rec[dest_col, -nan_count:] = np.nan

    return newmat
def add_shift_feature(selmat, rank_col=COL_RANK, shift_col=COL_LAPSTATUS, shift_len = 2,
                      dest_col = -1,verbose = False):
    """
    add a new feature into mat(car, feature, lap):
    shift features left in a lap, i.e. the value written at lap t is the
    source value observed at lap t+shift_len (a look-ahead copy).
    warning: these are oracle features, be careful not to let future rank positions leaking
    input:
        sel_mat  : laptime_data array [car, feature, lap]
        rank_col : feature row whose NaN tail defines each car's valid laps
        shift_col: feature row to shift
        shift_len: number of laps to shift left
        dest_col : feature row to write into; -1 appends a new row
    return:
        selmat updated in place (or a widened copy when dest_col == -1)
    """
    dim1, dim2, dim3 = selmat.shape

    if dest_col == -1:
        #create a new data
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode
        newmat = selmat

    for car in range(dim1):
        # set empty status by default
        newmat[car, dest_col, :] = np.nan

        # get valid laps (the non-NaN prefix of the rank series)
        rec = selmat[car]
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        recnnz = rec[shift_col, ~np.isnan(rec[rank_col,:])]
        reclen = len(recnnz)

        #shift copy: zero-fill the valid range, then copy the shifted
        #values; the last shift_len valid laps stay 0 because no future
        #value exists for them
        newmat[car, dest_col, :reclen] = 0
        #newmat[car, dim2, :-shift_len] = selmat[car, shift_col, shift_len:]
        newmat[car, dest_col, :reclen-shift_len] = recnnz[shift_len:]

    # sync length to COL_RANK
    #for rec in newmat:
    #    nans, x= nan_helper(rec[rank_col,:])
    #    nan_count = np.sum(nans)
    #    if nan_count > 0:
    #        #todo, some invalid nan, remove them
    #        #rec[dim2, np.isnan(rec[dim2,:])] = 0
    #        rec[dim2, -nan_count:] = np.nan

    return newmat
def update_laptimedata(prediction_length, freq,
                       test_event = 'Indy500-2018',
                       train_ratio=0.8,
                       context_ratio = 0.,
                       shift_len = -1,
                       #target_pred = None,
                       rank_col = COL_RANK,
                       verbose = False):
    """
    update the features in laptime data
    3. create new features

    Adds/refreshes the derived feature rows (leader pit count, total pit
    count, and their shift_len-lap look-ahead copies) for the test event
    only, mutating the module-level laptime_data in place.

    input:
        prediction_length ; default shift length when shift_len < 0
        freq              ; not referenced in the body
        test_event        ; NOTE(review): not referenced; the module-level
                            _test_event selects the event instead
        train_ratio, context_ratio ; not referenced in the body
        shift_len         ; look-ahead distance for the shifted features
        rank_col          ; feature row used as the rank series
        laptime_data      ; global var
    output:
        data ; new representation of laptime_data
    """
    global laptime_data
    #inplace update
    #_laptime_data = laptime_data.copy()
    _laptime_data = laptime_data

    #get test event
    test_idx = -1
    for idx, _data in enumerate(laptime_data):
        if events[_data[0]] == _test_event:
            test_idx = idx
            break

    # check shift len
    if shift_len < 0:
        shift_len = prediction_length
    if verbose:
        print('update_laptimedata shift len:', shift_len, test_idx)

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    new_data = []
    if test_idx >= 0:
        _data = laptime_data[test_idx]

        # use to check the dimension of features: fewer rows than
        # COL_LASTFEATURE+1 means the derived rows do not exist yet
        input_feature_cnt = _data[2].shape[1]
        if verbose:
            if input_feature_cnt < COL_LASTFEATURE + 1:
                print('create new features mode, feature_cnt:', input_feature_cnt)
            else:
                print('update features mode, feature_cnt:', input_feature_cnt)

        # add new features
        # add leaderPitCnt
        #if _data[0]==0:
        #    verbose = True
        #else:
        #    verbose = False
        verbose = False

        #
        # be careful on leader_cnt for the future rank leaking
        #
        # dest_col == -1 appends a new feature row; otherwise the existing
        # row is overwritten in place (same pattern for every step below)
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_LEADER_PITCNT
        #if not target_pred:
        #    # update leader_cnt by predicted target
        #    data2_intermediate = add_leader_cnt(_data[2], shift_len = shift_len,
        #            rank_col = COL_TARGET_PREDICTED,
        #            dest_col=dest_col, verbose = verbose)
        #else:
        #    # update leader_cnt by true target
        #    data2_intermediate = add_leader_cnt(_data[2], shift_len = shift_len, dest_col=dest_col, verbose = verbose)
        data2_intermediate = add_leader_cnt(_data[2], shift_len = shift_len,
                rank_col = rank_col,
                dest_col=dest_col, verbose = verbose)

        # add totalPit
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_TOTAL_PITCNT
        data2_intermediate = add_allpit_cnt(data2_intermediate, dest_col=dest_col)

        #
        # add shift features, a fixed order, see the MACROS
        #COL_SHIFT_TRACKSTATUS = 11
        #COL_SHIFT_LAPSTATUS = 12
        #COL_SHIFT_LEADER_PITCNT = 13
        #COL_SHIFT_TOTAL_PITCNT = 14
        #
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TRACKSTATUS
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_TRACKSTATUS, shift_len = shift_len)
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LAPSTATUS
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_LAPSTATUS, shift_len = shift_len)
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LEADER_PITCNT
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_LEADER_PITCNT, shift_len = shift_len)
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TOTAL_PITCNT
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_TOTAL_PITCNT, shift_len = shift_len)

        # final
        data2_newfeature = data2_intermediate

        #new_data.append([_data[0], _data[1], data2_newfeature])
        laptime_data[test_idx][2] = data2_newfeature

    return laptime_data
def get_real_features(feature_mode, rec, endpos):
    """
    construct the real value feature vector from feature_mode

    NOTE(review): this definition is immediately shadowed by an identical
    re-definition of get_real_features just below; only the later one is
    live at runtime.

    input:
        feature_mode ; bitwise OR of FEATURE_* flags selecting which rows
                       of rec are appended, in a fixed order
        rec          ; one car's [feature, lap] matrix
        endpos       ; exclusive end lap; <= 0 means the full series
    return:
        list of 1-d arrays, one per selected feature
        (FEATURE_STATUS contributes two: track status then lap status)
    """
    features = []

    #check endpos
    if endpos <=0 :
        endpos = rec.shape[1]

    if test_flag(feature_mode, FEATURE_STATUS):
        features.append(rec[COL_TRACKSTATUS,:endpos])
        features.append(rec[COL_LAPSTATUS,:endpos])

    if test_flag(feature_mode, FEATURE_PITAGE):
        features.append(rec[COL_LAPS_INSTINT,:endpos])

    if test_flag(feature_mode, FEATURE_LEADER_PITCNT):
        features.append(rec[COL_LEADER_PITCNT,:endpos])

    if test_flag(feature_mode, FEATURE_TOTAL_PITCNT):
        features.append(rec[COL_TOTAL_PITCNT,:endpos])

    if test_flag(feature_mode, FEATURE_SHIFT_TRACKSTATUS):
        features.append(rec[COL_SHIFT_TRACKSTATUS,:endpos])

    if test_flag(feature_mode, FEATURE_SHIFT_LAPSTATUS):
        features.append(rec[COL_SHIFT_LAPSTATUS,:endpos])

    if test_flag(feature_mode, FEATURE_SHIFT_LEADER_PITCNT):
        features.append(rec[COL_SHIFT_LEADER_PITCNT,:endpos])

    if test_flag(feature_mode, FEATURE_SHIFT_TOTAL_PITCNT):
        features.append(rec[COL_SHIFT_TOTAL_PITCNT,:endpos])

    return features
def get_real_features(feature_mode, rec, endpos):
    """
    construct the real value feature vector from feature_mode

    input:
        feature_mode ; bitwise OR of FEATURE_* flags selecting which rows
                       of rec are appended, in a fixed order
        rec          ; one car's [feature, lap] matrix
        endpos       ; exclusive end lap; <= 0 means the full series
    return:
        list of 1-d arrays, one per selected feature
        (FEATURE_STATUS contributes two: track status then lap status)
    """
    if endpos <= 0:
        endpos = rec.shape[1]

    # fixed flag -> source-row(s) order; FEATURE_STATUS maps to two rows
    layout = [
        (FEATURE_STATUS, (COL_TRACKSTATUS, COL_LAPSTATUS)),
        (FEATURE_PITAGE, (COL_LAPS_INSTINT,)),
        (FEATURE_LEADER_PITCNT, (COL_LEADER_PITCNT,)),
        (FEATURE_TOTAL_PITCNT, (COL_TOTAL_PITCNT,)),
        (FEATURE_SHIFT_TRACKSTATUS, (COL_SHIFT_TRACKSTATUS,)),
        (FEATURE_SHIFT_LAPSTATUS, (COL_SHIFT_LAPSTATUS,)),
        (FEATURE_SHIFT_LEADER_PITCNT, (COL_SHIFT_LEADER_PITCNT,)),
        (FEATURE_SHIFT_TOTAL_PITCNT, (COL_SHIFT_TOTAL_PITCNT,)),
    ]

    selected = []
    for flag, rows in layout:
        if test_flag(feature_mode, flag):
            for row in rows:
                selected.append(rec[row, :endpos])
    return selected
#
# interface with QuickTest
#
def set_laptimedata(newdata):
    """Replace the module-level laptime_data with *newdata*
    (interface used by QuickTest)."""
    global laptime_data

    #get test event
    test_idx = -1
    for idx, _data in enumerate(laptime_data):
        if events[_data[0]] == _test_event:
            test_idx = idx
            break

    # NOTE(review): test_idx is located in the OLD laptime_data and then
    # used to index newdata (falling back to -1, i.e. the last event, if
    # the test event was not found) — assumes both lists share the same
    # event ordering; confirm
    print('Set a new global laptime_data, shape=', len(newdata), newdata[test_idx][2].shape)
    laptime_data = newdata
#
#
#
def load_data(event, year=0):
    """Load one race's lap-by-lap CSV and derive per-lap views.

    Args:
        event: event name used to build the csv file name.
        year: optional year (int); when > 0 it is embedded in the file name.

    Returns:
        (alldata, rankdata, acldata):
            alldata  ; raw copy of the full record set (all cars)
            rankdata ; one row per (car_number, completed_laps), ordered by
                       elapsed_time (earliest record kept)
            acldata  ; completed-laps dataset for all cars (see make_cl_data)
    """
    if year > 0:
        # bug fix: `year` is an int here; the original concatenated it
        # directly into the path string, which raises TypeError.
        inputfile = '../data/final/C_' + event + '-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'
    dataset = pd.read_csv(inputfile)
    final_lap = max(dataset.completed_laps)
    # cars that finished the race appear in the final-lap records
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    # keep a full copy before filtering down to the finishers
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    # earliest record per (car, lap), ordered by elapsed time (stable on
    # original row order via the MyIdx tiebreaker)
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the completed-laps dataset from raw lap records.

    One row per (car_number, completed_laps) with lap-over-lap deltas.

    Args:
        dataset: raw lap records; must contain car_number, completed_laps,
            rank, elapsed_time, current_status, track_status, lap_status
            plus the bookkeeping columns dropped below.

    Returns:
        DataFrame with columns [car_number, completed_laps, rank,
        elapsed_time, rank_diff, time_diff, current_status, track_status,
        lap_status]; rank_diff/time_diff are 0 on each car's first lap.
    """
    # keep the earliest record per (car, lap); elapsed_time order defines rank
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # resort by car then lap so the diffs below run within each car's laps
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade", "laps_behind_prec", "overall_rank", "pit_stop_count",
                          "last_pitted_lap", "start_position", "laps_led"], axis=1)
    # bug fix: the original used chained assignment (uni_ds['col'][mask] = 0),
    # which pandas does not guarantee to write through; use .loc instead.
    first_of_car = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[first_of_car, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[first_of_car, 'time_diff'] = 0
    df = uni_ds[['car_number', 'completed_laps', 'rank', 'elapsed_time',
                 'rank_diff', 'time_diff', "current_status", "track_status", "lap_status"]]
    return df
# In[5]:
def nan_helper(y):
    """Locate NaNs in a 1-d array and expose an index converter.

    Args:
        y: 1d numpy array that may contain NaNs.

    Returns:
        (nans, index): ``nans`` is a boolean mask marking NaN positions;
        ``index`` converts such a logical mask into integer indices.

    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)
    to_indices = lambda z: np.nonzero(z)[0]
    return mask, to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
# pit model is separate for each car
def load_model(prediction_length, model_name, trainid, epochs=1000, exproot='../models/remote'):
    """Load a serialized forecasting model, or build a baseline predictor.

    Args:
        prediction_length: forecast horizon (laps), encoded in the model dir name.
        model_name: model identifier ('oracle', 'deepAR', 'naive', 'zero', ...).
        trainid: training run id, part of the model directory path.
        epochs: epoch count encoded in some model directory names.
        exproot: root directory that holds the serialized models.

    Returns:
        A gluonts Predictor ready for inference.

    Raises:
        ValueError: if *model_name* is not one of the supported identifiers.
    """
    with mx.Context(mx.gpu(7)):
        rootdir = f'{exproot}/{_dataset_id}/{_task_id}-{trainid}/'
        # deepAR-Oracle
        if model_name == 'curtrack':
            model=f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        elif model_name == 'zerotrack':
            model=f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        #deeparw-oracle
        elif model_name == 'weighted-oracle':
            model=f'deepARW-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # deepAR-Oracle; 'pitmodel*' names share the weighted-oracle model
        elif model_name == 'oracle' or model_name.startswith('pitmodel'):
            model=f'deepARW-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
        elif model_name == 'joint' or model_name == 'deepAR-multi':
            model=f'deepAR-multi-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
        # transformer
        elif model_name == 'transformer' or model_name == 'Transformer':
            model=f'Transformer-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
        elif model_name == 'Transformer-MLP' or model_name == 'Transformer-Oracle':
            model=f'Transformer-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
        # deepAR-Oracle
        elif model_name == 'oracle-laponly':
            model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # deepAR-Oracle
        elif model_name == 'oracle-trackonly':
            model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # deepAR
        elif model_name == 'deepAR' or model_name == 'standard':
            model=f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # naive: repeat the last observed value
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
        # zero: keeps the rank unchanged
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
        # arima baseline (via R forecast package)
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                                        prediction_length = prediction_length,trunc_length=60)
        else:
            # bug fix: the original printed an error and then returned an
            # unbound local (NameError); raise a clear exception instead.
            raise ValueError(f'error: model {model_name} not support yet!')
        return predictor
#
# simulator
#
def get_pitlaps(verbose = True, prediction_length=2):
    """
    Collect pit-stop lap numbers from COL_LAPSTATUS for the test event.

    Reads module globals:
        laptime_data    ; per-event records [eventid, carids, data]
        _test_event     ; name of the event to scan
        events          ; eventid -> event name
        _train_len      ; minimum laps for a ts (otherwise, discard)
        global_carids   ; carno -> carid mapping
        _inlap_status   ; 0: keep all flagged laps; 1: drop the in-lap that
                          precedes each stop; 2: drop the in-lap that follows
                          (flagged laps are assumed to come in pairs)
        _include_endpit ; when True, append the final lap as a stint boundary

    return:
        ret_pitlaps ; sorted unique laps at which any car pits
        all_pitlaps ; carno -> list of its pit laps
        max_lap     ; longest time-series length in the event
    """
    run_ts = _run_ts
    #all_pitlaps = [] # carno -> pitstops
    all_pitlaps = {} # carno -> pitstops
    max_lap = 0
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            # remove nan (tails only): drop laps where the target is NaN
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < _train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            # laps flagged 1 in COL_LAPSTATUS are pit (or in-) laps
            lap_status = rec[COL_LAPSTATUS, :]
            pitstops = np.where(lap_status == 1)[0]
            # filter out inlaps (when _inlap_status > 0): flagged laps come in
            # pairs, so keep every other entry
            if _inlap_status !=0:
                if _inlap_status == 1:
                    # keep the second lap of each pair (drop in-lap before stop)
                    pitstops_tmp = [pitstops[x] for x in range(1, len(pitstops), 2)]
                    pitstops = pitstops_tmp
                elif _inlap_status == 2:
                    # keep the first lap of each pair (drop in-lap after stop)
                    pitstops_tmp = [pitstops[x] for x in range(0, len(pitstops), 2)]
                    pitstops = pitstops_tmp
            all_pitlaps[carno] = list(pitstops)
            # optionally treat the final lap as the end of the last stint
            if _include_endpit:
                all_pitlaps[carno].append(totallen-1)
    # flatten to the sorted unique set of laps where any car pits
    allset = []
    for l in all_pitlaps.keys():
        allset.extend(all_pitlaps[l])
    ret_pitlaps = sorted(list(set(allset)))
    return ret_pitlaps, all_pitlaps, max_lap
def get_nextpit(pitlaps, startlap):
    """Find each car's first pit stop strictly after *startlap*.

    Args:
        pitlaps: dict mapping carno -> list of that car's pit laps
            (ascending order, as produced by get_pitlaps).
        startlap: the reference lap.

    Returns:
        (nextpit_map, maxpit): ``nextpit_map`` maps carno to its first pit
        lap after startlap (cars with none are omitted); ``maxpit`` is the
        largest such lap among cars that pitted exactly at startlap, or -1
        when no such car has a later pit.
    """
    # cars whose pit stop falls exactly on startlap ("hits")
    hit_cars = [car for car, laps in pitlaps.items() if startlap in laps]
    # first pit strictly after startlap, per car
    nextpit_map = {}
    for car, laps in pitlaps.items():
        upcoming = next((lap for lap in laps if lap > startlap), None)
        if upcoming is not None:
            nextpit_map[car] = upcoming
    # simulation horizon: furthest next pit among the cars that just pitted
    maxpit = -1
    for car in hit_cars:
        if car in nextpit_map:
            maxpit = max(nextpit_map[car], maxpit)
    return nextpit_map, maxpit
def sim_init():
    """
    Prepare the global laptime_data for simulation:
    - extend the feature dimension of the test event's matrix to COL_ENDPOS
    - snapshot track/lap status and stint counters into the *_SAVE columns
      so update_onets can restore the ground truth up to any lap
    """
    global laptime_data
    # locate the record index of the configured test event
    test_idx = -1
    for idx, _data in enumerate(laptime_data):
        if events[_data[0]] == _test_event:
            test_idx = idx
            break
    print('sim_init: input laptime_data, shape=', len(laptime_data), laptime_data[test_idx][2].shape, test_idx)
    #update this laptime record
    if test_idx >= 0:
        _data = laptime_data[test_idx][2]
        dim1, dim2, dim3 = _data.shape
        if dim2 < COL_ENDPOS:
            # grow the feature dimension; existing features are copied over
            newmat = np.zeros((dim1, COL_ENDPOS, dim3))
            newmat[:,:dim2,:] = _data.copy()
        else:
            # already wide enough: snapshot in place
            newmat = _data
        # snapshot pit-model-related features into their *_SAVE twins
        newmat[:,COL_TRACKSTATUS_SAVE,:] = newmat[:,COL_TRACKSTATUS, :]
        newmat[:,COL_LAPSTATUS_SAVE,:] = newmat[:,COL_LAPSTATUS, :]
        newmat[:,COL_CAUTION_LAPS_INSTINT_SAVE,:] = newmat[:,COL_CAUTION_LAPS_INSTINT, :]
        newmat[:,COL_LAPS_INSTINT_SAVE, :] = newmat[:,COL_LAPS_INSTINT, :]
        # install the widened matrix only when a new one was allocated
        if dim2 < COL_ENDPOS:
            laptime_data[test_idx][2] = newmat
    print('sim_init: after laptime_data, shape=', len(laptime_data), laptime_data[test_idx][2].shape)
def update_lapstatus(startlap):
    """Re-generate the simulated lap status from *startlap* onward for every
    car in the test event (delegates the per-car work to update_onets)."""
    for entry in laptime_data:
        if events[entry[0]] != _test_event:
            continue
        # entry[2][rowid] is the feature matrix of one car,
        # entry[1][rowid] its car number
        for rowid in range(entry[2].shape[0]):
            update_onets(entry[2][rowid], startlap, entry[1][rowid])
# Pit prediction model used by the simulator; set externally before a run.
# Either the string 'oracle' (use ground-truth pit laps) or an object
# exposing .predict(caution_laps_instint, laps_instint).
_pitmodel = None
def update_onets(rec, startlap, carno):
    """
    Update the lap status of one car's ts after *startlap* using the pit
    prediction model (in place).

    input:
        rec      ; a ts with multiple feature rows (COL_XXX); mutated in place
        startlap ; laps up to and including startlap are restored from the
                   *_SAVE snapshot, later laps are re-simulated
        carno    ; car number (used for debug tracing only)
    return:
        None; rec is updated in COL_LAPSTATUS, COL_CAUTION_LAPS_INSTINT,
        COL_LAPS_INSTINT
    """
    # determine the valid (non-NaN) length of the target series
    nans, x= nan_helper(rec[_run_ts,:])
    nan_count = np.sum(nans)
    recx = rec[:, ~np.isnan(rec[_run_ts,:])]
    # nothing to do when startlap is beyond the recorded laps
    totallen = recx.shape[1]
    if startlap >= totallen:
        return
    # reset status, then restore ground truth for laps [0, startlap]
    endpos = startlap + 1
    rec[COL_TRACKSTATUS,:] = 0
    rec[COL_LAPSTATUS,:] = 0
    rec[COL_TRACKSTATUS,:endpos] = rec[COL_TRACKSTATUS_SAVE, :endpos]
    rec[COL_LAPSTATUS,:endpos] = rec[COL_LAPSTATUS_SAVE, :endpos]
    rec[COL_CAUTION_LAPS_INSTINT,:endpos] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :endpos]
    rec[COL_LAPS_INSTINT, :endpos] = rec[COL_LAPS_INSTINT_SAVE, :endpos]
    debug_report('start update_onets', rec[COL_LAPSTATUS], startlap, carno)
    # walk forward, asking the pit model for the next pit position each time
    curpos = startlap
    while True:
        caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, curpos])
        laps_instint = int(rec[COL_LAPS_INSTINT, curpos])
        # model predicts the stint length; convert to an absolute lap index
        pred_pit_laps = _pitmodel.predict(caution_laps_instint, laps_instint) + _pitmodel_bias
        nextpos = curpos + pred_pit_laps - laps_instint
        if nextpos >= totallen:
            # predicted pit falls past the end of the race: fill the
            # remaining laps as a continuing stint and stop
            nextpos = totallen - 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos+1] = caution_laps_instint
            for _pos in range(curpos+1, nextpos+1):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            break
        else:
            # a valid pit: flag the lap and reset the in-stint counters
            rec[COL_LAPSTATUS, nextpos] = 1
            if _inlap_status != 0:
                # the in-lap is also flagged as 'P'
                if _inlap_status == 1 :
                    # in-lap precedes the stop
                    rec[COL_LAPSTATUS, nextpos-1] = 1
                else:
                    # in-lap follows the stop
                    # TODO: no boundary check on nextpos+1
                    rec[COL_LAPSTATUS, nextpos+1] = 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos] = caution_laps_instint
            rec[COL_CAUTION_LAPS_INSTINT, nextpos] = 0
            for _pos in range(curpos+1, nextpos):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            rec[COL_LAPS_INSTINT, nextpos] = 0
            # advance to the new pit and predict the following stint
            curpos = nextpos
    debug_report('after update_onets', rec[COL_LAPSTATUS], startlap, carno)
    return
def debug_pitmodel(startlap, carno, laps_instint, caution_laps_instint, samplecnt=1000):
    """Sample the pit model repeatedly to inspect its spread.

    Returns:
        list of *samplecnt* predicted next-pit lap positions.
    """
    return [startlap + _pitmodel.predict(caution_laps_instint, laps_instint) - laps_instint
            for _ in range(samplecnt)]
#debug tracking status
#status matrix : laps x ( endCol x 5 features)
#features: target, lapstatus, lap_instint, caution_instint, trackstatus
# debug snapshots of the simulation state, keyed by step id
_status_mat = {} # stepid -> status matrix
def debug_report_mat(startlap, maxnext):
    """
    output the status of the simulation
    """
    # NOTE(review): this function appears unfinished -- it walks the test
    # event's records and binds each row to `rec`, but never formats or
    # prints anything; kept as-is.
    fixedWidth = 5
    endCol = 4
    run_ts = _run_ts
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        #header carno | lap#...
        #fixed width
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
# car numbers to trace in the debug_* helpers; an empty list disables tracing
_debug_carlist = []
#_debug_carlist = [12]
def debug_report_ts(msg, rec, startlap, carno, col= COL_LAPSTATUS):
    """Print one feature row of *rec* split at *startlap*, but only for
    cars registered in _debug_carlist (no-op otherwise)."""
    if carno in _debug_carlist:
        split = startlap + 1
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[col, :split])
        print('=' * 10)
        print(rec[col, split:])
def debug_report(msg, rec, startlap, carno):
    """Print a 1-d status array split at *startlap*, but only for cars
    registered in _debug_carlist (no-op otherwise)."""
    if carno in _debug_carlist:
        split = startlap + 1
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[:split])
        print('=' * 10)
        print(rec[split:])
def debug_print(msg):
    """Print *msg* only when debug tracing is enabled (non-empty _debug_carlist)."""
    if _debug_carlist:
        print(msg)
# works on predicted lap status
def sim_onestep_pred(predictor, prediction_length, freq,
                     startlap, endlap,
                     oracle_mode = MODE_ORACLE,
                     sample_cnt = 100,
                     verbose = False
                     ):
    """
    Autoregressively forecast from startlap to endlap, prediction_length
    laps at a time, using the (possibly simulated) lap status features.

    input:
        predictor         ; gluonts predictor (see load_model)
        prediction_length ; laps per forecasting step
        freq              ; pandas frequency string for the fake timestamps
        startlap          ; first lap to predict after
        endlap            ; last lap to cover
        oracle_mode       ; MODE_* bit flags selecting feature handling
        sample_cnt        ; number of sample paths per forecast
    return:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus (saved ground truth)
            1,: -> true target
            2,: -> pred target
            3,  -> placeholder
            4,  -> placeholder
        forecasts_samples ; {}, carno -> sample_cnt samples of the target
            at the farthest predicted lap (for p-risk)
    """
    global laptime_data
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    # NOTE(review): Timestamp(freq=...) is removed in pandas>=2.0 -- confirm
    # the pinned pandas version before upgrading.
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    test_set = []
    forecasts_et = {}
    forecasts_samples = {}
    _laptime_data = laptime_data
    carno2rowid = {}
    endpos = startlap + prediction_length + 1
    while(endpos <= endlap + prediction_length):
        # build the testset for this step
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            if verbose:
                print(f'after ====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}, cars={_data[2].shape[0]}')
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                # remove nan (tails only)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                # save to carno2rowid map for the write-back after prediction
                if carno not in carno2rowid:
                    carno2rowid[carno] = rowid
                # target can be COL_XXSTATUS
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # first visit of this car: init the <5, totallen> result matrix
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS_SAVE, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                    # for p-risk
                    forecasts_samples[carno] = np.zeros((sample_cnt))
                    # prepare TARGET_PREDICTED in laptime
                    _data[2][rowid][COL_TARGET_PREDICTED, :] = np.nan
                    _data[2][rowid][COL_TARGET_PREDICTED, :totallen] = rec[run_ts,:].copy().astype(np.float32)
                # forecasts_et row 2 carries previous predictions forward
                # (autoregressive forecasting)
                target_val = forecasts_et[carno][2,:]
                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # build the feature window for this single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test modes overwrite the未来 window of the status features
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    # for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    # zero out the prediction window
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    # for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    # for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            # new pit resets the age
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set with the configured dynamic real features
                real_features = get_real_features(feature_mode, rec, endpos)
                if _joint_train:
                    # joint train, multi-dimensional target
                    # samples: (num_samples, prediction_length) in the 1D case
                    # or (num_samples, prediction_length, target_dim)
                    # target rows: 2 = predicted target, 0 = lap status
                    target_cols = [2, 0]
                    # bug fix: np.float was removed in NumPy 1.24; use the
                    # builtin float (same dtype, float64)
                    target_val = forecasts_et[carno][target_cols,:endpos].astype(float)
                    _test.append({'target': target_val,
                        'start': start,
                        'feat_static_cat': static_cat,
                        'feat_dynamic_real': real_features
                         }
                      )
                else:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                        'start': start,
                        'feat_static_cat': static_cat,
                        'feat_dynamic_real': real_features
                         }
                      )
                test_rec_cnt += 1
            # end of for each ts
        # run prediction on the whole test set
        test_ds = ListDataset(_test, freq=freq,one_dim_target= False if _joint_train else True)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=sample_cnt,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        # save the forecast results back into forecasts_et / laptime_data
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            if _joint_train:
                # multi-dimensional target: dimension 0 is the rank target
                if _use_mean:
                    forecast_laptime_mean = np.mean(forecasts[idx].samples[:,:,0], axis=0).reshape((prediction_length))
                else:
                    forecast_laptime_mean = np.median(forecasts[idx].samples[:,:,0], axis=0).reshape((prediction_length))
                forecasts_furtherest_samples = forecasts[idx].samples[:,-1,0].reshape(-1)
            else:
                # 1 dimensional target
                if _use_mean:
                    forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
                else:
                    forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_length))
                forecasts_furtherest_samples = forecasts[idx].samples[:,-1].reshape(-1)
            # update the forecasts, ready to use in the next step
            # (autoregressive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
            # update laptime_data
            rowid = carno2rowid[carno]
            _data[2][rowid][COL_TARGET_PREDICTED,len(tss[idx]) - prediction_length:len(tss[idx])] = forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])]
            #debug
            if False:
                print('tss shape:', tss[idx].shape, 'endpos:', endpos)
                print('forecast mean:', forecast_laptime_mean, len(tss[idx]) - prediction_length)
                print('target true:', forecasts_et[carno][1, len(tss[idx]) - prediction_length:len(tss[idx])])
                print('target pred:', forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])])
            # save the samples at the farthest predicted lap (for p-risk)
            forecasts_samples[carno][:] = forecasts_furtherest_samples
        # refresh the derived features in laptime_data from the predictions
        laptime_data = update_laptimedata(prediction_length, freq,
                      test_event = _test_event,
                      train_ratio=0, context_ratio = 0.,shift_len = prediction_length,
                      rank_col = COL_TARGET_PREDICTED
                      )
        # go forward one prediction window
        endpos += prediction_length
    return forecasts_et, forecasts_samples
# pred pit differs to true pit
def get_acc_onestint_pred(forecasts, startlap, nextpit, nextpit_pred, trim=2, currank = False):
    """
    Evaluate one stint's rank change when the predicted pit lap may differ
    from the true pit lap.

    input:
        forecasts ; carno -> [5, totallen] matrix
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap     ; evaluate only stints that start with a pit at this lap
        nextpit      ; carno -> true next pit lap
        nextpit_pred ; carno -> predicted next pit lap
        trim         ; steady lap of the rank (before pit_inlap, pit_outlap)
        currank      ; when True, use the "no change" baseline prediction
    output:
        rows of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign, pitpos, pitpos_pred]
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]
        # a stint starts here only if the lap-status flags match the
        # configured inlap convention (_inlap_status)
        if _inlap_status == 0:
            lapstatus_cont = (forecasts[carno][0, startlap] == 1)
        elif _inlap_status == 1:
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap-1] == 1))
        elif _inlap_status == 2:
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap+1] == 1))
        if carno in _debug_carlist:
            _debug_msg = 'startlap=%d, total=%d, pitstop status = %s, nextpit=%s, nextpit_pred=%s'%(startlap, lapnum, lapstatus_cont,
                    'none' if (carno not in nextpit) else nextpit[carno],
                    'none' if (carno not in nextpit_pred) else nextpit_pred[carno],
                    )
            debug_print(_debug_msg)
        # check the lap status
        if ((startlap < lapnum) and (lapstatus_cont == True)):
            startrank = true_rank[startlap-trim]
            if not carno in nextpit:
                continue
            pitpos = nextpit[carno]
            if np.isnan(pitpos):
                continue
            # resolve the predicted end-of-stint lap
            if _force_endpit_align:
                # fall back to the true pit lap when no prediction exists
                if not carno in nextpit_pred:
                    pitpos_pred = pitpos
                else:
                    pitpos_pred = nextpit_pred[carno]
                    if np.isnan(pitpos_pred):
                        pitpos_pred = pitpos
            else:
                # skip cars without a usable prediction
                if not carno in nextpit_pred:
                    continue
                pitpos_pred = nextpit_pred[carno]
                if np.isnan(pitpos_pred):
                    continue
            endrank = true_rank[pitpos-trim]
            diff = endrank - startrank
            sign = get_sign(diff)
            if currank:
                # force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                pred_endrank = pred_rank[pitpos_pred-trim]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            rankret.append([carno, startlap, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign,
                            pitpos, pitpos_pred
                           ])
    return rankret
# pred pit differs to true pit
def get_acc_onestep_shortterm(forecasts, startlap, endlap, trim=0, currank = False):
    """
    Evaluate the short-term rank change between *startlap* and *endlap*.

    input:
        forecasts ; carno -> [5, totallen] matrix
            3; true_rank
            4; pred_rank
        startlap ; first lap of the evaluated window
        endlap   ; last lap of the evaluated window (skipped if NaN)
        trim     ; steady lap of the rank (before pit_inlap, pit_outlap)
        currank  ; when True, use the "no change" baseline prediction
    output:
        rows of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    rankret = []
    for carno, mat in forecasts.items():
        total_laps = len(mat[1, :])
        true_rank = mat[3, :]
        pred_rank = mat[4, :]
        # guard: window must start inside the recorded laps and have an end
        if startlap >= total_laps:
            continue
        if np.isnan(endlap):
            continue
        startrank = true_rank[startlap - trim]
        endrank = true_rank[endlap - trim]
        diff = endrank - startrank
        sign = get_sign(diff)
        if currank:
            # currank baseline: predict no change
            pred_endrank = startrank
        else:
            pred_endrank = pred_rank[endlap - trim]
        pred_diff = pred_endrank - startrank
        pred_sign = get_sign(pred_diff)
        rankret.append([carno, startlap, startrank,
                        endrank, diff, sign,
                        pred_endrank, pred_diff, pred_sign])
    return rankret
# works when pred pitstop == true pitstop
def get_acc_onestint(forecasts, startlap, nextpit, trim=2, currank = False):
    """
    Evaluate one stint's rank change when the predicted pit lap equals the
    true pit lap.

    input:
        forecasts ; carno -> [5, totallen] matrix
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; evaluate only stints that start with a pit at this lap
        nextpit  ; carno -> next pit lap
        trim     ; steady lap of the rank (before pit_inlap, pit_outlap)
        currank  ; when True, use the "no change" baseline prediction
    output:
        rows of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    rankret = []
    for carno, mat in forecasts.items():
        lapnum = len(mat[1, :])
        true_rank = mat[3, :]
        pred_rank = mat[4, :]
        # only stints that start with a pit stop at startlap
        if startlap >= lapnum or mat[0, startlap] != 1:
            continue
        if carno not in nextpit:
            continue
        pitpos = nextpit[carno]
        if np.isnan(pitpos):
            continue
        startrank = true_rank[startlap - trim]
        endrank = true_rank[pitpos - trim]
        diff = endrank - startrank
        sign = get_sign(diff)
        # currank baseline predicts no change; otherwise read the model output
        pred_endrank = startrank if currank else pred_rank[pitpos - trim]
        pred_diff = pred_endrank - startrank
        pred_sign = get_sign(pred_diff)
        rankret.append([carno, startlap, startrank,
                        endrank, diff, sign,
                        pred_endrank, pred_diff, pred_sign])
    return rankret
#
# simulation
#
def run_simulation_stint(predictor, prediction_length, freq,
        carno, stintid, loopcnt,
        datamode = MODE_ORACLE):
    """
    Run the simulation repeatedly for one car at one specific stint.

    input:
        predictor         ; forecasting model (see load_model)
        prediction_length ; laps per forecasting step
        freq              ; pandas frequency string
        carno             ; car whose stint is evaluated
        stintid           ; index into that car's pit-lap list
        loopcnt           ; number of simulation repetitions
        datamode          ; MODE_* flags forwarded to sim_onestep_pred
    step:
        1. init the lap status model (sim_init)
        2. for each repetition:
            1. re-simulate lap status, run one-step prediction
            2. eval stint performance (get_acc_onestint_pred)
    return:
        (df, full_samples, full_tss, maxnext_pred); df has one row per
        repetition with start/end ranks, diffs and signs
    """
    rankret = []
    # the ground truth pit laps
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    #init samples array
    full_samples = {}
    full_tss = {}
    # test only one stint for carno and stintid
    pitlap = pitmat[carno][stintid]
    for runid in range(loopcnt):
        # 1. re-simulate the lap status from this pit lap (unless the
        # oracle pit model is configured, which keeps the ground truth)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        # 2. find the true and predicted next pit laps
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        # only for one car
        maxnext = nextpit[carno]
        maxnext_pred = nextpit_pred[carno]
        #debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)
        # run one step sim from pitlap to maxnext;
        # to get the forecast_sample, set max = maxnext_pred only,
        # rather than max(maxnext, maxnext_pred)
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, maxnext_pred,
                oracle_mode = datamode,
                sample_cnt = 100
                )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            return
        ## evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        rankret.extend(ret)
    # collect all repetitions into a dataframe
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                'endrank', 'diff', 'sign',
                               'pred_endrank', 'pred_diff', 'pred_sign',
                               'endlap','pred_endlap'
                               ])
    return df, full_samples, full_tss, maxnext_pred
def run_simulation_pred(predictor, prediction_length, freq,
                        datamode = MODE_ORACLE, verbose = False):
    """
    Long-term race simulation driven by the predicted pit model.

    step:
    1. init the lap status model
    2. loop on each pit lap
        1. onestep simulation
        2. eval stint performance

    input:
        predictor         ; trained forecasting model
        prediction_length ; forecast horizon in laps
        freq              ; time-series frequency string (e.g. "1min")
        datamode          ; oracle-mode bit flags controlling track/lap features
        verbose           ; verbosity passed through to the one-step simulator
    return:
        df ; DataFrame of per-(car, stint) evaluation records
    """
    # bugfix: update_laptimedata() must refresh the module-level feature tensor
    # that the simulator reads; without this declaration the assignment below
    # created a dead local (cf. run_simulation_shortterm, which declares it).
    global laptime_data
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    for pitlap in allpits:
        #1. update lap status
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        #update the featuers
        laptime_data = update_laptimedata(prediction_length, freq,
                            test_event = _test_event,
                            train_ratio=0, context_ratio = 0.,shift_len = prediction_length)
        debug_print(f'update lapstatus done.')
        #2. get maxnext: next pit lap per car, from truth and from prediction
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        #debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)
        #run one step sim from pitlap to the farther of the true/predicted next pit
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                     pitlap, max(maxnext, maxnext_pred),
                     oracle_mode = datamode,
                     sample_cnt = 100,
                     verbose = verbose
                     )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        rankret.extend(ret)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                'endrank', 'diff', 'sign',
                               'pred_endrank', 'pred_diff', 'pred_sign',
                                'endlap','pred_endlap'
                               ])
    return df
#prediction of shorterm + pred pit model
def run_simulation_shortterm(predictor, prediction_length, freq,
        datamode = MODE_ORACLE,
        sample_cnt = 100,
        verbose = False
        ):
    """
    Short-term simulation with the predicted pit model: forecast a fixed
    `prediction_length`-lap window at every lap of the race.

    step:
    1. init the lap status model
    2. loop on each pit lap
        1. onestep simulation
        2. eval stint performance

    input:
        predictor         ; trained forecasting model
        prediction_length ; forecast horizon in laps
        freq              ; time-series frequency string
        datamode          ; oracle-mode bit flags controlling track/lap features
        sample_cnt        ; number of samples drawn per forecast
        verbose           ; verbosity passed through to the one-step simulator
    return:
        (df, full_samples, full_tss) ; per-lap accuracy records plus the
        accumulated sample/truth arrays keyed by car number
    """
    global laptime_data
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    #init samples array
    full_samples = {}
    full_tss = {}
    # slide a window over the race; starts at lap 10 to have some context
    for pitlap in range(10, maxlap-prediction_length):
        #1. update lap status
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        #update the featuers
        laptime_data = update_laptimedata(prediction_length, freq,
                            test_event = _test_event,
                            train_ratio=0, context_ratio = 0.,shift_len = prediction_length)
        debug_print(f'update lapstatus done.')
        #run one step sim from pitlap to maxnext
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                     pitlap, pitlap + prediction_length,
                     oracle_mode = datamode,
                     sample_cnt = sample_cnt,
                     verbose = verbose
                     )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, prediction_length)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, prediction_length, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        #debug joint
        #if True:
        #    xmat = forecasts_et[13][:, pitlap:pitlap+prediction_length]
        #    print('debug forecasts_et at ', pitlap)
        #    print(xmat)
        # evaluate for this stint
        #ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred)
        ret = get_acc_onestep_shortterm(forecasts_et, pitlap, pitlap+prediction_length)
        rankret.extend(ret)
        # add to full_samples; laptime2rank already ranks inside the eval above
        evalbyrank = False if _exp_id == 'laptime2rank' else True
        eval_full_samples(pitlap + prediction_length,
                forecast_samples, forecast,
                full_samples, full_tss, evalbyrank=evalbyrank)
    # NOTE(review): `evalbyrank` leaks out of the loop; this raises NameError
    # if the loop body never ran (maxlap <= 10 + prediction_length)
    print('evalbyrank:', evalbyrank)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                'endrank', 'diff', 'sign',
                               'pred_endrank', 'pred_diff', 'pred_sign',
                               ])
    return df, full_samples, full_tss
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset):
    """
    evaluate stint rank by laptime forecasting

    Converts per-lap laptime series into cumulative elapsed time (plus each
    car's lap-0 offset) and ranks cars per lap by double argsort; NaN-padded
    columns sort last. Mutates `forecasts_et` rows 3/4 in place.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target (laptime)
            2,: -> pred target (laptime)
            3,: -> placeholder, filled with true rank
            4,: -> placeholder, filled with pred rank
        start_offset ; DataFrame with car_number/elapsed_time for lap 0; any
                       non-DataFrame value means "no offset" (0 for every car)
        prediction_length ; unused here, kept for interface symmetry
    return:
        forecasts_et (same dict, ranks filled in)
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan
    for carno in forecasts_et.keys():
        # bugfix: default the offset to 0 per car; previously `offset` was
        # undefined on the first iteration when start_offset was not a
        # DataFrame, and stale (previous car's value) afterwards
        offset = 0
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        lapnum = len(forecasts_et[carno][1,:])
        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed
        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed
    #calculate rank, support nan (argsort pushes NaN to the end)
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]
    return forecasts_et
#
#
def eval_full_samples(lap, forecast_samples, forecast, full_samples, full_tss, maxlap=200, evalbyrank = True):
    """
    Accumulate one lap's forecast samples (and the truth) into the running
    per-car arrays, optionally converting values to per-lap ranks first.
    Mutates `full_samples` and `full_tss` in place; returns None.

    input:
        lap              ; lap number being filled in
        forecast_samples ; {} carno -> 1-D array of sampled pred targets
        forecast         ; {}, carno -> 5 x totallen matrix
            1,: -> true target
            2,: -> pred target
        full_samples     ; {} carno -> (samplecnt, maxlap) accumulator
        full_tss         ; {} carno -> (maxlap,) truth accumulator
        maxlap           ; race length used to size the accumulators
        evalbyrank       ; True -> store cross-car ranks; False -> raw values
    return:
        full_samples
        full_tss
    """
    #get car list for this lap
    carlist = list(forecast.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    # sample count is taken from the first car; assumes every car has the
    # same number of samples -- TODO confirm with sim_onestep_pred
    samplecnt = len(forecast_samples[carlist[0]])
    #diff_time = np.zeros((len(carlist), 1))
    diff_time = np.zeros((len(carlist), maxlap))
    diff_time_hat = np.zeros((len(carlist), samplecnt))
    diff_time[:,:] = np.nan
    diff_time_hat[:,:] = np.nan
    for carno in carlist:
        #diff_time[caridmap[carno],0] = forecast[carno][1, lap]
        maxlen = len(forecast[carno][1, :])
        diff_time[caridmap[carno],:maxlen] = forecast[carno][1, :]
        diff_time_hat[caridmap[carno],:] = forecast_samples[carno]
    if evalbyrank == True:
        #calculate rank, support nan: double argsort across cars (axis=0);
        #NaN-padded entries sort last
        idx = np.argsort(diff_time, axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(diff_time_hat, axis=0)
        pred_rank = np.argsort(idx, axis=0)
    else:
        # keep raw values (e.g. laptime2rank already ranks elsewhere)
        true_rank = diff_time
        pred_rank = diff_time_hat
    # save the rank back
    for carno in carlist:
        if carno not in full_tss:
            #init lazily on first sight of this car, NaN-filled
            full_tss[carno] = np.zeros((maxlap))
            full_samples[carno] = np.zeros((samplecnt, maxlap))
            full_tss[carno][:] = np.nan
            full_samples[carno][:,:] = np.nan
        # rewrite the truth up to and including `lap`; the second line is
        # redundant with the first slice but kept as-is
        full_tss[carno][:lap] = true_rank[caridmap[carno]][:lap]
        full_tss[carno][lap] = true_rank[caridmap[carno]][lap]
        full_samples[carno][:, lap] = pred_rank[caridmap[carno],:]
    return
def eval_stint_direct(forecasts_et, prediction_length):
    """
    Rank cars per lap directly from time-difference forecasts.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target (timediff)
            2,: -> pred target (timediff)
            3,: -> placeholder, filled with true rank
            4,: -> placeholder, filled with pred rank
        prediction_length ; unused, kept for interface symmetry
    return:
        forecasts_et (same dict, ranks written into rows 3/4)
    """
    # Indy500 upper bound on race length
    MAXLAP = 200
    cars = list(forecasts_et.keys())
    col_of = {c: i for i, c in enumerate(cars)}
    # stacked (true, pred) matrices, NaN-padded beyond each car's lap count
    stacked = np.zeros((2, len(cars), MAXLAP))
    stacked[:, :] = np.nan
    for c in cars:
        rec = forecasts_et[c]
        n = rec.shape[1]
        stacked[0, col_of[c], :n] = rec[1, :]
        stacked[1, col_of[c], :n] = rec[2, :]
    # double argsort turns values into per-lap ranks; NaNs sort last
    ranks = [np.argsort(np.argsort(layer, axis=0), axis=0) for layer in stacked]
    # write the ranks back into the placeholder rows
    for c in cars:
        rec = forecasts_et[c]
        n = rec.shape[1]
        rec[3, :] = ranks[0][col_of[c], :n]
        rec[4, :] = ranks[1][col_of[c], :n]
    return forecasts_et
#calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    Same mechanics as eval_stint_bylaptime: cumulative-sum laptimes into
    elapsed time (plus each car's lap-0 offset), then rank cars per lap via
    double argsort (NaN columns sort last). Mutates rows 3/4 in place.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
                       (row 1 true laptime, row 2 pred laptime,
                        rows 3/4 filled with true/pred rank)
        start_offset ; DataFrame with car_number/elapsed_time for lap 0;
                       any non-DataFrame value means no offset (0)
        prediction_length ; unused here, kept for interface symmetry
    return:
        forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan
    for carno in forecasts_et.keys():
        # bugfix: default the offset to 0 per car; previously `offset` was
        # undefined (NameError) when start_offset was not a DataFrame, and
        # silently reused the previous car's value afterwards
        offset = 0
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        lapnum = len(forecasts_et[carno][1,:])
        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed
        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed
    #calculate rank, support nan
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]
    return forecasts_et
# In[13]:
def get_sign(diff):
    """Map a numeric difference to its sign: 1 (positive), -1 (negative), 0."""
    return 1 if diff > 0 else (-1 if diff < 0 else 0)
#
# configuration of the experiment run
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'
_test_event = 'Indy500-2018'
#_test_event = 'Indy500-2019'
_train_len = 40
_test_train_len = 40
_feature_mode = FEATURE_STATUS
_context_ratio = 0.
#_task_id = 'timediff' # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
#
#_task_id = 'lapstatus' # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus' #rank, laptime, laptim2rank, timediff2rank...
# default task: train on laptime, evaluate rank derived from laptime
_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
_inlap_status = 1
_force_endpit_align = False
_include_endpit = False
#_use_mean = False # mean or median to get prediction from samples
_use_mean = True # mean or median to get prediction from samples
# joint train the target of (rank, lapstatus)
_joint_train = False
_pitmodel_bias = 0
# In[16]:
# module-level state populated by init()
global_start_offset = {}
global_carids = {}
laptime_data = None
laptime_data_save = None
freq = "1min"
decode_carids = {}
# events covered by the pickled dataset
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
_trim = 0
def init(pitmodel = '', pitmodel_bias = 0):
    """
    Load race data and the per-lap feature pickle, and configure the pit model.

    input:
        pitmodel      ; non-str -> PitModelSimple (top8 when == 0);
                        'oracle' -> keep the string, use ground-truth pits;
                        any other str -> model file path for PitModelMLP
        pitmodel_bias ; stored into the module global _pitmodel_bias for the
                        MLP pit model case
    side effects:
        sets the module globals declared below (dataset, offsets, pit model)
    """
    global global_carids, laptime_data, global_start_offset, decode_carids,_pitmodel
    # bugfix: laptime_data_save and _pitmodel_bias are module-level globals
    # but were previously assigned as dead locals; declare them so the
    # assignments below actually take effect.
    global dbid, _inlap_status, laptime_data_save, _pitmodel_bias
    dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
    stagedata = {}
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata = stagedata[event]
        #offset: elapsed time at lap 0 for every car of this event
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]
    # start from here
    import pickle
    #with open('laptime_rank_timediff_fulltest-oracle-%s.pickle'%year, 'rb') as f:
    laptimefile = f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
    with open(laptimefile, 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    # keep a reference to the freshly loaded data before simulations mutate it
    laptime_data_save = laptime_data
    decode_carids={carid:carno for carno, carid in global_carids.items()}
    print(f'init: load dataset {laptimefile} with {len(laptime_data)} races, {len(global_carids)} cars')
    if not isinstance(pitmodel, str):
        _pitmodel = PitModelSimple(top8=(True if pitmodel==0 else False))
        print(f'init pitmodel as PitModelSimple')
    elif pitmodel=='oracle':
        _pitmodel = pitmodel
    else:
        _pitmodel_bias = pitmodel_bias
        _pitmodel = PitModelMLP(modelfile = pitmodel)
        print(f'init pitmodel as PitModelMLP(pitmodel)')
def get_evalret(df):
    """
    Score stint-level predictions against the ground truth and a naive
    "rank does not change" baseline.

    input:
        df ; evaluation DataFrame with columns sign/pred_sign, diff/pred_diff,
             startrank/endrank (one row per (car, stint))
    return:
        2x4 array [[acc, mae, rmse, r2], [acc_naive, mae_naive, rmse_naive, r2_naive]]
    """
    # sign-agreement accuracy of the predicted rank change
    correct = df[df['sign']==df['pred_sign']]
    acc = len(correct)/len(df)
    # mae1 duplicates mae below; computed but unused
    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values))/len(df)
    rmse = math.sqrt(mean_squared_error(df['pred_diff'].values , df['diff'].values))
    mae = mean_absolute_error(df['pred_diff'].values , df['diff'].values)
    r2 = r2_score(df['pred_diff'].values , df['diff'].values)
    #naive result: predict zero rank change
    n_correct = df[df['startrank']==df['endrank']]
    acc_naive = len(n_correct)/len(df)
    mae_naive = np.mean(np.abs(df['diff'].values))
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)
    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}, {mae_naive1}')
    #print(f'pred: acc={acc}, mae={mae}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, {%d}\n \
            naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
            acc, mae, rmse, r2, len(df),
            acc_naive, mae_naive, rmse_naive, r2_naive
            )
         )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    #return acc, mae, rmse, r2
def get_evalret_shortterm(df):
    """
    Score short-term (fixed-horizon) predictions: leader accuracy plus
    rank-error metrics, against the naive "rank stays" baseline.

    input:
        df ; evaluation DataFrame with startlap, startrank, endrank,
             pred_endrank, diff columns (one row per car per lap window)
    return:
        2x4 array [[acc, mae, rmse, r2], [acc_naive, mae_naive, rmse_naive, r2_naive]]
    """
    maxlap = np.max(df['startlap'].values)
    minlap = np.min(df['startlap'].values)
    # leader (rank 0) prediction accuracy
    top1 = df[df['endrank']==0]
    top1_pred = df[df['pred_endrank']==0]
    correct = top1_pred[top1_pred['pred_endrank']==top1_pred['endrank']]
    #acc = len(correct)/len(top1_pred)
    # epsilon guards against an empty predicted-leader set
    acc = len(correct)/(len(top1_pred) + 1e-10)
    rmse = math.sqrt(mean_squared_error(df['pred_endrank'].values , df['endrank'].values))
    mae = mean_absolute_error(df['pred_endrank'].values , df['endrank'].values)
    r2 = r2_score(df['pred_endrank'].values , df['endrank'].values)
    # mae1: total absolute error averaged per lap window (unused below)
    mae1 = np.sum(np.abs(df['pred_endrank'].values - df['endrank'].values))
    mae1 = mae1/ (maxlap -minlap +1)
    #naive result: predict zero rank change
    top1_naive = df[df['startrank']==0]
    n_correct = top1_naive[top1_naive['startrank']==top1_naive['endrank']]
    acc_naive = len(n_correct)/len(top1_naive)
    mae_naive = np.mean(np.abs(df['diff'].values))
    mae_naive1 = np.sum(np.abs(df['diff'].values))
    mae_naive1 = mae_naive1 / (maxlap - minlap + 1)
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)
    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}, {mae_naive1}')
    #print(f'pred: acc={acc}, mae={mae}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, {%d}\n \
            naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
            acc, mae, rmse, r2, len(top1_pred),
            acc_naive, mae_naive, rmse_naive, r2_naive
            )
         )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
if __name__ == '__main__':
    # Command-line entry point: parse options, configure the task globals,
    # load the dataset and run the evaluation driver.
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # cmd argument parser
    usage = 'stint_predictor_fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    parser.add_option("--contextratio", dest="contextratio", default=0.)
    parser.add_option("--trim", dest="trim", type=int, default=0)
    opt, args = parser.parse_args()
    #set global parameters
    _dataset_id = opt.datasetid
    _test_event = opt.testevent
    _trim = opt.trim
    # map the task id onto the (target column, experiment id) pair
    if opt.taskid == 'laptime':
        _task_id = 'laptime' # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
        _exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff' # rank,laptime, the trained model's task
        _run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
        _exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'rank':
        _task_id = 'rank' # rank,laptime, the trained model's task
        _run_ts = COL_RANK #COL_LAPTIME,COL_RANK
        _exp_id='rank' #rank, laptime, laptim2rank, timediff2rank...
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)
    if _dataset_id=='' or _test_event=='':
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)
    # dataset ids containing 'pitage' switch the feature mode
    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE
    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
                _task_id)
    init()
    mytest()
rankpredictor | rankpredictor-master/src/indycar/model/ZeroPredictor.py | #!/usr/bin/env python
# coding: utf-8
import mxnet as mx
from mxnet import gluon
import numpy as np
import json
from typing import Callable, Dict, Iterator, NamedTuple, Optional, List
from gluonts.core.component import validated
from gluonts.dataset.common import DataEntry, Dataset
from gluonts.model.predictor import Predictor
from gluonts.model.forecast import SampleForecast
class ZeroPredictor(Predictor):
    """Baseline gluonts predictor that always forecasts zeros.

    Emits `num_samples` all-zero sample paths of length `prediction_length`
    for every entry in the dataset; handles both univariate and multivariate
    targets.
    """
    @validated()
    def __init__(self,
                 freq: str,
                 prediction_length: int) -> None:
        # freq: time-series frequency string; prediction_length: horizon
        self.prediction_length=prediction_length
        self.freq = freq

    def predict(
        self, dataset: Dataset, num_samples: int = 100, **kwargs
    ) -> Iterator[SampleForecast]:
        """Yield one all-zero SampleForecast per dataset entry."""
        for entry in dataset:
            # train_length / feat_dynamic_real are read but unused here
            train_length = len(entry["target"])
            prediction_length = self.prediction_length
            start = entry["start"]
            target = entry["target"]
            feat_dynamic_real = entry.get("feat_dynamic_real", [])
            #forecast_samples = self._run_prophet(data, params)
            #target_dim = target.shape[0] if len(target.shape) > 1 else 1
            # NOTE(review): assumes target is an ndarray (uses .shape) -- confirm
            if len(target.shape) > 1:
                #multivariate: (target_dim, target_len)
                target_dim = target.shape[0]
                target_len = target.shape[1]
            else:
                target_dim = 1
                target_len = target.shape[0]
            if target_dim ==1 :
                forecast_samples = np.zeros((num_samples, prediction_length))
                #navie prediction with the last status of target
                #forecast_samples[:] = target[-prediction_length]
                #forecast_samples[:] = target[-1]
                forecast_samples[:] = 0
            else:
                forecast_samples = np.zeros((num_samples, prediction_length, target_dim))
                #forecast_samples[:,:] = target[-prediction_length]
                #forecast_samples[:,:] = target[-1]
                forecast_samples[:,:] = 0
            # forecast starts right after the observed target;
            # assumes `start + int` advances the timestamp by target_len
            # periods under this gluonts version -- TODO confirm
            yield SampleForecast(
                samples=forecast_samples,
                start_date=start + target_len,
                freq=self.freq,
            )
| 2,187 | 32.151515 | 89 | py |
rankpredictor | rankpredictor-master/src/indycar/model/stint_simulator_shortterm_pitmodel_beforeaddnewfeature.py | #!/usr/bin/env python
# coding: utf-8
# ### stint simulator
# based on: Stint-Predictor-Fastrun
#
#
# long term predictor by continuously regressive forecasting at each pitstop
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random, math
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from indycar.model.pitmodel import PitModelSimple, PitModelMLP
from indycar.model.deeparw import DeepARWeightEstimator
# In[2]:
import os
random.seed()
os.getcwd()
#GPUID = 1
# ### global constants
# In[3]:
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# column indices into the per-car time-series matrix
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8
# share the memory
#COL_LAPSTATUS_PRED = 8  # for dynamic lapstatus predictions
#LAPSTATUS SAVED in forecast_et
COL_LAPSTATUS_SAVE = 0 #laptime no use
COL_CAUTION_LAPS_INSTINT_SAVE=7
COL_LAPS_INSTINT_SAVE= 8
# feature-mode selectors
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
# oracle mode
MODE_ORACLE = 1024  # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2
# oracle mode for training
MODE_NOLAP = 1
MODE_NOTRACK = 2
# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32
# disturbe analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256
# flag value -> printable name, used by get_modestr()
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
             MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
             MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
             MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
             MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
             MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
# In[ ]:
# In[4]:
def load_data(event, year=0):
    """
    Load one event's lap-record CSV and derive rank tables.

    input:
        event ; event name, e.g. 'Indy500-2018' (-> '../data/final/C_<event>.csv')
        year  ; optional; when > 0 the file is 'C_<event>-<year>.csv'
    return:
        alldata  ; raw records (copy of the CSV)
        rankdata ; records de-duplicated per (car, lap), ordered by elapsed time
        acldata  ; completed-laps summary over all cars (see make_cl_data)
    """
    #inputfile = '../data/final/C_'+ event +'-' + year + '-final.csv'
    if year>0:
        # bugfix: year may be an int; str() avoids a TypeError on concatenation
        inputfile = '../data/final/C_'+ event +'-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_'+ event +'.csv'
    #outputprefix = year +'-' + event + '-'
    dataset = pd.read_csv(inputfile)
    #dataset.info(verbose=True)
    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1
    # get records for the cars that finish the race
    completed_car_numbers= dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)
    #print('count of completed cars:', completed_car_count)
    #print('completed cars:', completed_car_numbers)
    #make a copy
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # the finishers-only summary (cldata) was computed here but never used or
    # returned, so that call has been dropped; acldata covers all cars
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """
    Build the per-(car_number, completed_laps) summary table.

    input:
        dataset ; raw lap records with car_number, completed_laps, rank,
                  elapsed_time, current_status, track_status, lap_status plus
                  the auxiliary columns dropped below
    return:
        df ; one row per (car, lap) with rank_diff/time_diff lap deltas;
             deltas are 0 on each car's first lap
    """
    # pick up data with valid rank: first record per (car, lap) by elapsed time
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
                          "last_pitted_lap","start_position","laps_led"], axis=1)
    # lap-over-lap deltas; reset at each car boundary so a car's first lap
    # never inherits the previous car's value.
    # bugfix: use .loc instead of chained indexing (df['c'][mask] = 0), which
    # pandas may apply to a temporary copy (SettingWithCopyWarning) and drop.
    first_of_car = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[first_of_car, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[first_of_car, 'time_diff'] = 0
    df = uni_ds[['car_number','completed_laps','rank','elapsed_time',
                 'rank_diff','time_diff',"current_status", "track_status", "lap_status"]]
    return df
# In[5]:
def nan_helper(y):
    """Locate NaNs in a 1-D array and provide a mask-to-index converter.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, boolean mask of NaN positions
        - index, a function mapping a logical mask to integer indices

    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x= nan_helper(y)
        >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)
    to_indices = lambda z: z.nonzero()[0]
    return mask, to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
def get_modestr(a):
    """Render the mode bit-flags set in *a* as a comma-terminated name string."""
    names = [_mode_map[flag] for flag in _mode_map if test_flag(a, flag)]
    return ''.join('%s,' % name for name in names)
# endpos -> vector of prediction_length
_track_pred = {}
_track_true = {}
def init_track_model():
    # Reset the per-race caches of predicted/true track status (keyed by endpos).
    global _track_pred,_track_true
    _track_pred = {}
    _track_true = {}
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """
    return the predicted track status

    Samples a yellow-flag duration from an empirical model and extends any
    caution already in progress into the forecast window; results are cached
    per endpos so repeated calls for the same position are consistent.

    input:
        track_rec         ; full track-status sequence (1 = yellow flag)
        endpos            ; cache key for this prediction point
        prediction_length ; forecast horizon in laps
        context_len       ; how far back to look for an ongoing caution
    return:
        0/1 array of length prediction_length
    """
    global _track_pred,_track_true
    # this is the perfect track model for Indy500 2018
    # (empirical yellow-flag durations, in laps)
    track_model = [6,4,4,5,6,6,4]
    if endpos in _track_pred:
        return _track_pred[endpos]
    else:
        #get yflag lap count from the start pred point
        yflaplen = 0
        for i in range(1, context_len):
            if track_rec[- prediction_length - i] == 1:
                yflaplen += 1
            else:
                break
        #laps remain, fill into the future
        trackpred = np.array([0 for x in range(prediction_length)])
        yflap_pred = random.choice(track_model)
        # only an already-running caution extends into the forecast window
        if yflaplen > 0 and yflap_pred > yflaplen:
            trackpred[:(yflap_pred - yflaplen)] = 1
        _track_pred[endpos] = trackpred
        _track_true[endpos] = track_rec[- prediction_length:].copy()
        return trackpred
# endpos -> vector of prediction_length
_track_adjust = {}
def init_adjust_track_model():
    # Reset the cache of track-status disturbance adjustments (keyed by endpos).
    global _track_adjust
    _track_adjust = {}
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """
    input:
        tailpos ; <0 end pos of 1
    return the predicted track status

    Disturbance analysis: randomly shortens (-1), keeps (0) or lengthens (+1)
    the caution ending at `tailpos` in the true track status; cached per
    endpos for consistency across calls.
    """
    global _track_adjust
    # this is the perfect track model for Indy500 2018
    # (equally likely shift of the caution tail by -1/0/+1 laps)
    track_model = [-1,0,1]
    if endpos in _track_adjust:
        return _track_adjust[endpos]
    else:
        yflap_adjust = random.choice(track_model)
        #laps remain, fill into the future
        trackadjust = track_rec[-prediction_length:].copy()
        if yflap_adjust == -1:
            # shorten: clear the last caution lap
            trackadjust[tailpos] = 0
        elif yflap_adjust == 1:
            # lengthen: move the last caution lap one position later
            trackadjust[tailpos] = 0
            if (tailpos + 1) <= -1:
                trackadjust[tailpos+1] = 1
        _track_adjust[endpos] = trackadjust
        return trackadjust
# carno -> lap_status
_lap_adjust = {}
_empirical_model = {}
def init_adjust_pitmodel():
    """Reset the pit-lap adjustment cache and its empirical statistics."""
    # bugfix: _empirical_model was missing from the global statement, so the
    # reset below created a dead local and the module global was never cleared
    global _lap_adjust, _empirical_model
    _lap_adjust = {}
    _empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """
    init the lapstatus for each car, save it for future reference
    input:
        carno;
        lapstatus ; the trueth
        force ; retry until the randomly shifted pit lap lands in range;
                when False a single out-of-range draw leaves the pit unmoved
    return:
        the (cached) adjusted lap status for this car
    """
    if carno not in _lap_adjust:
        #adjust it: randomly shift every pit lap by a draw from _adjust_model
        lapadjust = lapstatus.copy()
        for pos in range(0, len(lapstatus)):
            if lapadjust[pos] == 1:
                success = False
                while(not success):
                    # adjust this pit lap position
                    pos_adjust = get_random_choice(_adjust_model)
                    new_pos = pos + pos_adjust
                    if new_pos >= 0 and new_pos < len(lapstatus):
                        #valid: move the pit to the shifted lap
                        lapadjust[pos] = 0
                        lapadjust[new_pos] = 1
                        success = True
                        #add statistics of the applied shifts
                        if pos_adjust not in _empirical_model:
                            _empirical_model[pos_adjust] = 1
                        else:
                            _empirical_model[pos_adjust] += 1
                    if force==False:
                        break
        _lap_adjust[carno] = lapadjust
    return _lap_adjust[carno]
def build_random_model(modeldict):
    """
    Build a cumulative sampling table from a value -> probability dict.

    input:
        modeldict ; {val: probability} (weights need not sum to 1)
    return:
        model ; rows of [val, cdf], sorted by val, cdf normalized to end at 1
    """
    table = np.zeros((len(modeldict), 2))
    running = 0
    for row, val in enumerate(sorted(modeldict)):
        running = running + modeldict[val]
        table[row, 0] = val
        table[row, 1] = running
    # normalize the cumulative column so the final entry is 1.0
    if len(modeldict) > 0:
        table[:, 1] = table[:, 1] / table[-1, 1]
    return table
def print_model(model, iscdf=True):
    """
    Pretty-print a sampling table as 'value:probability' strings.

    input:
        model ; [val, cdf] rows (or [val, pdf] rows when iscdf is False)
        iscdf ; True when the second column is cumulative
    """
    ordered = model[np.argsort(model[:, 0])]
    # with raw pdf rows, normalize by the column total instead of 1
    total = 1. if iscdf else np.sum(ordered[:, 1])
    pairs = []
    prev_cdf = 0
    for val, prob in ordered:
        pairs.append((val, (prob - prev_cdf) / total))
        if iscdf:
            prev_cdf = prob
    #output
    print(['%d:%.3f'%(v, p) for v, p in pairs])
def get_random_choice(model):
    """
    Draw one value from a cumulative sampling table.

    input:
        model ; [val, cdf] rows with a nondecreasing cdf column
    return:
        val drawn according to its probability
    """
    u = np.random.rand()
    # first row whose cdf reaches the uniform draw
    row = np.searchsorted(model[:, 1], u, side='left')
    return int(model[row, 0])
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
_adjust_model = build_random_model(_modeldict)
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """
    Disturbance analysis: randomly shift each pit lap in the forecast window
    by a draw from the module-level _adjust_model.

    input:
        lap_rec           ; full lap-status sequence (1 = pit lap)
        prediction_length ; only the trailing window of this length is adjusted
        force             ; retry until the shifted lap lands inside the
                            window; when False a single failed draw gives up
    return the predicted lap status (adjusted copy of the window)
    """
    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            success = False
            while(not success):
                # adjust this pit lap position
                pos_adjust = get_random_choice(_adjust_model)
                new_pos = pos + pos_adjust
                if new_pos >= 0 and new_pos < prediction_length:
                    #valid: move the pit to the shifted position
                    lapadjust[pos] = 0
                    lapadjust[new_pos] = 1
                    success = True
                if force==False:
                    break
    return lapadjust
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
    """
    Shift each pit lap in the prediction window by -1/0/+1 laps, uniformly.

    input:
        lap_rec ; full lap-status vector (1 marks a pit lap)
        endpos ; unused; kept for interface compatibility with callers
        prediction_length ; size of the forecast window at the tail of lap_rec
    return:
        the perturbed lap-status window (length == prediction_length);
        the number of pit laps is preserved (out-of-range shifts are dropped
        and the pit stays in place)
    """
    adjust_model = [-1, 0, 1]
    # bug fix: removed the unused `lap_adjust = random.choice(adjust_model)`
    # local, which consumed one RNG draw without ever being read.
    # laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            # adjust this pit lap position
            pos_adjust = random.choice(adjust_model)
            if pos_adjust == -1:
                if (pos - 1 >= 0):
                    lapadjust[pos] = 0
                    lapadjust[pos - 1] = 1
            elif pos_adjust == 1:
                if (pos + 1 < prediction_length):
                    lapadjust[pos] = 0
                    lapadjust[pos + 1] = 1
    return lapadjust
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
    """
    Sample a pit-stop forecast for the prediction window.

    A stint length is drawn from an empirical model (Indy500 2018); if the
    sampled pit lap falls inside the window, one 1 is placed there.

    input:
        cuation_laps_instint ; caution laps seen so far in the current stint
                               (>10 selects the shorter-stint model)
        laps_instint ; laps completed so far in the current stint
        prediction_length ; size of the forecast window
    return:
        np.array of length prediction_length, all zeros or with a single 1
        marking the predicted pit lap
    """
    # this is the perfect empirical pit model for Indy500 2018
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
                     [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    # cleanup: removed the unused `pit_model_top8` local; its data is kept
    # here for reference:
    #   [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
    #    [46, 45, 43, 48, 46, 45, 45, 43]]
    pit_model = pit_model_all
    if cuation_laps_instint > 10:
        # use low model (caution-heavy stints are shorter)
        pred_pit_laps = random.choice(pit_model[0])
    else:
        pred_pit_laps = random.choice(pit_model[1])
    # laps remain, fill into the future
    pitpred = np.array([0 for x in range(prediction_length)])
    if (pred_pit_laps > laps_instint) and (pred_pit_laps <= laps_instint + prediction_length):
        pitpred[pred_pit_laps - laps_instint - 1] = 1
    return pitpred
def make_dataset_byevent(runs, prediction_length, freq,
                         useeid = False,
                         run_ts= COL_LAPTIME,
                         test_event = 'Indy500-2018',
                         test_cars = [],
                         use_global_dict = True,
                         oracle_mode = MODE_ORACLE,
                         half_moving_win = 0,
                         train_ratio=0.8,
                         log_transform = False,
                         context_ratio = 0.,
                         verbose = False
                         ):
    """
    Build GluonTS train/test ListDatasets from the global laptime_data,
    split per event: the configured test event goes into the test set
    (as rolling windows), every other event is skipped by this variant.

    split the ts to train and test part by the ratio
    input:
        runs ; index into laptime_data selecting one event, or <0 for all
        prediction_length ; forecast horizon in laps
        freq ; pandas frequency string for the synthetic timestamps
        oracle_mode: false to simulate prediction in real by
            set the covariates of track and lap status as nan in the testset
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
        context_ratio ; when nonzero, sets the minimum context length as a
            fraction of the longest ts
    return:
        (train_ds, test_ds, train_set, test_set) ; GluonTS ListDatasets plus
        the underlying record lists

    NOTE(review): run_ts and test_event parameters are immediately
    overwritten by the module globals _run_ts/_test_event below, so the
    passed-in values are ignored — confirm this is intentional.
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    init_track_model()
    init_adjust_track_model()
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []
    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus
    # mae[0]/mae[1] accumulate the absolute covariate changes introduced by
    # the disturbance/prediction modes vs the raw data
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out (this variant only builds records for the test event)
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
            if context_len < 10:
                context_len = 10
        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            # keep an unmodified copy to measure covariate adjustment error
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            #check carno in test_cars
            if len(test_cars)>0 and carno not in test_cars:
                continue
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                                     rec[COL_LAPSTATUS,:]]
                               }
                              )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1
                #bug fix, fixed the split point for all cars/ts
                for endpos in range(max_len, context_len+prediction_length,
                                    step):
                    #check if enough for this ts
                    if endpos > totallen:
                        continue
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    # stint state at the last observed lap before the window
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                                                     laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        # NOTE(review): start_pitage is not incremented for
                        # consecutive no-pit laps, so pitage stays at
                        # start_pitage+1 until a pit resets it — confirm
                        # whether a running increment was intended.
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec
                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec
                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec]
                                      }
                                     )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                      }
                                     )
                    test_rec_cnt += 1
            #add one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
        train_set.extend(_train)
        test_set.extend(_test)
    print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)
    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile, freq, prediction_length, cardinality, train_ds, test_ds):
    """
    Serialize a dataset bundle to disk with pickle.

    The bundle layout is [freq, prediction_length, cardinality, train_ds, test_ds].

    input:
        datafile ; destination path
    """
    bundle = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as outf:
        # highest protocol: most compact pickle supported by this interpreter
        pickle.dump(bundle, outf, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[6]:
def predict(test_ds,predictor):
    """
    Run rolling evaluation predictions over a GluonTS test dataset.

    input:
        test_ds ; GluonTS dataset to evaluate on
        predictor ; a GluonTS Predictor
    return:
        (tss, forecasts) ; ground-truth series and forecast objects, as lists
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    # materialize forecasts first, then the truth series; presumably the two
    # iterators are produced in lockstep, so the consumption order is kept.
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name, trainid):
    """
    Load (or construct) the requested predictor and forecast over test_ds.

    input:
        test_ds ; GluonTS dataset to predict on
        prediction_length ; forecast horizon; must match the trained model
        model_name ; a serialized deepAR variant name, or 'naive'/'zero'/'arima'
        trainid ; subdirectory id selecting the training run
    return:
        [tss, forecasts] on success, [] for an unknown model_name
    """
    # model names that map to serialized deepAR checkpoints on disk;
    # refactor: the six copy-pasted deserialize branches collapsed into one
    _serialized_models = {
        'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
        'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
        'oracle': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
        'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
        'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
        'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
    }
    with mx.Context(mx.gpu(7)):
        pred_ret = []
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'
        if model_name in _serialized_models:
            modeldir = rootdir + _serialized_models[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        # naive
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        # arima
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                                           prediction_length = prediction_length,trunc_length=60)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        else:
            print(f'error: model {model_name} not support yet!')
        return pred_ret
def load_model(prediction_length, model_name, trainid, epochs=1000, exproot='../models/remote'):
    """
    Load the requested predictor (serialized checkpoint or built-in baseline).

    input:
        prediction_length ; forecast horizon; must match the trained model
        model_name ; a serialized deepAR variant name, or 'naive'/'zero'/'arima'
        trainid ; subdirectory id selecting the training run
        epochs ; epoch tag in the checkpoint name (only used by 'oracle')
        exproot ; root directory of the experiment checkpoints
    return:
        a Predictor instance, or None when model_name is unknown
        (bug fix: previously an unknown name raised UnboundLocalError on return)
    """
    # model names that map to serialized checkpoints on disk;
    # refactor: the copy-pasted deserialize branches collapsed into one map
    _serialized_models = {
        'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
        'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
        'weighted-oracle': f'deepARW-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
        # debug for weighted model: 'oracle' currently loads the deepARW
        # checkpoint and honors the epochs argument
        'oracle': f'deepARW-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}',
        'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
        'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
        'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
    }
    with mx.Context(mx.gpu(7)):
        # default to None so the final return is always well-defined
        predictor = None
        #rootdir = f'../models/{exproot}/{_dataset_id}/{_task_id}-{trainid}/'
        rootdir = f'{exproot}/{_dataset_id}/{_task_id}-{trainid}/'
        if model_name in _serialized_models:
            model = _serialized_models[model_name]
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
        # naive
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
        # arima
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                                           prediction_length = prediction_length,trunc_length=60)
        else:
            print(f'error: model {model_name} not support yet!')
        return predictor
# In[7]:
#calc rank
def eval_rank_bytimediff(test_ds, tss, forecasts, prediction_length):
    """
    Evaluate rank prediction from timediff forecasts.

    timediff models; works for one event only.

    input:
        test_ds ; test set for a single event (carries the car ids)
        tss, forecasts ; forecast result, aligned with test_ds
        prediction_length ; expected forecast horizon
    return:
        (rank_ret, forecasts_et)
        rank_ret ; rows of [lap, time_diff, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [truth, prediction]}}
    """
    carlist = []
    # carno-lap# -> elapsed_time[] array
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # global carid -> car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: this print was missing the f prefix, so the
            # placeholders were emitted literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # NOTE(review): callers unpack two return values; this bare []
            # would raise at the call site — kept for compatibility.
            return []
        # point forecast: per-step median over the sample paths
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        timediff_array = tss[idx].values.copy()
        #save the prediction
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                                               forecast_laptime_mean.copy()]
    # calc rank: for each forecast origin lap, rank the cars by timediff
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key:idx for idx, key in enumerate(carlist)}
        #fill in data: [0]=truth, [1]=prediction
        time_diff = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        #calculate rank (argsort of argsort gives the rank of each car)
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, time_diff, true_rank, pred_rank])
    return rank_ret, forecasts_et
#calc rank
def eval_laptime(test_ds, tss, forecasts, prediction_length, start_offset):
    """
    Evaluate laptime forecasting (returns laptime arrays, not ranks).

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
            contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event (unused here)
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; rows of [lap, lap_time, true_laptime, pred_laptime]
        forecasts_et ; {completed_laps: {carno: [laptime, laptime_pred]}}
    """
    carlist = []
    # carno-lap# -> laptime[] array
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # global carid -> car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: this print was missing the f prefix, so the
            # placeholders were emitted literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # NOTE(review): callers unpack two return values; this bare []
            # would raise at the call site — kept for compatibility.
            return []
        # point forecast: per-step mean over the sample paths
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        #forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        laptime_array = tss[idx].values.copy()
        laptime_array_hat = tss[idx].values.copy()
        laptime_array_hat[-prediction_len:] = forecast_laptime_mean
        #save the prediction
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
                                               laptime_array_hat[-prediction_len:].copy()]
    # assemble per-lap matrices (cars x horizon)
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key:idx for idx, key in enumerate(carlist)}
        #fill in data: [0]=truth, [1]=prediction
        lap_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        # laptime forecasting: no ranking step here, return the raw matrices
        true_laptime = lap_time[0]
        pred_laptime = lap_time[1]
        rank_ret.append([lap, lap_time, true_laptime, pred_laptime])
    return rank_ret, forecasts_et
#calc rank
def eval_rank(test_ds, tss, forecasts, prediction_length, start_offset):
    """
    Evaluate rank by laptime forecasting.

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
            contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event; when a
            DataFrame is passed, forecasts are laptimes converted to elapsed
            time, otherwise the target is ranked directly
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
    """
    carlist = []
    # carno-lap# -> elapsed_time[] array
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # global carid -> car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        # start_offset carries the lap-0 elapsed time per car
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: this print was missing the f prefix, so the
            # placeholders were emitted literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # NOTE(review): callers unpack two return values; this bare []
            # would raise at the call site — kept for compatibility.
            return []
        # point forecast: mean or median over sample paths (global switch)
        if _use_mean:
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        else:
            forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        if isinstance(start_offset, pd.core.frame.DataFrame):
            # laptime2rank: accumulate laptimes into elapsed time
            laptime_array = tss[idx].values.copy()
            elapsed_time = np.cumsum(laptime_array) + offset
            laptime_array = tss[idx].values.copy()
            laptime_array[-prediction_len:] = forecast_laptime_mean
            elapsed_time_hat = np.cumsum(laptime_array) + offset
        else:
            # rank-direct: the target itself is ranked
            elapsed_time = tss[idx].values.copy()
            elapsed_time_hat = tss[idx].values.copy()
            elapsed_time_hat[-prediction_len:] = forecast_laptime_mean
        #save the prediction
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]
    # calc rank: for each forecast origin lap, rank cars by elapsed time
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key:idx for idx, key in enumerate(carlist)}
        #fill in data: [0]=truth, [1]=prediction
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        #calculate rank (argsort of argsort gives the rank of each car)
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])
    return rank_ret, forecasts_et
def get_acc(rank_ret, prediction_length, verbose=False):
    """
    Aggregate rank-prediction accuracy metrics over prediction windows.

    input:
        rank_ret: [lap, elapsed_time, true_rank, pred_rank], use [2][3] columns
    return:
        ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
         (record count for each metric...))
        the result can be used to calculate micro/macro metrics
    """
    top1acc = 0
    top1acc_farmost = 0
    top5acc = 0
    top5acc_farmost = 0
    tau = 0
    rmse = 0.
    mae = 0.
    for rec in rank_ret:
        true_rank, pred_rank = rec[2], rec[3]
        # top-1: leader (rank 0) predicted correctly, per step and at the
        # farthest step of the horizon
        top1acc += np.sum((true_rank == 0) & (pred_rank == 0))
        top1acc_farmost += np.sum((true_rank[:, -1] == 0) & (pred_rank[:, -1] == 0))
        # top-5 membership
        top5acc += np.sum((true_rank < 5) & (pred_rank < 5))
        top5acc_farmost += np.sum((true_rank[:, -1] < 5) & (pred_rank[:, -1] < 5))
        # Kendall's tau rank correlation
        tao, _ = stats.kendalltau(true_rank, pred_rank)
        tau += tao
        # rank error metrics
        rmse += mean_squared_error(pred_rank, true_rank)
        mae += np.sum(np.abs(pred_rank - true_rank))
    recnt = len(rank_ret)
    if recnt > 0:
        top1acc = top1acc * 1.0 / (recnt * prediction_length)
        top1acc_farmost = top1acc_farmost * 1.0 / recnt
        top5acc = top5acc * 1.0 / (5 * recnt * prediction_length)
        top5acc_farmost = top5acc_farmost * 1.0 / (5 * recnt)
        tau = tau / recnt
        rmse = rmse / recnt
        mae = mae / recnt
    # debug only: when forecasting lap status, report MAE in the tau slot
    if _run_ts == COL_LAPSTATUS:
        tau = mae
    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', top1acc,
              'top1acc_farmost=', top1acc_farmost,
              'top5acc=', top5acc,
              'top5acc_farmost=', top5acc_farmost,
              )
        print('tau = ', tau,
              'rmse = ', rmse,
              'mae = ', mae)
    return ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
            (recnt * prediction_length, recnt, 5 * recnt * prediction_length, 5 * recnt, recnt, recnt))
# In[ ]:
# In[8]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc, datamode='', models=[]):
    """
    Run the evaluation grid (halfmode x prediction_length x trainid) `runs`
    times and average the metrics over the runs.

    input:
        plens=[2,5,10]
        half=[False]
        #trainids = ["indy500-r0.2","indy500-r0.4","indy500"]
        trainids = ["r0.5"]
        #half=[True,False]
        #plens=[2]
        runs = 5
        train_ratio=0.5
        exp_id='mean-splitbystage-predpit'
        testfunc ; run_exp_predpit, run_exp_predtrack, run_exp ... (a callable),
            or a string, in which case run_exp is called with datamode/models
    return:
        dfret ; average result of multiple runs
            dataframe['model' , 'prediction_length', 'halfmode','datamode','trainid',
                'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse',
                'top1acc_std','top1acc_farmost_std','top5acc_std','top5acc_farmost_std','tau_std','rmse_std']
        alldata_ret ; for debug
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
            pred_ret[model] -> [tss, forecasts]
            test_ds[model] -> test_ds
            rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
            forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    if plens == [] or half == [] or trainids == []:
        print("error with empty settings")
        return
    #testfunc or (datamode & models): a string testfunc requires both
    if isinstance(testfunc, str) and (datamode == '' or models == []):
        print("error with testfunc")
        return
    allret = []
    alldata_ret = []
    for runid in range(runs):
        exp_data = []
        exp_result = []
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('='*10)
                    # callable testfunc takes precedence; a string falls
                    # back to run_exp with the datamode/models arguments
                    if not isinstance(testfunc, str):
                        pred_ret, test_ds, rank_ret, metric_ret = testfunc(plen, halfmode,
                                                                           train_ratio=train_ratio,
                                                                           trainid=trainid)
                    else:
                        pred_ret, test_ds, rank_ret, metric_ret = run_exp(plen, halfmode,
                                                                          train_ratio=train_ratio,
                                                                          trainid=trainid,
                                                                          datamode=datamode,
                                                                          models=models)
                    #save
                    exp_data.append((pred_ret, test_ds, rank_ret))
                    exp_result.extend(metric_ret)
        #save result for this run
        result = pd.DataFrame(exp_result, columns = ['model' , 'prediction_length', 'halfmode',
                                                     'datamode','trainid',
                                                     'top1acc','top1acc_farmost','top5acc',
                                                     'top5acc_farmost','tau','rmse'])
        #result['runid'] = [runid for x in range(len(result))]
        allret.append(result)
        alldata_ret.append(exp_data)
    #final: stack the 6 numeric metric columns of every run and average
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :,:] = ret[['top1acc','top1acc_farmost','top5acc',
                                   'top5acc_farmost','tau','rmse']].values
    #average and std over runs
    averagemat = np.mean(metrics[:,:,:], axis=0)
    stdmat = np.std(metrics[:,:,:], axis=0)
    dfhead = allret[0][['model' , 'prediction_length', 'halfmode', 'datamode','trainid']]
    dfaverage = pd.DataFrame(averagemat, columns = ['top1acc','top1acc_farmost','top5acc',
                                                    'top5acc_farmost','tau','rmse'])
    dfstd = pd.DataFrame(stdmat, columns = ['top1acc_std','top1acc_farmost_std','top5acc_std',
                                            'top5acc_farmost_std','tau_std','rmse_std'])
    dfret = pd.concat([dfhead, dfaverage, dfstd], axis=1)
    #if exp_id != '':
    #    dfret.to_csv(f'laptime2rank-evaluate-indy500-{exp_id}-result.csv', float_format='%.3f')
    return dfret, alldata_ret
def checkret_status(dataret, runid = 0, idx = 0,model='oracle'):
    """
    Sanity check: count yellow-flag and pit laps inside the prediction
    windows of one model's test dataset.

    check the test_ds track and lap status
    alldata_ret ; for debug
        [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
        pred_ret[model] -> [tss, forecasts]
        test_ds[model] -> test_ds
        rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
        forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    # horizon length from the first forecast's sample matrix
    _, plen = dataret[runid][idx][0][model][1][0].samples.shape
    test_ds = dataret[runid][idx][1][model]
    yfcnt = 0
    pitcnt = 0
    for test_rec in test_ds:
        # resolve the car number (also validates the static feature)
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        track_rec, lap_rec = test_rec['feat_dynamic_real']
        # tally flags inside the forecast window only
        yfcnt += np.sum(track_rec[-plen:])
        pitcnt += np.sum(lap_rec[-plen:])
    print('yfcnt:', yfcnt, 'pitcnt:', pitcnt)
def get_ref_oracle_testds(plens, halfs, train_ratio=0.8, test_cars = []):
    """
    Build oracle-mode reference test datasets for every combination of
    prediction length and moving-window mode.

    return:
        {'<plen>-<halfwin>': test_ds} keyed by the two grid parameters

    NOTE(review): the mutable default test_cars=[] is kept for interface
    compatibility; it is only read here, never mutated.
    """
    testset = {}
    for plen in plens:
        for halfwin in halfs:
            # only the test split of the oracle dataset is needed
            _, ref_ds, _, _ = make_dataset_byevent(events_id[_test_event], plen, freq,
                                                   oracle_mode=MODE_ORACLE,
                                                   run_ts=_run_ts,
                                                   test_cars=test_cars,
                                                   half_moving_win=halfwin,
                                                   train_ratio=train_ratio)
            testset['%d-%d' % (plen, halfwin)] = ref_ds
    return testset
def checkret_confusionmat(dataret, ref_testset, runid= 0, testid = '', model='oracle'):
    """
    output the 4x4 confusion matrix split by track and lap status

    input:
        dataret     ; [runid][plen-idx] -> (pred_ret, test_ds, rank_ret)
        ref_testset ; dict '<plen>-<half>' -> oracle test ds (see get_ref_oracle_testds)
        runid       ; which run of dataret to evaluate
        testid      ; label copied into the output rows
        model       ; model key inside the dataret entries
    return:
        dfacc ; DataFrame of accuracy metrics per <track,lap> status type
                ('00','10','01','11') plus one 'aa' row for the whole set
    """
    plen_length = len(dataret[runid])
    dflist = []
    for idx in range(plen_length):
        # prediction length is recovered from the forecast sample shape
        _, plen = dataret[runid][idx][0][model][1][0].samples.shape
        test_ds = dataret[runid][idx][1][model]
        rank_ret = dataret[runid][idx][2][model][0]
        key = '%d-%d'%(plen,0)
        if key not in ref_testset:
            print(f'error, {key} not found in ref_testset')
            continue
        ref_oracle_testds = ref_testset[key]
        if len(ref_oracle_testds) != len(test_ds):
            print('error, size of testds mismatch', len(ref_oracle_testds), len(test_ds))
            continue
        # confusion matrix for <trackstatus, lapstatus> type: 00,01,10,11
        # lap(start lap of prediction) -> type
        lapmap = {}
        ds_iter = iter(ref_oracle_testds)
        for recid in range(len(ref_oracle_testds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            track_rec,lap_rec = test_rec['feat_dynamic_real']
            # count yellow-flag / pit laps inside the prediction window
            yfcnt = np.sum(track_rec[-plen:])
            pitcnt = np.sum(lap_rec[-plen:])
            #laptype = ('0' if yfcnt==0 else '1') + ('0' if pitcnt==0 else '1')
            lap = len(track_rec) - plen + 1
            # accumulate counts over all cars that share this start lap
            if lap not in lapmap:
                #lapmap[lap] = laptype
                lapmap[lap] = (yfcnt, pitcnt)
            else:
                oldtype = lapmap[lap]
                lapmap[lap] = (yfcnt + oldtype[0], pitcnt + oldtype[1])
        #split the rank_ret by laptype
        types=['00','10','01','11']
        acc_ret = []
        for laptype in types:
            check_ret = []
            for item in rank_ret:
                typecnt = lapmap[item[0]]
                # first char: any yellow-flag lap, second char: any pit lap
                thetype = ('0' if typecnt[0]==0 else '1') + ('0' if typecnt[1]==0 else '1')
                if thetype == laptype:
                    check_ret.append(item)
            # get acc
            metrics = get_acc(check_ret,plen)
            recret = [testid, plen, laptype, len(check_ret)]
            recret.extend(metrics[0])
            acc_ret.append(recret)
        #add one summary row ('aa') over all test records
        metrics = get_acc(rank_ret,plen)
        recret = [testid, plen, 'aa', len(rank_ret)]
        recret.extend(metrics[0])
        acc_ret.append(recret)
        _dfacc = pd.DataFrame(acc_ret, columns = ['testid','plen',
            'type','reccnt','top1acc','top1acc_farmost','top5acc',
            'top5acc_farmost','tau','rmse'])
        dflist.append(_dfacc)
    dfacc = pd.concat(dflist, axis=0)
    return dfacc
# In[9]:
def check_testds(datamode, test_cars=None):
    """
    Build the train/test datasets for every (prediction_length,
    half_moving_win) combination of the test event and discard them;
    useful only for the statistics make_dataset_byevent prints itself.

    input:
        datamode  ; oracle-mode flags forwarded to make_dataset_byevent
        test_cars ; optional list of car numbers to restrict the test set
    """
    # bugfix: avoid a shared mutable default argument ([])
    if test_cars is None:
        test_cars = []
    # plens / half / train_ratio / freq come from module-level configuration
    for prediction_length in plens:
        for half_moving_win in half:
            train_ds, test_ds, _, _ = make_dataset_byevent(events_id[_test_event], prediction_length, freq,
                             oracle_mode=datamode,
                             run_ts=_run_ts,
                             test_cars=test_cars,
                             half_moving_win=half_moving_win,
                             train_ratio=train_ratio)
def dotest(config):
    """
    Run every configured (model, testname -> datamode) combination and
    collect both the raw results and the confusion-matrix accuracy.

    input:
        config ; model -> {testname: datamode}
    return:
        (dfret, dfacc) ; concatenated result / accuracy DataFrames
    """
    result_frames = []
    acc_frames = []
    for model, conf in config.items():
        for testname, datamode in conf.items():
            df, dataret = run_test(runs, plens, half, trainids,
                train_ratio, testname, datamode=datamode, models=[model])
            # accuracy split by <track,lap> status type
            acc = checkret_confusionmat(dataret, ref_testset,
                    testid=testname, model=model)
            result_frames.append(df)
            acc_frames.append(acc)
    return pd.concat(result_frames, axis=0), pd.concat(acc_frames, axis=0)
#
# simulator
#
def get_pitlaps(verbose = True, prediction_length=2):
    """
    collect pitlaps info from COL_LAPSTATUS
    input:
        laptime_data  ;
        _test_event   ;
        events
        _train_len    ; minimum laps for a ts(otherwise, discard)
        global_car_ids  ; carno-> carid mapping
    return:
        ret_pitlaps ; sorted list of laps which are a pitstop for some car
        all_pitlaps ; dict carno -> list of that car's pitstop laps
        max_lap     ; longest ts length seen in the test event
    """
    run_ts = _run_ts
    #all_pitlaps = []  # carno -> pitstops
    all_pitlaps = {}  # carno -> pitstops
    max_lap = 0
    for _data in laptime_data:
        # only the configured test event is scanned
        if events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < _train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            #print(f'carno:{carno}, totallen={totallen}')
            #first, get target a copy
            # target can be COL_XXSTATUS
            lap_status = rec[COL_LAPSTATUS, :]
            pitstops = np.where(lap_status == 1)[0]
            # filter out inlaps (when _inlap_status > 0)
            # NOTE(review): this assumes pit laps come in in/out pairs so that
            # every other index is the real stop -- confirm for odd-length runs
            if _inlap_status !=0:
                if _inlap_status == 1:
                    #remove inlaps in previous of pit stops
                    pitstops_tmp = [pitstops[x] for x in range(1, len(pitstops), 2)]
                    pitstops = pitstops_tmp
                elif _inlap_status == 2:
                    #remove inlaps in next lap of pit stops
                    pitstops_tmp = [pitstops[x] for x in range(0, len(pitstops), 2)]
                    pitstops = pitstops_tmp
            #all_pitlaps.append(list(pitstops))
            all_pitlaps[carno] = list(pitstops)
            # append the end lap
            if _include_endpit:
                all_pitlaps[carno].append(totallen-1)
    #return: union of all cars' pit laps, deduplicated and sorted
    allset = []
    for l in all_pitlaps.keys():
        allset.extend(all_pitlaps[l])
    ret_pitlaps = sorted(list(set(allset)))
    return ret_pitlaps, all_pitlaps, max_lap
def get_nextpit_raw(pitlaps, startlap):
    """
    Find, for every car, the first pit stop strictly after ``startlap``.

    input:
        pitlaps  ; dict carno -> list of pit-stop laps for that car
        startlap ; reference lap
    return:
        nextpit_map ; dict carno -> next pit lap (cars without a later pit
                      are omitted from the map)
        maxlap      ; latest "next pit" over all cars; -1 when no car has
                      a pit stop after startlap
    """
    nextpit = []
    nextpit_map = {}
    for carno, rec in pitlaps.items():
        # rec is in lap order, so the first lap beyond startlap is the next pit
        for lap in rec:
            if lap > startlap:
                nextpit.append(lap)
                nextpit_map[carno] = lap
                break
        else:
            # no later pit for this car; keep a -1 placeholder so max()
            # reflects "no pit" for it
            nextpit.append(-1)
    # bugfix: max() on an empty sequence raised ValueError when pitlaps was empty
    return nextpit_map, (max(nextpit) if nextpit else -1)
def get_nextpit(pitlaps, startlap):
    """
    Find the next pit stop after ``startlap`` for every car.

    input:
        pitlaps  ; dict carno -> list of pit-stop laps
        startlap ; reference lap
    return:
        nextpit_map ; carno -> first pit lap strictly after startlap
                      (cars without one are omitted)
        maxpit      ; latest next-pit among the cars that pit exactly at
                      startlap; -1 when none of those cars pits again
    """
    # cars whose pit stop falls exactly on startlap ("hits")
    hit_cars = [car for car, laps in pitlaps.items() if startlap in laps]
    # first pit strictly after startlap, per car
    nextpit_map = {}
    for car, laps in pitlaps.items():
        later = [lap for lap in laps if lap > startlap]
        if later:
            nextpit_map[car] = later[0]
    # the horizon is driven only by the cars that hit startlap
    maxpit = -1
    for car in hit_cars:
        if car in nextpit_map and nextpit_map[car] > maxpit:
            maxpit = nextpit_map[car]
    return nextpit_map, maxpit
def sim_init():
    """
    Snapshot the pit-related feature rows of every ts in the test event so
    a simulation can later restore them (see update_onets).
    """
    for event_data in laptime_data:
        # only the configured test event is touched
        if events[event_data[0]] != _test_event:
            continue
        lengths = [ts.shape[1] for ts in event_data[2]]
        _ = int(np.max(lengths))  # ts-length statistic, kept for parity
        for row in range(event_data[2].shape[0]):
            rec = event_data[2][row]
            # copy the live pit features into their *_SAVE shadow rows
            rec[COL_LAPSTATUS_SAVE, :] = rec[COL_LAPSTATUS, :]
            rec[COL_CAUTION_LAPS_INSTINT_SAVE, :] = rec[COL_CAUTION_LAPS_INSTINT, :]
            rec[COL_LAPS_INSTINT_SAVE, :] = rec[COL_LAPS_INSTINT, :]
def update_lapstatus(startlap):
    """
    Re-generate the pit/lap status of every car in the test event from
    ``startlap`` onwards; the per-ts work is delegated to update_onets.
    """
    for event_data in laptime_data:
        if events[event_data[0]] != _test_event:
            continue
        lengths = [ts.shape[1] for ts in event_data[2]]
        _ = int(np.max(lengths))  # ts-length statistic, kept for parity
        for row in range(event_data[2].shape[0]):
            # event_data[1] holds the car numbers aligned with the ts rows
            update_onets(event_data[2][row], startlap, event_data[1][row])
_pitmodel = None
def update_onets(rec, startlap, carno):
    """
    update lapstatus after startlap basedon tsrec by pit prediction model

    input:
        rec      ; a ts with multiple features COL_XXX (modified in place)
        startlap ; laps up to and including startlap are restored from the
                   *_SAVE rows; laps after it are re-simulated
        carno    ; car number, used only for debug reporting
    return:
        None ; rec is updated for COL_LAPSTATUS, COL_CAUTION_LAPS_INSTINT,
               COL_LAPS_INSTINT
    """
    # loop from startlap
    nans, x= nan_helper(rec[_run_ts,:])
    nan_count = np.sum(nans)
    recx = rec[:, ~np.isnan(rec[_run_ts,:])]
    # remove short ts
    totallen = recx.shape[1]
    if startlap >= totallen:
        return
    #totallen = tsrec.shape[1]
    #ipdb.set_trace()
    #reset status :startlap + 1, restore history from the *_SAVE shadow rows
    endpos = startlap + 1
    rec[COL_LAPSTATUS,:] = 0
    rec[COL_LAPSTATUS,:endpos] = rec[COL_LAPSTATUS_SAVE, :endpos]
    rec[COL_CAUTION_LAPS_INSTINT,:endpos] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :endpos]
    rec[COL_LAPS_INSTINT, :endpos] = rec[COL_LAPS_INSTINT_SAVE, :endpos]
    #rec[COL_LAPSTATUS,:] = rec[COL_LAPSTATUS_SAVE, :]
    #rec[COL_CAUTION_LAPS_INSTINT,:] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :]
    #rec[COL_LAPS_INSTINT, :] = rec[COL_LAPS_INSTINT_SAVE, :]
    debug_report('start update_onets', rec[COL_LAPSTATUS], startlap, carno)
    #loop on predict nextpit pos
    curpos = startlap
    while True:
        caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, curpos])
        laps_instint = int(rec[COL_LAPS_INSTINT, curpos])
        # the model predicts the stint length; convert to an absolute lap
        pred_pit_laps = _pitmodel.predict(caution_laps_instint, laps_instint)
        nextpos = curpos + pred_pit_laps - laps_instint
        #debug
        #if carno == 12:
        #    print('pitmodel: startlap={}, laps_instint={}, cuation_laps={}, \
        #            nextpos={}'.format(curpos, laps_instint, caution_laps_instint, nextpos))
        if nextpos >= totallen:
            # predicted pit falls beyond the race: fill to the end and stop
            nextpos = totallen - 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos+1] = caution_laps_instint
            for _pos in range(curpos+1, nextpos+1):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            break
        else:
            #if (pred_pit_laps > laps_instint) and (nextpos < totallen):
            # a valid pit
            rec[COL_LAPSTATUS, nextpos] = 1
            if _inlap_status != 0:
                #inlap is 'P'
                if _inlap_status == 1 :
                    #rec[COL_LAPSTATUS, nextpos-1] = _inlap_status
                    rec[COL_LAPSTATUS, nextpos-1] = 1
                else:
                    #todo: no boudary check
                    #rec[COL_LAPSTATUS, nextpos+1] = _inlap_status
                    rec[COL_LAPSTATUS, nextpos+1] = 1
            # stint counters: carried forward up to the pit, reset at the pit
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos] = caution_laps_instint
            rec[COL_CAUTION_LAPS_INSTINT, nextpos] = 0
            for _pos in range(curpos+1, nextpos):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            rec[COL_LAPS_INSTINT, nextpos] = 0
            #go forward
            curpos = nextpos
    debug_report('after update_onets', rec[COL_LAPSTATUS], startlap, carno)
    return
def debug_pitmodel(startlap, carno, laps_instint, caution_laps_instint, samplecnt=1000):
    """
    Sample the pit model repeatedly and collect the predicted next-pit laps.

    ret:
        list of ``samplecnt`` predicted next-pit positions
    """
    return [
        startlap + _pitmodel.predict(caution_laps_instint, laps_instint) - laps_instint
        for _ in range(samplecnt)
    ]
#debug tracking status
#status matrix : laps x ( endCol x 5 features)
#features: target, lapstatus, lap_instint, caution_instint, trackstatus
# NOTE(review): populated nowhere in this chunk -- presumably filled by
# debug_report_mat once that function is finished; verify before relying on it
_status_mat = {} # stepid -> status matrix
def debug_report_mat(startlap, maxnext):
    """
    output the status of the simulation

    NOTE(review): this function is unfinished -- it walks the test event and
    binds each ts to ``rec`` but never formats or prints anything, and the
    ``startlap``/``maxnext`` arguments are unused.
    """
    fixedWidth = 5
    endCol = 4
    run_ts = _run_ts
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        #header  carno | lap#...
        #fixed width
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
# car numbers to trace in the debug_* helpers; empty list disables all debug output
_debug_carlist = []
#_debug_carlist = [12]
def debug_report_ts(msg, rec, startlap, carno, col= COL_LAPSTATUS):
    """Print one feature row of a ts, split at startlap, for watched cars only."""
    if carno in _debug_carlist:
        head = rec[col, : startlap + 1]
        tail = rec[col, startlap + 1:]
        print(f'--------- {msg}: {startlap} ----------')
        print(head)
        print('=' * 10)
        print(tail)
def debug_report(msg, rec, startlap, carno):
    """Print a 1-D array split at startlap, for watched cars only."""
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[: startlap + 1])
        print('=' * 10)
        print(rec[startlap + 1:])
def debug_print(msg):
    """Print msg only when at least one car is being debugged."""
    if _debug_carlist:
        print(msg)
# works on predicted lap status
def sim_onestep_pred(predictor, prediction_length, freq,
                startlap, endlap,
                oracle_mode = MODE_ORACLE,
                sample_cnt = 100,
                verbose = False
                ):
    """
    Autoregressively roll the predictor forward over [startlap, endlap],
    feeding each window's forecast back in as the target of the next one.
    Works on the *predicted* lap status (COL_LAPSTATUS as rewritten by the
    pit model; the ground truth lives in COL_LAPSTATUS_SAVE).

    input:
        parameters  ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecast    ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
        forecast_samples; save the samples, the farest samples
            {}, carno -> samplecnt of the target
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    test_set = []
    forecasts_et = {}
    forecasts_samples = {}
    _laptime_data = laptime_data.copy()
    # endpos is the exclusive end of each prediction window
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + prediction_length + 1):
    while(endpos <= endlap + prediction_length):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            #ipdb.set_trace()
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # this car's race is already over before the window ends
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # <3, totallen>; initialize the output matrix once per car
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS_SAVE, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                    # for p-risk
                    forecasts_samples[carno] = np.zeros((sample_cnt))
                # forecasts_et will be updated by forecasts
                target_val = forecasts_et[carno][2,:]
                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test mode
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #        laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec]
                             }
                          )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                           'start': start,
                           'feat_static_cat': static_cat,
                           'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                             }
                          )
                test_rec_cnt += 1
                #debug
                #debug_report('simu_onestep', rec, startlap, carno, col= _run_ts)
                #debug_report(f'simu_onestep: {startlap}-{endlap}, endpos={endpos}', target_val[:endpos], startlap, carno)
        # end of for each ts
        # RUN Prediction here, once per window over all cars
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=sample_cnt,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            if _use_mean:
                forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            else:
                forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_length))
            #update the forecasts , ready to use in the next prediction(regresive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
            #save the samples, the farest samples (last step of the window)
            forecasts_samples[carno][:] = forecasts[idx].samples[:,-1].reshape(-1)
        #go forward
        endpos += prediction_length
    return forecasts_et, forecasts_samples
# works on lapstatus ground truth
def sim_onestep_ex(predictor, prediction_length, freq,
                startlap, endlap,
                oracle_mode = MODE_ORACLE,
                verbose = False
                ):
    """
    Autoregressive rollout like sim_onestep_pred, but works on the lap
    status *ground truth* (COL_LAPSTATUS as-is, no pit-model rewrite).

    input:
        parameters  ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecast    ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    # endpos is the exclusive end of each prediction window
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + 1):
    while(endpos <= endlap + prediction_length + 1):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # this car's race is already over before the window ends
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # <3, totallen>; initialize the output matrix once per car
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                    #forecasts_et[carno][2,:endpos] = rec[run_ts,:endpos].copy().astype(np.float32)
                # forecasts_et will be updated by forecasts
                target_val = forecasts_et[carno][2,:]
                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test mode
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #        laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec]
                             }
                          )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                           'start': start,
                           'feat_static_cat': static_cat,
                           'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                             }
                          )
                test_rec_cnt += 1
        # end of for each ts
        # RUN Prediction here, once per window over all cars
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=100,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            #update the forecasts , ready to use in the next prediction(regresive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
        #go forward
        endpos += prediction_length
    #clear the unpred part beyond endlap
    for carno in forecasts_et.keys():
        forecasts_et[carno][2, endlap+1:] = np.nan
    return forecasts_et
def sim_onestep(predictor, prediction_length, freq,
                startlap, endlap,
                oracle_mode = MODE_ORACLE,
                verbose = False
                ):
    """
    Autoregressive rollout that predicts one car at a time (one-record
    test set per window), unlike sim_onestep_pred/_ex which batch all cars.

    input:
        parameters  ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecast    ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _test = []
        if events[_data[0]] != test_event:
            #jump out
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            # target_val is overwritten in place as the rollout advances
            target_val = rec[run_ts,:].copy().astype(np.float32)
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()
            # <3, totallen>
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if True:
                #bug fix, fixed the split point for all cars/ts
                #for endpos in range(max_len, context_len+prediction_length,step):
                #step = prediction_length
                #for endpos in range(startlap + prediction_length, endlap, step):
                endpos = startlap + prediction_length
                while(endpos < endlap and endpos < totallen):
                    # RUN Prediction for single record
                    _test = []
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #todo
                        #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                        #        laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                              )
                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)
                    #get prediction result
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
                    #update target_val so the next window sees the forecast
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
                    #save forecast
                    #save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    #print('car no:', carno, 'completed_laps:', completed_laps)
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
                    test_rec_cnt += 1
                    #go forward
                    endpos += prediction_length
            #one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
    return forecasts_et
# pred pit differs to true pit
def get_acc_onestint_pred(forecasts, startlap, nextpit, nextpit_pred, trim=2, currank = False):
    """
    input:
        trim  ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; eval for the stint start from startlap only
        nextpit ; dict carno -> true next pitstop
        nextpit_pred ; dict carno -> predicted next pitstop
        currank ; when True the prediction is the naive "rank unchanged" baseline
    output:
        list of rows [carno, startlap, startrank, endrank, diff, sign,
                      pred_endrank, pred_diff, pred_sign, pitpos, pitpos_pred]
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]
        #lap status condition: only cars that pit at startlap qualify;
        # inlap modes also require the adjacent lap to be a pit lap
        # NOTE(review): lapstatus_cont is unbound if _inlap_status is not in
        # {0,1,2}, and startlap+1 can overflow the array -- confirm inputs
        if _inlap_status == 0:
            lapstatus_cont = (forecasts[carno][0, startlap] == 1)
        elif _inlap_status == 1:
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap-1] == 1))
        elif _inlap_status == 2:
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap+1] == 1))
        if carno in _debug_carlist:
            _debug_msg = 'startlap=%d, total=%d, pitstop status = %s, nextpit=%s, nextpit_pred=%s'%(startlap, lapnum, lapstatus_cont,
                    'none' if (carno not in nextpit) else nextpit[carno],
                    'none' if (carno not in nextpit_pred) else nextpit_pred[carno],
                    )
            debug_print(_debug_msg)
        # check the lap status
        #if ((startlap < lapnum) and (forecasts[carno][0, startlap] == 1)):
        if ((startlap < lapnum) and (lapstatus_cont == True)):
            startrank = true_rank[startlap-trim]
            if not carno in nextpit:
                continue
            pitpos = nextpit[carno]
            if np.isnan(pitpos):
                continue
            #todo, use the true prediction that longer than maxlap
            if _force_endpit_align:
                # fall back to the true pit when no prediction is available
                if not carno in nextpit_pred:
                    #continue
                    pitpos_pred = pitpos
                else:
                    pitpos_pred = nextpit_pred[carno]
                    if np.isnan(pitpos_pred):
                        pitpos_pred = pitpos
            else:
                if not carno in nextpit_pred:
                    continue
                pitpos_pred = nextpit_pred[carno]
                if np.isnan(pitpos_pred):
                    #set prediction to the end
                    continue
            endrank = true_rank[pitpos-trim]
            #endrank_pred = true_rank[pitpos_pred-trim]
            diff = endrank - startrank
            sign = get_sign(diff)
            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                # read the predicted rank at the *predicted* pit lap
                pred_endrank = pred_rank[pitpos_pred-trim]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            rankret.append([carno, startlap, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign,
                            pitpos, pitpos_pred
                           ])
    return rankret
# pred pit differs to true pit
def get_acc_onestep_shortterm(forecasts, startlap, endlap, trim=0, currank = False):
    """
    Collect per-car rank-change records for the window [startlap, endlap].

    input:
        trim      ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts ; carno -> [5, totallen] matrix; row 0 lap_status,
                    row 3 true rank, row 4 predicted rank
        startlap  ; first lap of the evaluation window
        endlap    ; last lap of the evaluation window (record skipped when nan)
        currank   ; when True use the naive "rank stays the same" baseline
    output:
        list of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    records = []
    for car in forecasts:
        data = forecasts[car]
        total_laps = len(data[1, :])
        # window must start inside this car's series
        if startlap >= total_laps:
            continue
        rank_true = data[3, :]
        rank_pred = data[4, :]
        rank_at_start = rank_true[startlap - trim]
        if np.isnan(endlap):
            continue
        rank_at_end = rank_true[endlap - trim]
        true_delta = rank_at_end - rank_at_start
        # baseline model: the predicted end rank equals the current rank
        predicted_end = rank_at_start if currank else rank_pred[endlap - trim]
        pred_delta = predicted_end - rank_at_start
        records.append([car, startlap, rank_at_start,
                        rank_at_end, true_delta, get_sign(true_delta),
                        predicted_end, pred_delta, get_sign(pred_delta)
                        ])
    return records
# works when pred pitstop == true pitstop
def get_acc_onestint(forecasts, startlap, nextpit, trim=2, currank = False):
    """
    Evaluate one stint that starts at `startlap` (which must be a pit lap),
    works when the predicted pitstop equals the true pitstop.

    input:
        trim     ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5, totallen] matrix; row 0 lap_status,
                   row 3 true rank, row 4 predicted rank
        startlap ; eval for the stint starting from this lap only
        nextpit  ; carno -> next pitstop lap
        currank  ; when True use the naive "rank stays the same" baseline
    output:
        list of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    records = []
    for car, data in forecasts.items():
        total_laps = len(data[1, :])
        # only cars whose series covers startlap AND that pit on startlap
        if startlap >= total_laps or data[0, startlap] != 1:
            continue
        rank_true = data[3, :]
        rank_pred = data[4, :]
        start_rank = rank_true[startlap - trim]
        if car not in nextpit:
            continue
        pit_lap = nextpit[car]
        if np.isnan(pit_lap):
            continue
        end_rank = rank_true[pit_lap - trim]
        delta = end_rank - start_rank
        pred_end = start_rank if currank else rank_pred[pit_lap - trim]
        pred_delta = pred_end - start_rank
        records.append([car, startlap, start_rank,
                        end_rank, delta, get_sign(delta),
                        pred_end, pred_delta, get_sign(pred_delta)
                        ])
    return records
#
# simulation
#
def run_simulation_stint(predictor, prediction_length, freq,
                         carno, stintid, loopcnt,
                         datamode = MODE_ORACLE):
    """
    Monte-Carlo simulation of a single stint of a single car.

    Repeats the one-step stint simulation `loopcnt` times for car `carno`,
    starting at the pit lap selected by `stintid`.

    input:
        predictor         ; trained forecasting model
        prediction_length ; forecast horizon in laps
        freq              ; pandas frequency string of the series
        carno             ; car number to simulate
        stintid           ; index into this car's pit-lap list (stint start)
        loopcnt           ; number of simulation repetitions
        datamode          ; oracle-mode flags forwarded to sim_onestep_pred
    return:
        (df, full_samples, full_tss, maxnext_pred)
        df           ; DataFrame of per-run stint evaluation records
        full_samples ; left empty -- the accumulation call below is commented out
        full_tss     ; left empty -- same reason
        maxnext_pred ; predicted next pit lap from the last run
    NOTE(review): for an unsupported _exp_id this returns None (bare return)
    instead of the 4-tuple -- callers must handle that case.
    """
    rankret = []
    # the ground truth pit laps for all cars
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    #init samples array
    full_samples = {}
    full_tss = {}
    #here, test only one stint for carno and stintid
    pitlap = pitmat[carno][stintid]
    for runid in range(loopcnt):
    #for pitlap in allpits:
        #1. update lap status (skipped when the pit model is the oracle)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        #2. get maxnext: next pit by ground truth and by the pit model
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        #only for one car
        maxnext = nextpit[carno]
        maxnext_pred = nextpit_pred[carno]
        #debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                #print('nextpit:', nextpit[12], nextpit_pred[12], 'maxnext:', maxnext, maxnext_pred)
                #debugstr = f'nextpit: {nextpit[]}, {nextpit_pred[12]}, maxnext: {maxnext}, {maxnext_pred}'
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)
        #run one step sim from pitlap to maxnext
        #to get the forecast_sample, set max = mexnext_pred only,
        #rather than max(maxnext,maxnext_pred)
        #
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, maxnext_pred,
                oracle_mode = datamode,
                sample_cnt = 100
                )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            return
        ## evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        #add endlap
        #_ = [x.append(maxnext_pred) for x in ret]
        rankret.extend(ret)
        ## add to full_samples
        #eval_full_samples(maxnext_pred,
        #        forecast_samples, forecast,
        #        full_samples, full_tss)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                'endrank', 'diff', 'sign',
                'pred_endrank', 'pred_diff', 'pred_sign',
                'endlap','pred_endlap'
                ])
    return df, full_samples, full_tss, maxnext_pred
def run_simulation_pred(predictor, prediction_length, freq,
                        datamode = MODE_ORACLE):
    """
    Stint-level simulation over all pit laps, using the pit model's
    predicted next pit laps alongside the ground truth.

    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation up to max(true, predicted) next pit
            2. eval stint performance (get_acc_onestint_pred)

    input:
        predictor         ; trained forecasting model
        prediction_length ; forecast horizon in laps
        freq              ; pandas frequency string of the series
        datamode          ; oracle-mode flags forwarded to sim_onestep_pred
    return:
        DataFrame with per-stint records [carno, startlap, startrank,
        endrank, diff, sign, pred_endrank, pred_diff, pred_sign,
        endlap, pred_endlap]
    """
    rankret = []
    # the ground truth pit laps for all cars
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    for pitlap in allpits:
        #1. update lap status (skipped when the pit model is the oracle)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        #2. get maxnext: next pit by ground truth and by the pit model
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        #debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                #print('nextpit:', nextpit[12], nextpit_pred[12], 'maxnext:', maxnext, maxnext_pred)
                #debugstr = f'nextpit: {nextpit[]}, {nextpit_pred[12]}, maxnext: {maxnext}, {maxnext_pred}'
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)
        #run one step sim from pitlap to maxnext
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, max(maxnext, maxnext_pred),
                oracle_mode = datamode,
                sample_cnt = 100
                )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        rankret.extend(ret)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                'endrank', 'diff', 'sign',
                'pred_endrank', 'pred_diff', 'pred_sign',
                'endlap','pred_endlap'
                ])
    return df
#prediction of shorterm + pred pit model
def run_simulation_shortterm(predictor, prediction_length, freq,
                             datamode = MODE_ORACLE,
                             sample_cnt = 100):
    """
    Short-term (fixed horizon) prediction over the whole race, using the
    predicted pit model for the lap status.

    For every lap in [10, maxlap - prediction_length) runs a one-step
    simulation of the next `prediction_length` laps and evaluates the
    forecast against the ground truth.

    input:
        predictor         ; trained forecasting model
        prediction_length ; forecast horizon in laps
        freq              ; pandas frequency string of the series
        datamode          ; oracle-mode flags forwarded to sim_onestep_pred
        sample_cnt        ; number of sample paths per forecast
    return:
        (df, full_samples, full_tss)
        df           ; DataFrame of per-lap evaluation records
        full_samples ; carno -> (sample_cnt, maxlap) forecast sample matrix
        full_tss     ; carno -> (maxlap,) ground-truth series
    """
    rankret = []
    # the ground truth pit laps for all cars
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    #init samples array
    full_samples = {}
    full_tss = {}
    for pitlap in range(10, maxlap-prediction_length):
        #1. update lap status (skipped when the pit model is the oracle)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        #run one step sim from pitlap to maxnext
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, pitlap + prediction_length,
                oracle_mode = datamode,
                sample_cnt = sample_cnt
                )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, prediction_length)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, prediction_length, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this stint
        #ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred)
        ret = get_acc_onestep_shortterm(forecasts_et, pitlap, pitlap+prediction_length)
        rankret.extend(ret)
        # add to full_samples; laptime experiments keep raw values, the
        # others are converted to ranks inside eval_full_samples
        evalbyrank = False if _exp_id == 'laptime2rank' else True
        eval_full_samples(pitlap + prediction_length,
                forecast_samples, forecast,
                full_samples, full_tss, evalbyrank=evalbyrank)
        print('evalbyrank:', evalbyrank)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                'endrank', 'diff', 'sign',
                'pred_endrank', 'pred_diff', 'pred_sign',
                ])
    return df, full_samples, full_tss
# oracle sim
def run_simulation(predictor, prediction_length, freq,
                   datamode = MODE_ORACLE):
    """
    Oracle stint-level simulation: uses only the ground-truth pit laps
    (no pit model, no sampled forecasts).

    step:
        1. loop on each pit lap
            1. onestep simulation up to the true next pit
            2. eval stint performance (get_acc_onestint)

    input:
        predictor         ; trained forecasting model
        prediction_length ; forecast horizon in laps
        freq              ; pandas frequency string of the series
        datamode          ; oracle-mode flags forwarded to sim_onestep_ex
    return:
        DataFrame with per-stint records [carno, startlap, startrank,
        endrank, diff, sign, pred_endrank, pred_diff, pred_sign]
    """
    rankret = []
    allpits, pitmat, maxlap = get_pitlaps()
    for pitlap in allpits:
        print(f'start pitlap: {pitlap}')
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        #run one step sim from pitlap to maxnext
        forecast = sim_onestep_ex(predictor, prediction_length, freq,
                pitlap, maxnext,
                oracle_mode = datamode
                )
        print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this stint
        ret = get_acc_onestint(forecasts_et, pitlap, nextpit)
        rankret.extend(ret)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                'endrank', 'diff', 'sign',
                'pred_endrank', 'pred_diff', 'pred_sign',
                ])
    return df
#
# ------------
#
def longterm_predict(predictor, runs, prediction_length, freq,
                   useeid = False,
                   run_ts= COL_LAPTIME,
                   test_event = 'Indy500-2018',
                   test_cars = [],
                   use_global_dict = True,
                   oracle_mode = MODE_ORACLE,
                   half_moving_win = 0,
                   train_ratio=0.8,
                   log_transform = False,
                   verbose = False
                ):
    """
    Rolling long-term prediction over the test event.

    Splits each time series into train/test by `train_ratio`, then walks a
    rolling window over the test part, predicting `prediction_length` laps
    per window and writing the predictions back into `rec` so they become
    input context for the following windows.

    input:
        predictor   ; trained forecasting model
        runs        ; event index to run on (>=0), or <0 for all events
        oracle_mode ; bit flags; non-oracle flags simulate real deployment by
                      masking/modelling the track and lap status covariates
                      in the prediction window of the test set
        half_moving_win ; rolling step: 1 -> prediction_length/2,
                      2 -> prediction_length, anything else -> 1 lap
        test_cars   ; NOTE(review): accepted but never used in this function
        log_transform ; when True predict log(target + 1) instead of target
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3,: -> placeholder (rank, filled later by eval_*)
            4,: -> placeholder
    NOTE(review): run_ts, test_event, feature_mode and context_ratio are
    overridden right below from module-level globals; the parameters of the
    same name are ignored.
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    # helper models used by the MODE_PREDTRACK / MODE_DISTURB_* branches
    init_track_model()
    init_adjust_track_model()
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []
    forecasts_et = {}
    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out: only the test event is processed
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10
        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()
            # <3, totallen>
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                        'start': start,
                        'feat_static_cat': static_cat,
                        'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                            rec[COL_LAPSTATUS,:]]
                       }
                      )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = int(prediction_length/2)
                elif half_moving_win == 2:
                    step = prediction_length
                else:
                    step = 1
                #bug fix, fixed the split point for all cars/ts
                #for endpos in range(max_len, context_len+prediction_length,step):
                for endpos in range(context_len+prediction_length, max_len, step):
                    #check if enough for this ts
                    if endpos > totallen:
                        break
                    # RUN Prediction for single record
                    _test = []
                    # check pitstop(stint) in the last prediction
                    # use ground truth of target before the last pitstop
                    if np.sum(lap_status[endpos-2*prediction_length:endpos-prediction_length]) > 0:
                        # pit found
                        # adjust endpos (local rebinding only; the range
                        # iterator above is unaffected)
                        pitpos = np.where(lap_status[endpos-2*prediction_length:endpos-prediction_length] == 1)
                        endpos = endpos-2*prediction_length + pitpos[0][0] + prediction_length + 1
                        #print('endpos:',endpos,pitpos)
                        #check if enough for this ts
                        if endpos > totallen:
                            break
                    #reset target, status to the untouched copies
                    target_val = rec[run_ts,:].copy().astype(np.float32)
                    rec[COL_LAPSTATUS, :] = lap_status
                    rec[COL_TRACKSTATUS, :] = track_status
                    rec[COL_LAPS_INSTINT, :] = pitage_status
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                # NOTE(review): pitage is held at start_pitage+1 for
                                # every no-pit lap instead of increasing lap by lap,
                                # unlike the TESTCURTRACK branch above -- confirm
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec
                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec
                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                               }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                               }
                              )
                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)
                    #get prediction result
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
                    #update target_val and feed predictions back into rec
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
                    #save forecast
                    #save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    #print('car no:', carno, 'completed_laps:', completed_laps)
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
                    test_rec_cnt += 1
            #one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
        #train_set.extend(_train)
        #test_set.extend(_test)
    #print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    #train_ds = ListDataset(train_set, freq=freq)
    #test_ds = ListDataset(test_set, freq=freq)
    return forecasts_et
# In[12]:
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset, maxlap=200):
    """
    Evaluate stint rank by laptime forecasting.

    Converts the per-lap laptime series (true and predicted) into elapsed
    time, ranks the cars lap by lap, and writes the ranks back into the
    input matrices.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target (laptime)
            2,: -> pred target (laptime)
            3,: -> filled with the true rank (output)
            4,: -> filled with the predicted rank (output)
        prediction_length ; unused here, kept for interface compatibility
        start_offset ; DataFrame with ['car_number', 'elapsed_time'] giving
            the elapsed time at lap 0; any other value -> offset 0 for all
        maxlap ; maximum number of laps of the event (default 200, Indy500)
    return:
        forecasts_et (also modified in place)
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    caridmap = {key: idx for idx, key in enumerate(carlist)}
    # elapsed_time[0] holds the true series, elapsed_time[1] the prediction
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:, :] = np.nan
    for carno in forecasts_et.keys():
        # bugfix: offset was previously unbound (NameError) when
        # start_offset was not a DataFrame; default to no offset instead
        offset = 0
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        lapnum = len(forecasts_et[carno][1,:])
        laptime_array = forecasts_et[carno][1,:]
        elapsed_time[0, caridmap[carno], :lapnum] = np.cumsum(laptime_array) + offset
        laptime_array = forecasts_et[carno][2,:]
        elapsed_time[1, caridmap[carno], :lapnum] = np.cumsum(laptime_array) + offset
    # double argsort turns elapsed time into a 0-based rank per lap;
    # nan entries sort to the bottom
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno], :lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno], :lapnum]
    return forecasts_et
#
def eval_full_samples_old(lap, forecast_samples, forecast, full_samples, full_tss, maxlap=200):
    """
    Legacy variant of eval_full_samples: fold the rank distribution for one
    lap into the running full_samples / full_tss dictionaries (always by rank).

    input:
        lap              ; lap number being evaluated
        forecast_samples ; {} carno -> 1-D array of sampled pred targets
        forecast         ; {} carno -> 5 x totallen matrix (row 1 = true target)
        full_samples     ; {} carno -> (samplecnt, maxlap), updated in place
        full_tss         ; {} carno -> (maxlap,), updated in place
    return:
        None (results accumulated into full_samples / full_tss)
    """
    cars = list(forecast.keys())
    pos_of = {car: i for i, car in enumerate(cars)}
    n_samples = len(forecast_samples[cars[0]])
    # per-car true targets per lap and sampled predictions for this lap
    truth = np.full((len(cars), maxlap), np.nan)
    sampled = np.full((len(cars), n_samples), np.nan)
    for car in cars:
        row = pos_of[car]
        series = forecast[car][1, :]
        truth[row, :len(series)] = series
        sampled[row, :] = forecast_samples[car]
    # double argsort -> 0-based rank per column; nan values sort last
    true_rank = np.argsort(np.argsort(truth, axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(sampled, axis=0), axis=0)
    # save the rank back
    for car in cars:
        if car not in full_tss:
            # first time this car is seen: allocate nan-filled buffers
            full_tss[car] = np.full(maxlap, np.nan)
            full_samples[car] = np.full((n_samples, maxlap), np.nan)
        row = pos_of[car]
        full_tss[car][:lap] = true_rank[row][:lap]
        full_tss[car][lap] = true_rank[row][lap]
        full_samples[car][:, lap] = pred_rank[row, :]
    return
#
def eval_full_samples(lap, forecast_samples, forecast, full_samples, full_tss, maxlap=200, evalbyrank = True):
    """
    Fold the forecast-sample distribution for one lap into the running
    full_samples / full_tss dictionaries.

    input:
        lap              ; lap number being evaluated
        forecast_samples ; {} carno -> 1-D array of sampled pred targets
        forecast         ; {} carno -> 5 x totallen matrix (row 1 = true target)
        full_samples     ; {} carno -> (samplecnt, maxlap), updated in place
        full_tss         ; {} carno -> (maxlap,), updated in place
        evalbyrank       ; True -> store 0-based ranks, False -> raw values
    return:
        None (results accumulated into full_samples / full_tss)
    """
    cars = list(forecast.keys())
    pos_of = {car: i for i, car in enumerate(cars)}
    n_samples = len(forecast_samples[cars[0]])
    # per-car true targets per lap and sampled predictions for this lap
    truth = np.full((len(cars), maxlap), np.nan)
    sampled = np.full((len(cars), n_samples), np.nan)
    for car in cars:
        row = pos_of[car]
        series = forecast[car][1, :]
        truth[row, :len(series)] = series
        sampled[row, :] = forecast_samples[car]
    if evalbyrank == True:
        # double argsort -> 0-based rank per column; nan values sort last
        true_vals = np.argsort(np.argsort(truth, axis=0), axis=0)
        pred_vals = np.argsort(np.argsort(sampled, axis=0), axis=0)
    else:
        # keep the raw target values (e.g. laptime) instead of ranks
        true_vals = truth
        pred_vals = sampled
    # save the result back
    for car in cars:
        if car not in full_tss:
            # first time this car is seen: allocate nan-filled buffers
            full_tss[car] = np.full(maxlap, np.nan)
            full_samples[car] = np.full((n_samples, maxlap), np.nan)
        row = pos_of[car]
        full_tss[car][:lap] = true_vals[row][:lap]
        full_tss[car][lap] = true_vals[row][lap]
        full_samples[car][:, lap] = pred_vals[row, :]
    return
def eval_stint_direct(forecasts_et, prediction_length):
    """
    Evaluate rank directly from timediff forecasting.

    Ranks the cars lap by lap on the raw target values (rows 1 and 2) and
    writes the resulting ranks back into rows 3 and 4.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3,: -> filled with the true rank (output)
            4,: -> filled with the predicted rank (output)
        prediction_length ; unused here, kept for interface compatibility
    return:
        forecasts_et (also modified in place)
    """
    cars = list(forecasts_et.keys())
    pos_of = {car: i for i, car in enumerate(cars)}
    #todo, Indy500 -> 200 max laps
    maxlap = 200
    # layer 0 holds the true series, layer 1 the prediction
    diff_time = np.full((2, len(cars), maxlap), np.nan)
    for car in cars:
        row = pos_of[car]
        n = len(forecasts_et[car][1, :])
        diff_time[0, row, :n] = forecasts_et[car][1, :]
        diff_time[1, row, :n] = forecasts_et[car][2, :]
    # double argsort -> 0-based rank per lap; nan entries sort last
    true_rank = np.argsort(np.argsort(diff_time[0], axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(diff_time[1], axis=0), axis=0)
    # save the rank back
    for car in cars:
        n = len(forecasts_et[car][1, :])
        forecasts_et[car][3, :] = true_rank[pos_of[car], :n]
        forecasts_et[car][4, :] = pred_rank[pos_of[car], :n]
    return forecasts_et
#calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset, maxlap=200):
    """
    Evaluate rank by laptime forecasting (same algorithm as
    eval_stint_bylaptime): converts the per-lap laptime series to elapsed
    time, ranks the cars lap by lap and writes the ranks back.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            1,: -> true target (laptime)
            2,: -> pred target (laptime)
            3,: -> filled with the true rank (output)
            4,: -> filled with the predicted rank (output)
        prediction_length ; unused here, kept for interface compatibility
        start_offset ; DataFrame with ['car_number', 'elapsed_time'] giving
            the elapsed time at lap 0; any other value -> offset 0 for all
        maxlap ; maximum number of laps of the event (default 200, Indy500)
    return:
        forecasts_et (also modified in place)
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    caridmap = {key: idx for idx, key in enumerate(carlist)}
    # elapsed_time[0] holds the true series, elapsed_time[1] the prediction
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:, :] = np.nan
    for carno in forecasts_et.keys():
        # bugfix: offset was previously unbound (NameError) when
        # start_offset was not a DataFrame; default to no offset instead
        offset = 0
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        lapnum = len(forecasts_et[carno][1,:])
        laptime_array = forecasts_et[carno][1,:]
        elapsed_time[0, caridmap[carno], :lapnum] = np.cumsum(laptime_array) + offset
        laptime_array = forecasts_et[carno][2,:]
        elapsed_time[1, caridmap[carno], :lapnum] = np.cumsum(laptime_array) + offset
    # double argsort turns elapsed time into a 0-based rank per lap;
    # nan entries sort to the bottom
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno], :lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno], :lapnum]
    return forecasts_et
# In[13]:
def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
        test_event='Indy500-2018', test_cars = None,
        datamode = MODE_ORACLE,model = 'oracle'):
    """
    Run one long-term prediction experiment on the configured test event.

    input:
        prediction_length ; forecast horizon in laps
        half_moving_win   ; rolling step mode forwarded to longterm_predict
        train_ratio       ; fraction of the series used to size the train split
        trainid           ; id of the trained model to load
        test_event        ; kept for interface compatibility (the global
                            _test_event actually selects the event)
        test_cars         ; optional list of car numbers; None means all cars
        datamode          ; oracle-mode flags forwarded to longterm_predict
        model             ; model name to load
    return:
        forecasts dict as produced by longterm_predict
    dependency: test_event, test on one event only
    """
    # bugfix: the previous mutable default argument (test_cars=[]) is a
    # shared-state pitfall; use a None sentinel instead (same behavior)
    if test_cars is None:
        test_cars = []
    retdf = []
    pred_ret = {}
    ds_ret = {}
    rank_result = {}
    predictor = {}
    #for model in models:
    print('exp:',inspect.stack()[0][3],'model:', model,
            'datamode:', get_modestr(datamode),'eval:', _exp_id )
    predictor[model] = load_model(prediction_length, model,
            trainid=trainid)
    ### create test dataset
    forecasts = longterm_predict(predictor[model],
            events_id[_test_event], prediction_length,freq,
            oracle_mode=datamode,
            run_ts = _run_ts,
            test_cars=test_cars,
            half_moving_win= half_moving_win,
            train_ratio=train_ratio
            )
    #forecasts = eval_stint_rank(forecasts_et, prediction_length,
    #                global_start_offset[test_event])
    return forecasts
# In[14]:
def get_sign(diff):
    """Return 1 for a positive diff, -1 for a negative one, 0 otherwise.

    Note: non-comparable values such as nan fall through both tests and
    yield 0, matching the original behavior.
    """
    if diff > 0:
        return 1
    if diff < 0:
        return -1
    return 0
def get_stint_acc(forecasts, trim=2, currank = False):
    """
    Evaluate the rank-change prediction of every stint of every car.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        currank ; when True use the naive "rank stays the same" baseline
    output:
        DataFrame with columns [carno, stintid, startrank, endrank, diff,
        sign, pred_endrank, pred_diff, pred_sign]
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]
        # laps where this car pitted (may be empty for a no-stop car)
        pitpos_list = np.where(forecasts[carno][0,:] == 1)[0]
        stintid = 0
        startrank = true_rank[0]
        for pitpos in pitpos_list:
            endrank = true_rank[pitpos-trim]
            diff = endrank - startrank
            sign = get_sign(diff)
            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
            else:
                pred_endrank = pred_rank[pitpos-trim]
            pred_diff = pred_endrank - startrank
            pred_sign = get_sign(pred_diff)
            rankret.append([carno, stintid, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                            ])
            stintid += 1
            startrank = true_rank[pitpos-trim]
        # final stint: from the last pit (or the race start when the car
        # never pitted) to the end of the series.
        # bugfix: previously pitpos_list[-1] raised IndexError when a car
        # had no pit stop at all; that car now yields one full-race stint.
        if len(pitpos_list) == 0 or pitpos_list[-1] < lapnum - 1:
            endrank = true_rank[-1]
            diff = endrank - startrank
            sign = get_sign(diff)
            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
            else:
                pred_endrank = pred_rank[-1]
            pred_diff = pred_endrank - startrank
            pred_sign = get_sign(pred_diff)
            rankret.append([carno, stintid, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                            ])
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'stintid', 'startrank',
                                         'endrank', 'diff', 'sign',
                                         'pred_endrank', 'pred_diff', 'pred_sign',
                                         ])
    return df
#
# configuration
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'   # dataset id the models were trained on
_test_event = 'Indy500-2018'    # held-out event used for all evaluations
#_test_event = 'Indy500-2019'
_train_len = 40
_feature_mode = FEATURE_STATUS  # which covariates are fed to the model
_context_ratio = 0.             # 0. -> context length derived from prediction_length
#_task_id = 'timediff' # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
#
#_task_id = 'lapstatus' # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus' #rank, laptime, laptim2rank, timediff2rank...
_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
_inlap_status = 1               # pit in-lap encoding variant; selects the dbid below
_force_endpit_align = False
_include_endpit = False
#_use_mean = False # mean or median to get prediction from samples
_use_mean = True # mean or median to get prediction from samples
# In[16]:
# module-level state, filled in by init()
global_start_offset = {}        # event -> DataFrame[car_number, elapsed_time] at lap 0
global_carids = {}              # carno -> global car id
laptime_data = None             # per-event time-series data loaded from pickle
freq = "1min"                   # nominal pandas frequency of the series
decode_carids = {}              # global car id -> carno (inverse of global_carids)
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
def init(pitmodel = ''):
    """Load race csvs and the laptime pickle into module globals.

    Populates global_start_offset, global_carids, laptime_data and
    decode_carids, and selects the pit-model implementation bound to the
    global _pitmodel.

    Args:
        pitmodel: '' or a non-string -> PitModelSimple;
                  'oracle'           -> the literal string (oracle mode);
                  any other string   -> treated as a model file for PitModelMLP.
    """
    global global_carids, laptime_data, global_start_offset, decode_carids,_pitmodel
    global dbid, _inlap_status
    dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
    stagedata = {}
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata = stagedata[event]
        # per-car elapsed_time at lap 0, used later to rebuild absolute times
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]
    # load the preprocessed laptime/rank/timediff dataset
    import pickle
    #with open('laptime_rank_timediff_fulltest-oracle-%s.pickle'%year, 'rb') as f:
    laptimefile = f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
    with open(laptimefile, 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    decode_carids={carid:carno for carno, carid in global_carids.items()}
    print(f'init: load dataset {laptimefile} with {len(laptime_data)} races, {len(global_carids)} cars')
    # choose the pit-model implementation
    if not isinstance(pitmodel, str):
        #force into currank model, zero doesn't work here
        _pitmodel = PitModelSimple(top8=(True if pitmodel==0 else False))
        print(f'init pitmodel as PitModelSimple')
    elif pitmodel=='oracle':
        # keep the sentinel string; callers test for it directly
        _pitmodel = pitmodel
    else:
        _pitmodel = PitModelMLP(modelfile = pitmodel)
        print(f'init pitmodel as PitModelMLP(pitmodel)')
def runtest(modelname, model, datamode, naivemode, trainid= "2018"):
    """Run one experiment configuration and score its stint predictions.

    Args:
        modelname: run label (not used inside; kept for the caller's loop).
        model: forecaster name passed through to run_exp.
        datamode: MODE_* bit combination passed through to run_exp.
        naivemode: forwarded as `currank` to get_stint_acc.
        trainid: training run id.

    Returns:
        (acc, mae, rmse, r2); note rmse here is the raw mean_squared_error
        (not square-rooted).  Returns (0, 0, 0, 0) for unsupported _exp_id.
    """
    forecast = run_exp(2,2, train_ratio =0.1 , trainid = trainid,
            datamode=datamode, model=model)
    # dispatch on the experiment id to the matching evaluation routine
    if _exp_id in ('rank', 'timediff2rank'):
        forecasts_et = eval_stint_direct(forecast, 2)
    elif _exp_id == 'laptime2rank':
        forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
    else:
        print(f'Error, {_exp_id} evaluation not support yet')
        return 0,0, 0,0
    df = get_stint_acc(forecasts_et, currank = naivemode, trim= _trim)
    pred_vals = df['pred_diff'].values
    true_vals = df['diff'].values
    total = len(df)
    acc = len(df[df['sign'] == df['pred_sign']]) / total
    mae1 = np.sum(np.abs(pred_vals - true_vals)) / total
    rmse = mean_squared_error(pred_vals, true_vals)
    mae = mean_absolute_error(pred_vals, true_vals)
    r2 = r2_score(pred_vals, true_vals)
    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}')
    return acc, mae, rmse, r2
def get_evalret(df):
    """Score stint-level rank-change predictions against the truth and a
    naive keep-current-rank baseline.

    Args:
        df: DataFrame with columns sign/pred_sign, diff/pred_diff and
            startrank/endrank (one row per stint).

    Returns:
        2x4 ndarray: [[acc, mae, rmse, r2], [acc_naive, mae_naive,
        rmse_naive, r2_naive]].
    """
    total = len(df)
    pred = df['pred_diff'].values
    truth = df['diff'].values
    # model scores
    acc = len(df[df['sign'] == df['pred_sign']]) / total
    mae1 = np.sum(np.abs(pred - truth)) / total
    rmse = math.sqrt(mean_squared_error(pred, truth))
    mae = mean_absolute_error(pred, truth)
    r2 = r2_score(pred, truth)
    # naive baseline: predict no rank change at all
    start = df['startrank'].values
    end = df['endrank'].values
    acc_naive = len(df[df['startrank'] == df['endrank']]) / total
    mae_naive = np.mean(np.abs(truth))
    rmse_naive = math.sqrt(mean_squared_error(start, end))
    r2_naive = r2_score(start, end)
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, {%d}\n \
naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2, total,
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
    )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
def get_evalret_shortterm(df):
    """Score short-term (per-lap) rank forecasts against the truth and the
    keep-current-rank naive baseline.

    Args:
        df: DataFrame with columns startlap, startrank, endrank,
            pred_endrank and diff (one row per forecast).

    Returns:
        2x4 ndarray: [[acc, mae, rmse, r2], [acc_naive, mae_naive,
        rmse_naive, r2_naive]], where acc is leader (rank 0) precision.
    """
    maxlap = np.max(df['startlap'].values)
    minlap = np.min(df['startlap'].values)
    # leader prediction precision: of the laps where we predicted rank 0,
    # how often was the true end rank also 0
    top1_pred = df[df['pred_endrank']==0]
    correct = top1_pred[top1_pred['pred_endrank']==top1_pred['endrank']]
    # epsilon guards against an empty prediction set
    acc = len(correct)/(len(top1_pred) + 1e-10)
    rmse = math.sqrt(mean_squared_error(df['pred_endrank'].values , df['endrank'].values))
    mae = mean_absolute_error(df['pred_endrank'].values , df['endrank'].values)
    r2 = r2_score(df['pred_endrank'].values , df['endrank'].values)
    # absolute error normalized by the number of distinct start laps
    mae1 = np.sum(np.abs(df['pred_endrank'].values - df['endrank'].values))
    mae1 = mae1/ (maxlap -minlap +1)
    # naive baseline result
    top1_naive = df[df['startrank']==0]
    n_correct = top1_naive[top1_naive['startrank']==top1_naive['endrank']]
    # FIX: guard against an empty top1_naive the same way `acc` guards
    # against an empty top1_pred (previously a ZeroDivisionError)
    acc_naive = len(n_correct)/(len(top1_naive) + 1e-10)
    mae_naive = np.mean(np.abs(df['diff'].values))
    mae_naive1 = np.sum(np.abs(df['diff'].values))
    mae_naive1 = mae_naive1 / (maxlap - minlap + 1)
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, {%d}\n \
naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2, len(top1_pred),
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
    )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
#
# In[20]:
def mytest():
    """Run the grid of model/mode configurations and save stint metrics.

    Results are cached: if the csv for the current configuration already
    exists it is loaded and returned instead of re-running the experiments.
    ('evluate' (sic) is kept in the filename for compatibility with
    previously generated result files.)

    Returns:
        DataFrame with columns runid/acc/mae/rmse/r2.
    """
    savefile = f'stint-evluate-{_exp_id}-d{_dataset_id}-t{_test_event}-c{_context_ratio}_trim{_trim}.csv'
    if os.path.exists(savefile):
        print(f'{savefile} already exists, bye')
        # FIX: return the cached result instead of discarding it
        retdf = pd.read_csv(savefile)
        return retdf
    # runid -> [model, datamode, naivemode]
    config = {'fulloracle':['oracle',MODE_ORACLE,False],
              'laponly':['oracle',MODE_ORACLE_LAPONLY,False],
              'notracklap':['oracle',MODE_NOTRACK + MODE_NOLAP,False],
              'fullpred':['oracle',MODE_PREDTRACK + MODE_PREDPIT,False],
              'curtrack':['oracle',MODE_TESTCURTRACK,False],
              'zerotrack':['oracle',MODE_TESTZERO,False],
              'predtrack':['oracle',MODE_PREDTRACK + MODE_ORACLE_TRACKONLY,False],
              'predpit':['oracle',MODE_PREDPIT + MODE_ORACLE_LAPONLY,False],
              'deepAR':['deepAR',MODE_ORACLE,False],
              'naive':['zero',MODE_ORACLE, True],
             }
    cols = ['runid','acc','mae', 'rmse', 'r2']
    result = []
    for modelname in config.keys():
        acc, mae, rmse, r2 = runtest(modelname, config[modelname][0],
                config[modelname][1],config[modelname][2])
        result.append([modelname, acc, mae, rmse, r2])
    retd = pd.DataFrame(result,columns=cols)
    # FIX: write to `savefile`, the exact name checked at the top; the old
    # code wrote a name without the `_trim{_trim}` suffix, so the cache
    # check above could never match the file actually written.
    retd.to_csv(savefile, float_format='%.3f')
    return retd
# In[ ]:
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    # logging configuration
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # cmd argument parser
    usage = 'stint_predictor_fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    # NOTE(review): --contextratio is parsed but opt.contextratio is never
    # copied into _context_ratio below -- confirm whether this is intended
    parser.add_option("--contextratio", dest="contextratio", default=0.)
    parser.add_option("--trim", dest="trim", type=int, default=0)
    opt, args = parser.parse_args()
    # set global parameters from the command line
    _dataset_id = opt.datasetid
    _test_event = opt.testevent
    _trim = opt.trim
    # select target column and evaluation scheme from the task id
    if opt.taskid == 'laptime':
        _task_id = 'laptime' # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
        _exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff' # rank,laptime, the trained model's task
        _run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
        _exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'rank':
        _task_id = 'rank' # rank,laptime, the trained model's task
        _run_ts = COL_RANK #COL_LAPTIME,COL_RANK
        _exp_id='rank' #rank, laptime, laptim2rank, timediff2rank...
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)
    if _dataset_id=='' or _test_event=='':
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)
    # NOTE(review): find() > 0 misses a dataset id that *starts* with
    # 'pitage' (find returns 0) -- presumably ids always start with 'indy';
    # confirm, otherwise this should be >= 0 or `'pitage' in _dataset_id`.
    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE
    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
            _task_id)
    init()
    mytest()
| 157,786 | 36.284263 | 310 | py |
rankpredictor | rankpredictor-master/src/indycar/model/quicktest_simulator.py | #!/usr/bin/env python
# coding: utf-8
# ### stint simulator
# based on: stint_simulator_shortterm_pitmodel.py
#
#
# long term predictor by continuously regressive forecasting at each pitstop
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random, math
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from indycar.model.pitmodel import PitModelSimple, PitModelMLP
from indycar.model.deeparw import DeepARWeightEstimator
#from gluonts.model.deepar import DeepAREstimator
from indycar.model.deepar import DeepAREstimator
import indycar.model.global_variables as gvar
import os
random.seed()   # seed the PRNG from OS entropy
os.getcwd()     # NOTE(review): return value discarded -- this call has no effect
#GPUID = 1
# ### global constants
# In[3]:
#
# Column layout of each laptime_data record rec[feature, lap].
# There should be no NaNs in the middle of a ts; NaNs only pad the tail.
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8
# NOTE(review): aliases COL_LAP2NEXTPIT (both 8) -- confirm this is intended
COL_TARGET_PREDICTED = 8
# added new features
COL_LEADER_PITCNT = 9
COL_TOTAL_PITCNT = 10
COL_SHIFT_TRACKSTATUS = 11
COL_SHIFT_LAPSTATUS = 12
COL_SHIFT_LEADER_PITCNT = 13
COL_SHIFT_TOTAL_PITCNT = 14
COL_LASTFEATURE = 14
# dynamically extended space used during simulation (save/restore slots)
COL_TRACKSTATUS_SAVE = COL_LASTFEATURE+1
COL_LAPSTATUS_SAVE = COL_LASTFEATURE+2
COL_CAUTION_LAPS_INSTINT_SAVE = COL_LASTFEATURE+3
COL_LAPS_INSTINT_SAVE= COL_LASTFEATURE+4
COL_ENDPOS = COL_LASTFEATURE+5
# bit flags selecting covariate features (combined with |, tested via test_flag)
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
FEATURE_LEADER_PITCNT = 8
FEATURE_TOTAL_PITCNT = 16
FEATURE_SHIFT_TRACKSTATUS = 32
FEATURE_SHIFT_LAPSTATUS = 64
FEATURE_SHIFT_LEADER_PITCNT = 128
FEATURE_SHIFT_TOTAL_PITCNT = 256
# feature bit -> (long name, single-char code used by decode_feature_mode)
# NOTE(review): the 'L'/'T' codes are reused by the SHIFT_ variants and
# FEATURE_SHIFT_TRACKSTATUS uses 'Y' -- confirm the short codes are intended
_feature2str= {
        FEATURE_STATUS : ("FEATURE_STATUS",'S'),
        FEATURE_PITAGE : ("FEATURE_PITAGE",'A'),
        FEATURE_LEADER_PITCNT : ("FEATURE_LEADER_PITCNT",'L'),
        FEATURE_TOTAL_PITCNT : ("FEATURE_TOTAL_PITCNT",'T'),
        FEATURE_SHIFT_TRACKSTATUS : ("FEATURE_SHIFT_TRACKSTATUS",'Y'),
        FEATURE_SHIFT_LAPSTATUS : ("FEATURE_SHIFT_LAPSTATUS",'P'),
        FEATURE_SHIFT_LEADER_PITCNT : ("FEATURE_SHIFT_LEADER_PITCNT",'L'),
        FEATURE_SHIFT_TOTAL_PITCNT : ("FEATURE_SHIFT_TOTAL_PITCNT",'T')
        }
# oracle mode
MODE_ORACLE = 1024 # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2
# oracle mode for training
# NOTE(review): MODE_NOLAP/MODE_NOTRACK share bit values with
# MODE_ORACLE_TRACKONLY/MODE_ORACLE_LAPONLY -- confirm they are never mixed
MODE_NOLAP = 1
MODE_NOTRACK = 2
# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32
# disturbance analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256
# mode bit -> printable name (used for logging/reports)
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
        MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
        MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
        MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
        MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
        MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
def nan_helper(y):
    """Locate NaNs in a 1-d array.

    Args:
        y: 1-d numpy array, possibly containing NaNs.

    Returns:
        (mask, to_indices): a boolean mask of the NaN positions and a
        callable converting such a logical mask into integer indices.

    Example (linear interpolation of NaNs):
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)

    def to_indices(logical):
        return logical.nonzero()[0]

    return mask, to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#_feature_mode = FEATURE_STATUS
def decode_feature_mode(feature_mode):
    """Print the long names of the set feature bits and return a short code.

    The returned string has one character per entry of _feature2str, in
    order: the feature's short code when its bit is set, '0' otherwise.
    """
    long_names = []
    code_chars = []
    for bit, (long_name, short_char) in _feature2str.items():
        if test_flag(feature_mode, bit):
            long_names.append(long_name)
            code_chars.append(short_char)
        else:
            code_chars.append('0')
    print(' '.join(long_names))
    return ''.join(code_chars)
def add_leader_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS, shift_len = 0,
                   dest_col = COL_LEADER_PITCNT,
                   verbose = False):
    """Add/refresh the leader-pit-count feature in mat(car, feature, lap).

    For each lap, cars are ordered by `rank_col` (read `shift_len` laps
    earlier to avoid leaking future ranks) and each car's value is the
    number of pit stops taken on that lap by better-ranked cars
    (an exclusive cumulative sum down the rank order).

    Args:
        selmat: laptime_data array shaped (car, feature, lap).
        rank_col: column holding the rank used to order cars.
        pit_col: column holding the per-lap pit-stop flag.
        shift_len: laps to look back for the rank ordering.
        dest_col: destination column; -1 appends a new feature column,
            otherwise the given column is overwritten in place.
        verbose: dump intermediate arrays (tail from lap 190) for debugging.

    Returns:
        The array with the feature written into `dest_col` (a new array
        when dest_col == -1, otherwise `selmat` updated in place), with
        each car's NaN tail re-applied to match rank_col's valid length.
    """
    dim1, dim2, dim3 = selmat.shape
    # rank ordering per lap: idx[pos, lap] = car index at rank position pos
    idx = np.argsort(selmat[:, rank_col,:], axis=0)
    # FIX: removed the unused `true_rank = np.argsort(idx, ...).astype(np.float)`
    # line -- it was dead code and `np.float` raises AttributeError on
    # NumPy >= 1.24 (alias removed).
    # gather pit flags in rank order; the ordering comes from shift_len laps back
    pits = np.zeros((dim1,dim3))
    for lap in range(shift_len, dim3):
        col = idx[:, lap-shift_len]
        pits[:, lap] = selmat[col, pit_col, lap]
    # pits by all better-ranked cars on the same lap (exclusive cumsum)
    leaderCnt = np.nancumsum(pits, axis=0) - pits
    if verbose:
        print('pits:\n')
        print(pits[:,190:])
        print('leaderCnt raw:\n')
        print(leaderCnt[:,190:])
    # remove nans
    nanidx = np.isnan(leaderCnt)
    leaderCnt[nanidx] = 0
    if verbose:
        print('leaderCnt after remove nan:\n')
        print(leaderCnt[:,190:])
    if dest_col == -1:
        # append mode: copy everything into a wider array
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        # update mode: write into the existing column in place
        newmat = selmat
    # scatter the rank-ordered counts back to the original car rows
    for lap in range(dim3):
        col = idx[:, lap]
        newmat[col, dest_col, lap] = leaderCnt[:, lap]
    # re-apply each car's NaN tail so the new column matches rank_col's length
    for rec in newmat:
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            rec[dest_col, -nan_count:] = np.nan
    return newmat
def add_allpit_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS,
                   dest_col = COL_TOTAL_PITCNT,verbose = False):
    """Add/refresh the total-pits-per-lap feature in mat(car, feature, lap).

    Every car gets the same value per lap: the total number of pit stops
    taken by all cars on that lap.

    Args:
        selmat: laptime_data array shaped (car, feature, lap).
        rank_col: column whose NaN tail defines each car's valid lap range.
        pit_col: column holding the per-lap pit-stop flag.
        dest_col: destination column; -1 appends a new feature column,
            otherwise the given column is overwritten in place.
        verbose: dump intermediate arrays (tail from lap 190) for debugging.

    Returns:
        The array with the feature written into `dest_col` (a new array
        when dest_col == -1, otherwise `selmat` updated in place).
    """
    dim1, dim2, dim3 = selmat.shape
    # total pit stops across all cars for each lap
    totalCnt = np.nansum(selmat[:, pit_col, :], axis=0).reshape((-1))
    if verbose:
        # FIX: this debug branch referenced the undefined name `pits`
        # (copy-paste from add_leader_cnt) and raised NameError whenever
        # verbose=True; print the per-car pit column feeding totalCnt instead.
        print('pits:\n')
        print(selmat[:, pit_col, 190:])
        print('totalCnt raw:\n')
        print(totalCnt[190:])
    # remove nans
    nanidx = np.isnan(totalCnt)
    totalCnt[nanidx] = 0
    if verbose:
        print('totalCnt after remove nan:\n')
        print(totalCnt[190:])
    if dest_col == -1:
        # append mode: copy everything into a wider array
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        # update mode: write into the existing column in place
        newmat = selmat
    for car in range(dim1):
        newmat[car, dest_col, :] = totalCnt
    # re-apply each car's NaN tail so the new column matches rank_col's length
    for rec in newmat:
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            rec[dest_col, -nan_count:] = np.nan
    return newmat
def add_shift_feature(selmat, rank_col=COL_RANK, shift_col=COL_LAPSTATUS, shift_len = 2,
                      dest_col = -1,verbose = False):
    """Add/refresh a left-shifted copy of one feature in mat(car, feature, lap).

    For every car, feature `shift_col` is shifted `shift_len` laps to the
    left over that car's valid (non-NaN rank) laps; the vacated tail of the
    valid range is filled with 0 and laps beyond it stay NaN.

    WARNING: these are oracle features (values come from the future); be
    careful not to leak future rank positions through them.

    Args:
        selmat: laptime_data array shaped (car, feature, lap).
        rank_col: column whose NaN tail defines each car's valid lap range.
        shift_col: feature column to shift.
        shift_len: number of laps to shift left.
        dest_col: destination column; -1 appends a new feature column,
            otherwise the given column is overwritten in place.
        verbose: unused; kept for signature parity with the sibling helpers.

    Returns:
        The array with the shifted feature in `dest_col` (a new array when
        dest_col == -1, otherwise `selmat` updated in place).
    """
    dim1, dim2, dim3 = selmat.shape
    if dest_col == -1:
        # append mode: copy everything into a wider array
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        # update mode: write into the existing column in place
        newmat = selmat
    for car in range(dim1):
        # default the whole row to NaN, then fill only the valid laps
        newmat[car, dest_col, :] = np.nan
        rec = selmat[car]
        # FIX: removed the dead `nans, x = nan_helper(...)` / `nan_count`
        # computation -- its results were never used (the valid-lap mask is
        # recomputed with np.isnan below).
        recnnz = rec[shift_col, ~np.isnan(rec[rank_col,:])]
        reclen = len(recnnz)
        # zero-fill the valid range, then shift-copy the feature left
        newmat[car, dest_col, :reclen] = 0
        newmat[car, dest_col, :reclen-shift_len] = recnnz[shift_len:]
    return newmat
def update_laptimedata(prediction_length, freq,
                       test_event = 'Indy500-2018',
                       train_ratio=0.8,
                       context_ratio = 0.,
                       shift_len = -1,
                       #target_pred = None,
                       rank_col = COL_RANK,
                       verbose = False):
    """Update/extend the derived features of the test event in laptime_data.

    Adds (or refreshes, depending on how many feature columns already
    exist) the leader-pit-count, total-pit-count and the four shifted
    oracle feature columns for the test event's record only; the global
    laptime_data is modified in place.

    Args:
        prediction_length: default shift length when shift_len < 0.
        freq: unused here (kept for signature compatibility).
        test_event: NOTE(review): this parameter is never read -- the loop
            below matches against the module-global _test_event instead;
            confirm whether the parameter should be used.
        train_ratio, context_ratio: unused here (kept for compatibility).
        shift_len: laps to shift the oracle features; -1 means
            prediction_length.
        rank_col: rank column passed to add_leader_cnt.
        verbose: print progress information.

    Returns:
        The (mutated) global laptime_data.
    """
    global laptime_data
    # inplace update of the global dataset
    #_laptime_data = laptime_data.copy()
    _laptime_data = laptime_data
    # locate the test event's record
    test_idx = -1
    for idx, _data in enumerate(laptime_data):
        if gvar.events[_data[0]] == _test_event:
            test_idx = idx
            break
    # check shift len
    if shift_len < 0:
        shift_len = prediction_length
    if verbose:
        print('update_laptimedata shift len:', shift_len, test_idx)
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    new_data = []
    if test_idx >= 0:
        _data = laptime_data[test_idx]
        # feature count decides create-new (-1 dest) vs in-place update mode
        input_feature_cnt = _data[2].shape[1]
        if verbose:
            if input_feature_cnt < COL_LASTFEATURE + 1:
                print('create new features mode, feature_cnt:', input_feature_cnt)
            else:
                print('update features mode, feature_cnt:', input_feature_cnt)
        # add new features; silence the helpers' debug output from here on
        verbose = False
        #
        # be careful on leader_cnt for the future rank leaking
        #
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_LEADER_PITCNT
        data2_intermediate = add_leader_cnt(_data[2], shift_len = shift_len,
                rank_col = rank_col,
                dest_col=dest_col, verbose = verbose)
        # add totalPit
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_TOTAL_PITCNT
        data2_intermediate = add_allpit_cnt(data2_intermediate, dest_col=dest_col)
        #
        # add shift features, in a fixed order matching the COL_SHIFT_*
        # constants (TRACKSTATUS=11, LAPSTATUS=12, LEADER_PITCNT=13,
        # TOTAL_PITCNT=14)
        #
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TRACKSTATUS
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_TRACKSTATUS, shift_len = shift_len)
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LAPSTATUS
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_LAPSTATUS, shift_len = shift_len)
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LEADER_PITCNT
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_LEADER_PITCNT, shift_len = shift_len)
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TOTAL_PITCNT
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_TOTAL_PITCNT, shift_len = shift_len)
        # final: store the widened/updated feature matrix back
        data2_newfeature = data2_intermediate
        laptime_data[test_idx][2] = data2_newfeature
    return laptime_data
# FIX: an identical definition of get_real_features immediately follows this
# position in the file; at import time the later definition shadowed this
# one, so this first copy was dead code and has been removed.
def get_real_features(feature_mode, rec, endpos):
    """Build the list of real-valued covariate vectors for one car record.

    Args:
        feature_mode: FEATURE_* bit combination selecting the covariates.
        rec: one car's matrix rec[feature, lap].
        endpos: exclusive lap bound; <= 0 means the full lap range.

    Returns:
        List of 1-d arrays, one per selected covariate, in the canonical
        order (track/lap status first, then pitage, pit counters and the
        shifted oracle columns).
    """
    if endpos <= 0:
        endpos = rec.shape[1]
    # (flag, columns appended when the flag is set), in the canonical order
    layout = [
        (FEATURE_STATUS, (COL_TRACKSTATUS, COL_LAPSTATUS)),
        (FEATURE_PITAGE, (COL_LAPS_INSTINT,)),
        (FEATURE_LEADER_PITCNT, (COL_LEADER_PITCNT,)),
        (FEATURE_TOTAL_PITCNT, (COL_TOTAL_PITCNT,)),
        (FEATURE_SHIFT_TRACKSTATUS, (COL_SHIFT_TRACKSTATUS,)),
        (FEATURE_SHIFT_LAPSTATUS, (COL_SHIFT_LAPSTATUS,)),
        (FEATURE_SHIFT_LEADER_PITCNT, (COL_SHIFT_LEADER_PITCNT,)),
        (FEATURE_SHIFT_TOTAL_PITCNT, (COL_SHIFT_TOTAL_PITCNT,)),
    ]
    features = []
    for flag, cols in layout:
        if test_flag(feature_mode, flag):
            features.extend(rec[c, :endpos] for c in cols)
    return features
#
# interface with QuickTest
#
#
#
#
def load_data(event, year=0):
    """Load one event's lap records and derive its rank tables.

    Args:
        event: event name, e.g. 'Indy500-2018' (used in the csv filename).
        year: optional numeric year appended to the filename when > 0.

    Returns:
        (alldata, rankdata, acldata):
            alldata  -- the raw csv as a DataFrame
            rankdata -- per (car, lap) first-crossing records ordered by time
            acldata  -- completed-laps table over all cars (make_cl_data)
    """
    if year>0:
        # FIX: `year` is numeric here; concatenating it directly to str
        # raised TypeError -- convert explicitly.
        inputfile = '../data/final/C_'+ event +'-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_'+ event +'.csv'
    dataset = pd.read_csv(inputfile)
    final_lap = max(dataset.completed_laps)
    # cars that completed the race (reached the final lap)
    completed_car_numbers= dataset[dataset.completed_laps == final_lap].car_number.values
    # keep a full copy; restrict `dataset` itself to the finishers
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    # order records by elapsed time (stable on original row order via MyIdx)
    # and keep the first record for each (car, lap)
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # (a finishers-only make_cl_data(dataset) used to be computed here, but
    # its result was never used -- removed)
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-(car, lap) completed-laps table.

    Keeps, for each (car, lap), the first record by elapsed time, sorts the
    result by car then lap, drops bookkeeping columns, and adds per-car
    rank/time deltas between consecutive laps (zeroed at each car's first
    row, where a diff across the car boundary would be meaningless).

    Args:
        dataset: raw lap-record DataFrame (must carry the columns dropped
            and selected below).

    Returns:
        DataFrame with columns car_number, completed_laps, rank,
        elapsed_time, rank_diff, time_diff, current_status, track_status,
        lap_status.
    """
    # pick up data with valid rank: ties on elapsed_time resolve to the
    # original row order via the MyIdx axis name
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
                          "last_pitted_lap","start_position","laps_led"], axis=1)
    # first row of each car: a diff against the previous car is meaningless
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)
    # FIX: use .loc instead of chained assignment (`uni_ds['rank_diff'][mask]`),
    # which only worked via SettingWithCopyWarning and silently stops
    # working under pandas copy-on-write.
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[mask, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[mask, 'time_diff'] = 0
    df = uni_ds[['car_number','completed_laps','rank','elapsed_time',
                 'rank_diff','time_diff',"current_status", "track_status", "lap_status"]]
    return df
# In[5]:
def nan_helper(y):
    """Report NaN positions in a 1-d array.

    (Duplicate of the nan_helper defined earlier in this module.)

    Args:
        y: 1-d numpy array, possibly containing NaNs.

    Returns:
        (nan_mask, index_of): the boolean NaN mask and a callable mapping
        such a logical mask to integer indices, e.g. for interpolation:
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    nan_mask = np.isnan(y)
    index_of = lambda logical: logical.nonzero()[0]
    return nan_mask, index_of
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
# pit model is separate for each car
def load_model(prediction_length, model_name,trainid,epochs=1000, exproot='../models/remote'):
with mx.Context(mx.gpu(7)):
pred_ret = []
#rootdir = f'../models/{exproot}/{_dataset_id}/{_task_id}-{trainid}/'
rootdir = f'{exproot}/{_dataset_id}/{_task_id}-{trainid}/'
# deepAR-Oracle
if model_name == 'deepAR-Oracle' or model_name == 'deepAR-MLP':
model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
print(f'predicting model={model_name}, plen={prediction_length}')
predictor = Predictor.deserialize(Path(modeldir))
print(f'loading model...done!, ctx:{predictor.ctx}')
#deeparw-oracle
elif model_name == 'weighted-oracle' or model_name == 'deepARW-Oracle' or model_name == 'deepARW-MLP':
model=f'deepARW-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
print(f'predicting model={model_name}, plen={prediction_length}')
predictor = Predictor.deserialize(Path(modeldir))
print(f'loading model...done!, ctx:{predictor.ctx}')
elif model_name == 'oracle' or (model_name.find('pitmodel') == 0):
model=f'deepARW-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
print(f'predicting model={model_name}, plen={prediction_length}')
predictor = Predictor.deserialize(Path(modeldir))
print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
elif model_name == 'deepAR-multi':
model=f'deepAR-multi-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
print(f'predicting model={model_name}, plen={prediction_length}')
predictor = Predictor.deserialize(Path(modeldir))
print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
elif model_name == 'joint' or model_name == 'deepARW-multi' or model_name == 'RankNet-Joint':
model=f'deepARW-multi-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
print(f'predicting model={model_name}, plen={prediction_length}')
predictor = Predictor.deserialize(Path(modeldir))
print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
# transformer
elif model_name == 'transformer' or model_name == 'Transformer':
model=f'Transformer-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
print(f'predicting model={model_name}, plen={prediction_length}')
predictor = Predictor.deserialize(Path(modeldir))
print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
elif model_name == 'Transformer-MLP' or model_name == 'Transformer-Oracle':
model=f'Transformer-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
print(f'predicting model={model_name}, plen={prediction_length}')
predictor = Predictor.deserialize(Path(modeldir))
print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
elif model_name == 'TransformerW-MLP' or model_name == 'TransformerW-Oracle':
model=f'TransformerW-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
print(f'predicting model={model_name}, plen={prediction_length}')
predictor = Predictor.deserialize(Path(modeldir))
print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
elif model_name == 'TransformerF-MLP' or model_name == 'TransformerF-Oracle':
model=f'TransformerF-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
print(f'predicting model={model_name}, plen={prediction_length}')
predictor = Predictor.deserialize(Path(modeldir))
print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
elif model_name == 'TransformerWF-MLP' or model_name == 'TransformerWF-Oracle':
model=f'TransformerWF-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
print(f'predicting model={model_name}, plen={prediction_length}')
predictor = Predictor.deserialize(Path(modeldir))
print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
elif model_name == 'TransformerWFM-MLP' or model_name == 'TransformerWFM-Oracle':
model=f'TransformerWFM-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
print(f'predicting model={model_name}, plen={prediction_length}')
predictor = Predictor.deserialize(Path(modeldir))
print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
# deepAR
elif model_name == 'deepAR' or model_name == 'standard':
model=f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
#model=f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}'
modeldir = rootdir + model
print(f'predicting model={model_name}, plen={prediction_length}')
predictor = Predictor.deserialize(Path(modeldir))
print(f'loading model...done!, ctx:{predictor.ctx}')
# naive
elif model_name == 'naive':
print(f'predicting model={model_name}, plen={prediction_length}')
predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
    # zero predictor: predicts zero change, i.e. keeps the rank unchanged
elif model_name == 'zero':
print(f'predicting model={model_name}, plen={prediction_length}')
predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
# arima
elif model_name == 'arima':
print(f'predicting model={model_name}, plen={prediction_length}')
predictor = RForecastPredictor(method_name='arima',freq= freq,
prediction_length = prediction_length,trunc_length=gvar.context_length)
else:
print(f'error: model {model_name} not support yet!')
return predictor
#
# simulator
#
def get_pitlaps(verbose = True, prediction_length=2):
    """
    Collect the pit-stop laps (COL_LAPSTATUS == 1) for every car of the
    test event.

    reads (module globals):
        laptime_data, _run_ts, _test_event, _train_len, _inlap_status,
        _include_endpit, global_carids
    return:
        ret_pitlaps ; sorted, de-duplicated list of laps at which any car pits
        all_pitlaps ; dict, carno -> list of that car's pit laps
        max_lap     ; longest time-series length in the test event
    """
    target_row = _run_ts
    all_pitlaps = {}   # carno -> pit laps
    max_lap = 0
    for event_data in laptime_data:
        if gvar.events[event_data[0]] != _test_event:
            continue
        # longest ts across the event's cars
        lengths = [entry.shape[1] for entry in event_data[2]]
        max_lap = int(np.max(lengths))
        for row in range(event_data[2].shape[0]):
            # rec[features, lapnumber]
            rec = event_data[2][row].copy()
            # drop the nan tail on the target row
            nans, x = nan_helper(rec[target_row, :])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[target_row, :])]
            valid_len = rec.shape[1]
            # discard series too short to train + predict on
            if valid_len < _train_len + prediction_length:
                if verbose:
                    print(f'a short ts: carid={event_data[1][row]},len={valid_len}')
                continue
            carno = event_data[1][row]
            carid = global_carids[event_data[1][row]]
            static_cat = [carid]
            pit_idx = np.where(rec[COL_LAPSTATUS, :] == 1)[0]
            # when inlaps are also flagged, keep only the actual pit laps
            if _inlap_status == 1:
                # inlap precedes each pit stop -> keep odd positions
                pit_idx = [pit_idx[k] for k in range(1, len(pit_idx), 2)]
            elif _inlap_status == 2:
                # inlap follows each pit stop -> keep even positions
                pit_idx = [pit_idx[k] for k in range(0, len(pit_idx), 2)]
            all_pitlaps[carno] = list(pit_idx)
            if _include_endpit:
                # treat the final lap as a stint boundary too
                all_pitlaps[carno].append(valid_len - 1)
    merged = set()
    for laps in all_pitlaps.values():
        merged.update(laps)
    return sorted(merged), all_pitlaps, max_lap
def get_nextpit(pitlaps, startlap):
    """
    Find, for each car, the first pit lap strictly after ``startlap``.

    input:
        pitlaps  ; dict, carno -> list of pit laps for that car
        startlap ; lap number to search from
    return:
        nextpit_map ; dict, carno -> first pit lap > startlap
                      (cars with no later pit are omitted)
        maxpit      ; latest "next pit" among the cars that pit exactly AT
                      ``startlap``; -1 when no such car has a later pit

    Fix: the original also built a parallel ``nextpit`` list (with -1
    placeholders) that was never returned — dead code, removed.
    """
    nextpit_map = {}
    # cars whose pit stop is exactly at startlap
    hit_cars = [carno for carno, laps in pitlaps.items() if startlap in laps]
    # first pit lap strictly after startlap, per car
    for carno, laps in pitlaps.items():
        for lap in laps:
            if lap > startlap:
                nextpit_map[carno] = lap
                break
    # furthest next-pit among the cars that pit at startlap
    maxpit = -1
    for carno in hit_cars:
        if carno in nextpit_map:
            maxpit = max(nextpit_map[carno], maxpit)
    return nextpit_map, maxpit
def sim_init():
    """
    Prepare the test event's lap matrix for simulation.

    Widens the feature axis to COL_ENDPOS when needed, then snapshots the
    pit-related status rows into their *_SAVE twins so update_onets() can
    restore ground truth up to any start lap.
    """
    global laptime_data
    # locate the test event inside laptime_data (-1 when absent)
    test_idx = next((i for i, d in enumerate(laptime_data)
                     if gvar.events[d[0]] == _test_event), -1)
    print('sim_init: input laptime_data, shape=', len(laptime_data), laptime_data[test_idx][2].shape, test_idx)
    if test_idx >= 0:
        mat = laptime_data[test_idx][2]
        ncars, nfeatures, nlaps = mat.shape
        needs_grow = nfeatures < COL_ENDPOS
        if needs_grow:
            # widen the feature axis; copy the existing features over
            grown = np.zeros((ncars, COL_ENDPOS, nlaps))
            grown[:, :nfeatures, :] = mat.copy()
        else:
            # already wide enough; mutate in place (grown aliases mat)
            grown = mat
        # snapshot pit-model related features
        grown[:, COL_TRACKSTATUS_SAVE, :] = grown[:, COL_TRACKSTATUS, :]
        grown[:, COL_LAPSTATUS_SAVE, :] = grown[:, COL_LAPSTATUS, :]
        grown[:, COL_CAUTION_LAPS_INSTINT_SAVE, :] = grown[:, COL_CAUTION_LAPS_INSTINT, :]
        grown[:, COL_LAPS_INSTINT_SAVE, :] = grown[:, COL_LAPS_INSTINT, :]
        # only reassign when a new array was allocated
        if needs_grow:
            laptime_data[test_idx][2] = grown
    print('sim_init: after laptime_data, shape=', len(laptime_data), laptime_data[test_idx][2].shape)
def update_lapstatus(startlap):
    """
    Re-predict the lap status of every car in the test event from
    ``startlap`` onward via the pit model (see update_onets).

    The pit model was trained on ``gvar.trainrace``; ``pitscale`` rescales
    its lap counts by the ratio of the two events' entry [1] in
    ``gvar.events_info`` (presumably track length — confirm).
    """
    train_race = gvar.trainrace
    test_eid = _test_event.split('-')[0]
    pitscale = gvar.events_info[train_race][1] * 1.0 / gvar.events_info[test_eid][1]
    for event_data in laptime_data:
        if gvar.events[event_data[0]] != _test_event:
            continue
        # longest ts of the event (kept for parity with sibling loops; unused)
        max_lap = int(np.max([entry.shape[1] for entry in event_data[2]]))
        # event_data[2][row] is the feature matrix, event_data[1][row] the carno
        for row in range(event_data[2].shape[0]):
            update_onets(event_data[2][row], startlap, event_data[1][row],
                         pitscale=pitscale)
# Pit-stop prediction model; assigned externally before simulation.
# May also be the string 'oracle' to use ground-truth pit laps instead.
_pitmodel = None
def update_onets(rec, startlap, carno, pitscale = 1.):
    """
    Rewrite the lap status of one car's time series after ``startlap``
    using the pit prediction model.

    Ground truth (the *_SAVE rows captured by sim_init) is restored up to
    and including ``startlap``; from there on, pit stops are sampled from
    ``_pitmodel`` and COL_LAPSTATUS / COL_CAUTION_LAPS_INSTINT /
    COL_LAPS_INSTINT are regenerated stint by stint.

    input:
        rec      ; feature matrix [COL_*, lapnumber] — mutated in place
        startlap ; last lap for which ground-truth status is kept
        carno    ; car number (for debug reporting only)
        pitscale ; lap-count scale between the pit model's training race
                   and the test race
    return:
        None (``rec`` is modified in place)
    """
    # length of the valid (non-nan) part of the series on the target row
    nans, x = nan_helper(rec[_run_ts, :])
    nan_count = np.sum(nans)
    recx = rec[:, ~np.isnan(rec[_run_ts, :])]
    totallen = recx.shape[1]
    # nothing to simulate when the start lap is past the valid data
    if startlap >= totallen:
        return

    # restore ground truth for laps [0, startlap]; zero the rest
    endpos = startlap + 1
    rec[COL_TRACKSTATUS, :] = 0
    rec[COL_LAPSTATUS, :] = 0
    rec[COL_TRACKSTATUS, :endpos] = rec[COL_TRACKSTATUS_SAVE, :endpos]
    rec[COL_LAPSTATUS, :endpos] = rec[COL_LAPSTATUS_SAVE, :endpos]
    rec[COL_CAUTION_LAPS_INSTINT, :endpos] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :endpos]
    rec[COL_LAPS_INSTINT, :endpos] = rec[COL_LAPS_INSTINT_SAVE, :endpos]

    debug_report('start update_onets', rec[COL_LAPSTATUS], startlap, carno)

    # repeatedly predict the next pit lap until we run off the series
    curpos = startlap
    while True:
        caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, curpos])
        laps_instint = int(rec[COL_LAPS_INSTINT, curpos])
        # map stint ages into the pit model's (training-race) lap scale
        if pitscale != 1.0:
            caution_laps_instint = int(caution_laps_instint / pitscale)
            laps_instint = int(laps_instint / pitscale)
        # _pitmodel_bias is a module-level offset added to every prediction
        pred_pit_laps = _pitmodel.predict(caution_laps_instint, laps_instint) + _pitmodel_bias
        # map the predicted stint length back to the test-race scale
        pred_pit_laps = int(pred_pit_laps * pitscale)
        nextpos = curpos + pred_pit_laps - laps_instint
        if nextpos >= totallen:
            # predicted pit falls past the end: fill the remaining laps'
            # stint counters and stop — no further pit is recorded
            nextpos = totallen - 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos+1] = caution_laps_instint
            for _pos in range(curpos+1, nextpos+1):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            break
        else:
            # a valid pit at nextpos
            rec[COL_LAPSTATUS, nextpos] = 1
            if _inlap_status != 0:
                # also flag the inlap (the lap adjacent to the pit stop)
                if _inlap_status == 1:
                    rec[COL_LAPSTATUS, nextpos-1] = 1
                else:
                    # NOTE(review): no bounds check on nextpos+1 — can
                    # write into the nan tail when nextpos == totallen-1
                    rec[COL_LAPSTATUS, nextpos+1] = 1
            # carry the caution count through the stint, reset at the pit
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos] = caution_laps_instint
            rec[COL_CAUTION_LAPS_INSTINT, nextpos] = 0
            for _pos in range(curpos+1, nextpos):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            rec[COL_LAPS_INSTINT, nextpos] = 0
            # advance to the new stint
            curpos = nextpos

    debug_report('after update_onets', rec[COL_LAPSTATUS], startlap, carno)
    return
def debug_pitmodel(startlap, carno, laps_instint, caution_laps_instint, samplecnt=1000):
    """
    Sample the pit model repeatedly for one stint state.

    input:
        startlap             ; lap the stint state is measured at
        carno                ; car number (unused; kept for call-site parity)
        laps_instint         ; laps since the last pit
        caution_laps_instint ; caution laps since the last pit
        samplecnt            ; number of samples to draw
    return:
        list of predicted next-pit lap numbers, one per sample
    """
    return [startlap + _pitmodel.predict(caution_laps_instint, laps_instint) - laps_instint
            for _ in range(samplecnt)]
# Debug tracking of simulation status.
# Each status matrix is laps x (endCol x 5 features), the features being:
# target, lapstatus, lap_instint, caution_instint, trackstatus.
_status_mat = {}  # stepid -> status matrix
def debug_report_mat(startlap, maxnext):
    """
    Output the status of the simulation.

    NOTE(review): unfinished stub — the loop below selects each car's
    record but never formats or prints anything; the function currently
    has no observable effect.
    """
    fixedWidth = 5
    endCol = 4
    run_ts = _run_ts
    for _data in laptime_data:
        if gvar.events[_data[0]] != _test_event:
            continue
        # statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        # header: carno | lap#... (intended fixed-width output, never written)
        # process each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status, timediff]
            rec = _data[2][rowid]
# Car numbers to emit debug traces for; empty disables all debug output
# (see debug_print/debug_report). E.g. [12] to watch car 12.
_debug_carlist = []
#_debug_carlist = [12]
def debug_report_ts(msg, rec, startlap, carno, col= COL_LAPSTATUS):
    """
    Print one feature row of ``rec`` split at ``startlap`` (history vs.
    future) — only for cars listed in ``_debug_carlist``.
    """
    if carno not in _debug_carlist:
        return
    print(f'--------- {msg}: {startlap} ----------')
    history = rec[col, : startlap + 1]
    future = rec[col, startlap + 1:]
    print(history)
    print('=' * 10)
    print(future)
def debug_report(msg, rec, startlap, carno):
    """
    Print a 1-D array ``rec`` split at ``startlap`` (history vs. future)
    — only for cars listed in ``_debug_carlist``.
    """
    if carno not in _debug_carlist:
        return
    print(f'--------- {msg}: {startlap} ----------')
    history = rec[: startlap + 1]
    future = rec[startlap + 1:]
    print(history)
    print('=' * 10)
    print(future)
def debug_print(msg):
    """Print ``msg`` only when at least one car is being debugged."""
    if _debug_carlist:
        print(msg)
# works on predicted lap status
def sim_onestep_pred(predictor, prediction_length, freq,
        startlap, endlap,
        oracle_mode = MODE_ORACLE,
        sample_cnt = 100,
        verbose = False
        ):
    """
    Regressive one-step forecasting over [startlap, endlap] on the
    predicted lap status: repeatedly build a test set ending at ``endpos``,
    forecast ``prediction_length`` laps, feed the predictions back into the
    target (and COL_TARGET_PREDICTED / derived features), and advance.

    input:
        predictor, freq   ; gluonts predictor and ts frequency
        prediction_length ; horizon per forecasting step
        startlap, endlap  ; simulation window
        oracle_mode       ; MODE_* bit flags selecting feature setup
        sample_cnt        ; number of sample paths per forecast
    return:
        forecasts_et ; {}, carno -> 5 x totallen matrix
                       row 0: lap status (ground truth snapshot)
                       row 1: true target
                       row 2: predicted target (regressively updated)
                       rows 3/4: placeholders filled by rank evaluation
        forecasts_samples ; {}, carno -> sample_cnt values of the target
                            at the furthest predicted lap
    Fix: the joint-train branch used ``np.float`` (alias removed in
    NumPy 1.24) — replaced with the equivalent ``np.float64``.
    """
    global laptime_data
    run_ts = _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len

    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    test_set = []
    forecasts_et = {}
    forecasts_samples = {}
    _laptime_data = laptime_data

    # endpos is the exclusive end of each step's test series
    endpos = startlap + prediction_length + 1
    while (endpos <= endlap + prediction_length):
        # build the test set for this step
        # _data: eventid, carids, datalist[carnumbers, features, lapnumber]
        carno2rowid = {}
        _test = []
        for _data in _laptime_data:
            if gvar.events[_data[0]] != test_event:
                continue
            # statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            if verbose:
                print(f'{endpos} {endlap} {_data[2].shape} ====event:{gvar.events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}, cars={_data[2].shape[0]}')
            # process each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status, timediff]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                # drop the nan tail on the target row
                nans, x = nan_helper(rec[run_ts, :])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts, :])]
                # skip series too short to train + predict on
                totallen = rec.shape[1]
                if (totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # skip cars that have already finished before endpos
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                if carno not in carno2rowid:
                    carno2rowid[carno] = rowid
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT, :].copy()
                # first time we see this car: init its forecast matrix
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:, :] = np.nan
                    forecasts_et[carno][0, :] = rec[COL_LAPSTATUS_SAVE, :].copy()
                    forecasts_et[carno][1, :] = rec[run_ts, :].copy().astype(np.float32)
                    forecasts_et[carno][2, :] = rec[run_ts, :].copy().astype(np.float32)
                    # for p-risk
                    forecasts_samples[carno] = np.zeros((sample_cnt))
                    # seed COL_TARGET_PREDICTED with the true target
                    _data[2][rowid][COL_TARGET_PREDICTED, :] = np.nan
                    _data[2][rowid][COL_TARGET_PREDICTED, :totallen] = rec[run_ts, :].copy().astype(np.float32)
                # row 2 accumulates predictions step by step (regressive)
                target_val = forecasts_et[carno][2, :]
                # feature selection per oracle_mode
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos - prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos - prediction_length - 1])
                # test modes: overwrite the forecast window's covariates
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # hold the last known track status; assume no pit
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x + start_pitage + 1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    # zero covariates in the forecast window; assume no pit
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x + start_pitage + 1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    # derive pit age from the (predicted) lap status
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos] == 0:
                            pitage_rec[-prediction_length + pos] = start_pitage + 1
                        else:
                            # a new pit resets the age
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # dynamic real features for this step
                real_features = get_real_features(feature_mode, rec, endpos)
                if _joint_train:
                    # multi-dim target: [predicted target, lap status]
                    target_cols = [2, 0]
                    # np.float64 == the removed np.float alias (builtin float)
                    target_val = forecasts_et[carno][target_cols, :endpos].astype(np.float64)
                    _test.append({'target': target_val,
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': real_features
                                  }
                                 )
                else:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': real_features
                                  }
                                 )
                test_rec_cnt += 1
            # keep _data bound to the test event for the update loop below
            break
        # RUN prediction for this step
        test_ds = ListDataset(_test, freq=freq, one_dim_target=False if _joint_train else True)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,        # test dataset
            predictor=predictor,    # predictor
            num_samples=sample_cnt, # number of sample paths for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        # write the forecasts back
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            if _joint_train:
                # multi-dim samples: (num_samples, prediction_length, target_dim)
                if _use_mean:
                    forecast_laptime_mean = np.mean(forecasts[idx].samples[:, :, 0], axis=0).reshape((prediction_length))
                else:
                    forecast_laptime_mean = np.median(forecasts[idx].samples[:, :, 0], axis=0).reshape((prediction_length))
                forecasts_furtherest_samples = forecasts[idx].samples[:, -1, 0].reshape(-1)
            else:
                # 1-D target samples: (num_samples, prediction_length)
                if _use_mean:
                    forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
                else:
                    forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_length))
                forecasts_furtherest_samples = forecasts[idx].samples[:, -1].reshape(-1)
            # feed predictions back for the next (regressive) step
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
            # mirror into laptime_data for feature regeneration
            rowid = carno2rowid[carno]
            _data[2][rowid][COL_TARGET_PREDICTED, len(tss[idx]) - prediction_length:len(tss[idx])] = forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])]
            # debug
            if False:
                print('tss shape:', tss[idx].shape, 'endpos:', endpos)
                print('forecast mean:', forecast_laptime_mean, len(tss[idx]) - prediction_length)
                print('target true:', forecasts_et[carno][1, len(tss[idx]) - prediction_length:len(tss[idx])])
                print('target pred:', forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])])
            # keep the samples at the furthest predicted lap
            forecasts_samples[carno][:] = forecasts_furtherest_samples
        # regenerate derived features from the new predictions
        laptime_data = update_laptimedata(prediction_length, freq,
                                          test_event=_test_event,
                                          train_ratio=0, context_ratio=0., shift_len=prediction_length,
                                          rank_col=COL_TARGET_PREDICTED
                                          )
        # advance one step
        endpos += prediction_length
    return forecasts_et, forecasts_samples
# pred pit differs to true pit
def get_acc_onestint_pred(forecasts, startlap, nextpit, nextpit_pred, trim=2, currank = False):
    """
    Evaluate one stint's rank change when the predicted pit lap may differ
    from the true pit lap.

    input:
        trim        ; steady-lap offset of the rank (skip pit in/out laps)
        forecasts   ; carno -> [5, totallen] matrix
                      row 0: lap_status, row 3: true_rank, row 4: pred_rank
        startlap    ; only stints starting at this pit lap are evaluated
        nextpit     ; carno -> true next pit lap
        nextpit_pred; carno -> predicted next pit lap
        currank     ; when True, predict "rank unchanged" as a baseline
    output:
        list of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign, pitpos, pitpos_pred]
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1, :])
        true_rank = forecasts[carno][3, :]
        pred_rank = forecasts[carno][4, :]
        # pit condition at startlap; with inlap modelling the adjacent lap
        # must be flagged too.
        # NOTE(review): for _inlap_status == 2 this reads startlap+1 before
        # the startlap < lapnum guard — can index past the row when
        # startlap == lapnum-1; confirm callers never hit that boundary.
        if _inlap_status == 0:
            lapstatus_cont = (forecasts[carno][0, startlap] == 1)
        elif _inlap_status == 1:
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap-1] == 1))
        elif _inlap_status == 2:
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap+1] == 1))
        if carno in _debug_carlist:
            _debug_msg = 'startlap=%d, total=%d, pitstop status = %s, nextpit=%s, nextpit_pred=%s'%(startlap, lapnum, lapstatus_cont,
                    'none' if (carno not in nextpit) else nextpit[carno],
                    'none' if (carno not in nextpit_pred) else nextpit_pred[carno],
                    )
            debug_print(_debug_msg)
        # only cars that actually pit at startlap
        #if ((startlap < lapnum) and (forecasts[carno][0, startlap] == 1)):
        if ((startlap < lapnum) and (lapstatus_cont == True)):
            startrank = true_rank[startlap-trim]
            if not carno in nextpit:
                continue
            pitpos = nextpit[carno]
            if np.isnan(pitpos):
                continue
            # pick the predicted stint end; optionally fall back to the
            # true pit when the prediction is missing
            #todo, use the true prediction that longer than maxlap
            if _force_endpit_align:
                if not carno in nextpit_pred:
                    #continue
                    pitpos_pred = pitpos
                else:
                    pitpos_pred = nextpit_pred[carno]
                    if np.isnan(pitpos_pred):
                        pitpos_pred = pitpos
            else:
                if not carno in nextpit_pred:
                    continue
                pitpos_pred = nextpit_pred[carno]
                if np.isnan(pitpos_pred):
                    #set prediction to the end
                    continue
            # rank change over the stint, measured trim laps before the pit
            endrank = true_rank[pitpos-trim]
            #endrank_pred = true_rank[pitpos_pred-trim]
            diff = endrank - startrank
            sign = get_sign(diff)
            if currank:
                # currank baseline: assume rank stays at startrank
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                pred_endrank = pred_rank[pitpos_pred-trim]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            rankret.append([carno, startlap, startrank,
                        endrank, diff, sign,
                        pred_endrank, pred_diff, pred_sign,
                        pitpos, pitpos_pred
                       ])
    return rankret
# pred pit differs to true pit
def get_acc_onestep_shortterm(forecasts, startlap, endlap, trim=0, currank = False):
    """
    Evaluate short-term rank change from ``startlap`` to ``endlap`` for
    every car (no pit-stop condition).

    input:
        trim     ; steady-lap offset of the rank index
        forecasts; carno -> [5, totallen] matrix
                   (row 3: true_rank, row 4: pred_rank)
        startlap ; evaluation start lap
        endlap   ; evaluation end lap
        currank  ; when True, predict "rank unchanged" as a baseline
    output:
        list of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    results = []
    for carno, mat in forecasts.items():
        lapnum = len(mat[1, :])
        # skip cars whose series ends before startlap
        if startlap >= lapnum:
            continue
        true_rank = mat[3, :]
        pred_rank = mat[4, :]
        startrank = true_rank[startlap - trim]
        if np.isnan(endlap):
            continue
        endrank = true_rank[endlap - trim]
        diff = endrank - startrank
        # currank forces the "no change" baseline prediction
        pred_endrank = startrank if currank else pred_rank[endlap - trim]
        pred_diff = pred_endrank - startrank
        results.append([carno, startlap, startrank,
                        endrank, diff, get_sign(diff),
                        pred_endrank, pred_diff, get_sign(pred_diff)])
    return results
# works when pred pitstop == true pitstop
def get_acc_onestint(forecasts, startlap, nextpit, trim=2, currank = False):
    """
    Evaluate one stint's rank change when the predicted pit lap equals the
    true pit lap.

    input:
        trim     ; steady-lap offset of the rank (skip pit in/out laps)
        forecasts; carno -> [5, totallen] matrix
                   (row 0: lap_status, row 3: true_rank, row 4: pred_rank)
        startlap ; only stints starting at this pit lap are evaluated
        nextpit  ; carno -> next pit lap
        currank  ; when True, predict "rank unchanged" as a baseline
    output:
        list of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    results = []
    for carno, mat in forecasts.items():
        lapnum = len(mat[1, :])
        # only cars that actually pit at startlap
        if startlap >= lapnum or mat[0, startlap] != 1:
            continue
        if carno not in nextpit:
            continue
        pitpos = nextpit[carno]
        if np.isnan(pitpos):
            continue
        true_rank = mat[3, :]
        pred_rank = mat[4, :]
        startrank = true_rank[startlap - trim]
        endrank = true_rank[pitpos - trim]
        diff = endrank - startrank
        # currank forces the "no change" baseline prediction
        pred_endrank = startrank if currank else pred_rank[pitpos - trim]
        pred_diff = pred_endrank - startrank
        results.append([carno, startlap, startrank,
                        endrank, diff, get_sign(diff),
                        pred_endrank, pred_diff, get_sign(pred_diff)])
    return results
#
# simulation
#
def run_simulation_stint(predictor, prediction_length, freq,
        carno, stintid, loopcnt,
        datamode = MODE_ORACLE):
    """
    Simulate a single stint of one car, repeated ``loopcnt`` times.

    input:
        predictor, freq   ; gluonts predictor and ts frequency
        prediction_length ; forecast horizon per step
        carno, stintid    ; the car and which of its stints to simulate
        loopcnt           ; number of independent simulation runs
        datamode          ; MODE_* flags passed to sim_onestep_pred
    step:
        1. init the lap status model
        2. for each run: re-predict pit laps, simulate the stint,
           evaluate the rank change
    return:
        df           ; DataFrame of per-run rank-change records
        full_samples ; {} (reserved; filling is commented out)
        full_tss     ; {} (reserved; filling is commented out)
        maxnext_pred ; predicted stint-end lap of the last run
    """
    rankret = []
    # the ground truth pit structure
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    # init samples arrays (currently left empty — see commented code below)
    full_samples = {}
    full_tss = {}
    # test only one stint, for carno/stintid
    pitlap = pitmat[carno][stintid]
    for runid in range(loopcnt):
        #1. update lap status (skipped when the pit model is the oracle)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        #2. get the stint end laps, true and predicted
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        # restrict to the single car under test
        maxnext = nextpit[carno]
        maxnext_pred = nextpit_pred[carno]
        # debug trace for the watched car
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)
        # one-step simulation from pitlap to the predicted stint end
        # (maxnext_pred only, so forecast_samples covers the farthest lap)
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, maxnext_pred,
                oracle_mode = datamode,
                sample_cnt = 100
                )
        debug_print(f'simulation done: {len(forecast)}')
        # derive ranks from the forecast, per experiment type
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            return
        # evaluate this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        #add endlap
        #_ = [x.append(maxnext_pred) for x in ret]
        rankret.extend(ret)
        ## add to full_samples
        #eval_full_samples(maxnext_pred,
        #        forecast_samples, forecast,
        #        full_samples, full_tss)
    # collect results into a DataFrame
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                'endrank', 'diff', 'sign',
                                'pred_endrank', 'pred_diff', 'pred_sign',
                                'endlap','pred_endlap'
                               ])
    return df, full_samples, full_tss, maxnext_pred
def run_simulation_pred(predictor, prediction_length, freq,
        datamode = MODE_ORACLE, verbose = False):
    """
    Run the full-race stint simulation with the predicted pit model.

    input:
        predictor, freq   ; gluonts predictor and ts frequency
        prediction_length ; forecast horizon per step
        datamode          ; MODE_* flags passed to sim_onestep_pred
        verbose           ; print per-step dataset statistics
    step:
        1. init the lap status model
        2. for each pit lap: re-predict pit laps, simulate one stint,
           evaluate its rank change
    return:
        df ; DataFrame of per-stint rank-change records
    Fix: declare ``global laptime_data`` — without it the assignment below
    bound a function-local name, so the module-level data consumed by
    get_pitlaps()/sim_onestep_pred() was never refreshed (the sibling
    run_simulation_shortterm already declares it for the same pattern).
    """
    global laptime_data
    rankret = []
    # the ground truth pit structure
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    for pitlap in allpits:
        #1. update lap status (skipped when the pit model is the oracle)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
            # regenerate derived features from the new lap status
            laptime_data = update_laptimedata(prediction_length, freq,
                                              test_event=_test_event,
                                              train_ratio=0, context_ratio=0., shift_len=prediction_length)
        debug_print(f'update lapstatus done.')
        #2. get the stint end laps, true and predicted
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        # debug trace for the watched car
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)
        # one-step simulation from pitlap to the farther stint end
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, max(maxnext, maxnext_pred),
                oracle_mode = datamode,
                sample_cnt = 100,
                verbose = verbose
                )
        debug_print(f'simulation done: {len(forecast)}')
        # derive ranks from the forecast, per experiment type
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        rankret.extend(ret)
    # collect results into a DataFrame
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                'endrank', 'diff', 'sign',
                                'pred_endrank', 'pred_diff', 'pred_sign',
                                'endlap','pred_endlap'
                               ])
    return df
#prediction of shorterm + pred pit model
def run_simulation_shortterm(predictor, prediction_length, freq,
        datamode = MODE_ORACLE,
        sample_cnt = 100,
        verbose = False
        ):
    """
    Short-term prediction over the whole race using the predicted pit
    model: at every lap from 10 onward, re-predict pit laps, forecast
    ``prediction_length`` laps ahead, and score the rank change.

    input:
        predictor, freq   ; gluonts predictor and ts frequency
        prediction_length ; forecast horizon per step
        datamode          ; MODE_* flags passed to sim_onestep_pred
        sample_cnt        ; number of sample paths per forecast
        verbose           ; print per-step dataset statistics
    return:
        df           ; DataFrame of per-step rank-change records
        full_samples ; carno -> (sample_cnt, maxlap) sample matrix
        full_tss     ; carno -> (maxlap,) true series
    """
    global laptime_data
    rankret = []
    # the ground truth pit structure
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    # init samples arrays
    full_samples = {}
    full_tss = {}
    for pitlap in range(10, maxlap-prediction_length):
        #1. update lap status (skipped when the pit model is the oracle)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
            # regenerate derived features from the new lap status
            laptime_data = update_laptimedata(prediction_length, freq,
                            test_event = _test_event,
                            train_ratio=0, context_ratio = 0.,shift_len = prediction_length)
        #if pitlap == 124:
        #    import pdb
        #    pdb.set_trace()
        debug_print(f'update lapstatus done.')
        # one-step simulation for the next prediction_length laps
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, pitlap + prediction_length,
                oracle_mode = datamode,
                sample_cnt = sample_cnt,
                verbose = verbose
                )
        # pocono-2019: series end with nan, totallen < expected endpos
        if not forecast:
            break
        debug_print(f'simulation done: {len(forecast)}')
        # derive ranks from the forecast, per experiment type
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, prediction_length)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, prediction_length, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        #debug joint
        #if True:
        #    xmat = forecasts_et[13][:, pitlap:pitlap+prediction_length]
        #    print('debug forecasts_et at ', pitlap)
        #    print(xmat)
        # evaluate this step
        ret = get_acc_onestep_shortterm(forecasts_et, pitlap, pitlap+prediction_length)
        rankret.extend(ret)
        # add to full_samples; laptime2rank compares raw values, not ranks
        evalbyrank = False if _exp_id == 'laptime2rank' else True
        eval_full_samples(pitlap + prediction_length,
                forecast_samples, forecast,
                full_samples, full_tss, evalbyrank=evalbyrank)
        print('evalbyrank:', evalbyrank)
    # collect results into a DataFrame
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                'endrank', 'diff', 'sign',
                                'pred_endrank', 'pred_diff', 'pred_sign',
                               ])
    return df, full_samples, full_tss
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset):
    """
    Derive ranks from lap-time forecasts: convert each car's true and
    predicted lap times to elapsed time (cumulative sum + start offset),
    then rank cars per lap by elapsed time.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
                       row 1: true lap times, row 2: predicted lap times;
                       rows 3/4 are filled here with true/pred rank
        prediction_length ; unused here (kept for a uniform signature)
        start_offset ; DataFrame with columns car_number, elapsed_time:
                       each car's elapsed time at lap 0
    return:
        forecasts_et ; same dict, rows 3/4 populated (mutated in place)
    """
    # get the car list for this lap
    carlist = list(forecasts_et.keys())
    caridmap={key:idx for idx, key in enumerate(carlist)}
    # elapsed-time matrix: [true/pred, car, lap], nan-padded
    maxlap = gvar.maxlap
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan
    for carno in forecasts_et.keys():
        # NOTE(review): `offset` is only bound when start_offset is a
        # DataFrame; any other type raises UnboundLocalError below —
        # confirm callers always pass the offsets DataFrame.
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        lapnum = len(forecasts_et[carno][1,:])
        # true lap times -> elapsed
        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed
        # predicted lap times -> elapsed
        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed
    # per-lap rank via double argsort (nan-tolerant: nans sort last)
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # write the ranks back into rows 3 (true) and 4 (pred)
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]
    return forecasts_et
#
#
def eval_full_samples(lap, forecast_samples, forecast, full_samples, full_tss, evalbyrank = True):
    """
    Fold the forecast samples for one lap into the accumulated result arrays.

    input:
        lap    ; lap number
        forecast_samples; {} cano -> samples ore pred target
        forecast ; {}, carno -> 5 x totallen matrix
            1,: -> true target
            2,: -> pred target
        evalbyrank ; True -> convert values to per-lap ranks via double
                     argsort; False -> keep the raw target values
    return:
        None — full_samples and full_tss are mutated in place:
        full_tss[carno]     ; 1 x maxlap true series (filled up to `lap`)
        full_samples[carno] ; samplecnt x maxlap predicted samples
                              (column `lap` filled here)
    """
    #get car list for this lap
    carlist = list(forecast.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    samplecnt = len(forecast_samples[carlist[0]])
    #diff_time = np.zeros((len(carlist), 1))
    # NaN-padded car x lap (true) and car x sample (predicted) matrices
    diff_time = np.zeros((len(carlist), gvar.maxlap))
    diff_time_hat = np.zeros((len(carlist), samplecnt))
    diff_time[:,:] = np.nan
    diff_time_hat[:,:] = np.nan
    for carno in carlist:
        #diff_time[caridmap[carno],0] = forecast[carno][1, lap]
        maxlen = len(forecast[carno][1, :])
        diff_time[caridmap[carno],:maxlen] = forecast[carno][1, :]
        diff_time_hat[caridmap[carno],:] = forecast_samples[carno]
    if evalbyrank == True:
        #calculate rank, support nan
        # double argsort: rank 0 = smallest value; NaNs sort last
        idx = np.argsort(diff_time, axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(diff_time_hat, axis=0)
        pred_rank = np.argsort(idx, axis=0)
    else:
        true_rank = diff_time
        pred_rank = diff_time_hat
    # save the rank back
    for carno in carlist:
        if carno not in full_tss:
            #init
            full_tss[carno] = np.zeros((gvar.maxlap))
            full_samples[carno] = np.zeros((samplecnt, gvar.maxlap))
            full_tss[carno][:] = np.nan
            full_samples[carno][:,:] = np.nan
        # true series up to and including `lap`; predicted samples only at `lap`
        full_tss[carno][:lap] = true_rank[caridmap[carno]][:lap]
        full_tss[carno][lap] = true_rank[caridmap[carno]][lap]
        full_samples[carno][:, lap] = pred_rank[caridmap[carno],:]
    return
def eval_stint_direct(forecasts_et, prediction_length):
    """
    evaluate rank by timediff forecasting

    Ranks cars per lap directly from the time-difference series (row 1 = true,
    row 2 = predicted) using a double argsort, and writes the resulting ranks
    back into rows 3 (true) and 4 (predicted) of each car matrix.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
        prediction_length ;
    return:
        forecasts_et  (same dict, mutated in place)
    """
    cars = list(forecasts_et.keys())
    car2row = {carno: row for row, carno in enumerate(cars)}
    # layer 0 = true timediff, layer 1 = predicted; NaN padding beyond each
    # car's series length keeps short series ranked last by argsort
    nlaps = gvar.maxlap
    diff_time = np.full((2, len(cars), nlaps), np.nan)
    for carno in cars:
        rec = forecasts_et[carno]
        series_len = len(rec[1, :])
        diff_time[0, car2row[carno], :series_len] = rec[1, :]
        diff_time[1, car2row[carno], :series_len] = rec[2, :]
    # double argsort per lap -> rank 0 is the leader
    order = np.argsort(diff_time[0], axis=0)
    true_rank = np.argsort(order, axis=0)
    order = np.argsort(diff_time[1], axis=0)
    pred_rank = np.argsort(order, axis=0)
    # write ranks back into the placeholder rows
    for carno in cars:
        series_len = len(forecasts_et[carno][1, :])
        forecasts_et[carno][3, :] = true_rank[car2row[carno], :series_len]
        forecasts_et[carno][4, :] = pred_rank[car2row[carno], :series_len]
    return forecasts_et
#calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    Same computation as eval_stint_bylaptime: laptimes are accumulated into
    elapsed time (plus the per-car lap-0 offset), ranks are derived per lap by
    double argsort, and written back into rows 3/4 of each car matrix.

    input:
        test_ds     ;  must be test set for a single event, because test_ds itself does not
                    contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret    ;  [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et  ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    maxlap = gvar.maxlap
    # layer 0 = true, layer 1 = predicted; NaN-padded past each car's length
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan
    for carno in forecasts_et.keys():
        #start_offset is global var
        # NOTE(review): `offset` is only bound when start_offset is a DataFrame;
        # other types would leave it unbound on the first iteration — confirm
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        lapnum = len(forecasts_et[carno][1,:])
        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed
        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed
        #maxlap = max(maxlap, len(forecasts_et[carno][1,:]))
    #calculate rank, support nan
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]
    return forecasts_et
# In[13]:
def get_sign(diff):
    """Return the sign of *diff*: 1 if positive, -1 if negative, 0 otherwise."""
    if diff == 0:
        return 0
    return 1 if diff > 0 else -1
def init(laptimefile, pitmodel = '', pitmodel_bias = 0):
    """
    Load the race dataset and initialize the module-level state.

    input:
        laptimefile   ; pickle file containing (global_carids, laptime_data)
        pitmodel      ; non-str -> PitModelSimple (top8 when pitmodel==0);
                        'oracle' -> keep the string (use true pit laps);
                        any other str -> model file path for PitModelMLP
        pitmodel_bias ; bias stored in the module-level _pitmodel_bias
                        (only for the PitModelMLP branch, as before)
    side effects:
        sets global_carids, laptime_data, global_start_offset, decode_carids,
        _pitmodel and _pitmodel_bias; global_start_offset[event] holds each
        car's lap-0 elapsed time for that event.
    """
    global global_carids, laptime_data, global_start_offset, decode_carids,_pitmodel
    global _inlap_status
    # bug fix: _pitmodel_bias was assigned below without a global declaration,
    # so the module-level value (read elsewhere) was never updated
    global _pitmodel_bias
    #dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
    stagedata = {}
    for event in gvar.events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata = stagedata[event]
        #offset: per-car elapsed time at lap 0
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]
    # start from here
    import pickle
    #with open('laptime_rank_timediff_fulltest-oracle-%s.pickle'%year, 'rb') as f:
    #laptimefile = f'laptime_rank_timediff_pit-oracle-{gvar.dbid}.pickle'
    with open(laptimefile, 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    decode_carids={carid:carno for carno, carid in global_carids.items()}
    print(f'init: load dataset {laptimefile} with {len(laptime_data)} races, {len(global_carids)} cars')
    if not isinstance(pitmodel, str):
        _pitmodel = PitModelSimple(top8=(True if pitmodel==0 else False))
        print(f'init pitmodel as PitModelSimple')
    elif pitmodel=='oracle':
        _pitmodel = pitmodel
    else:
        _pitmodel_bias = pitmodel_bias
        _pitmodel = PitModelMLP(modelfile = pitmodel)
        print(f'init pitmodel as PitModelMLP(pitmodel)')
def get_evalret(df):
    """
    Evaluate long-term rank predictions against the naive (rank-unchanged)
    baseline.

    input:
        df ; DataFrame with columns sign/pred_sign, diff/pred_diff,
             startrank/endrank (one row per evaluated stint)
    return:
        2x4 array [[acc, mae, rmse, r2], [acc_naive, mae_naive, rmse_naive, r2_naive]]
    """
    # model metrics: sign-match accuracy + error stats of predicted rank change
    correct = df[df['sign']==df['pred_sign']]
    acc = len(correct)/len(df)
    # mae1 is an alternative MAE; computed but not reported (legacy)
    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values))/len(df)
    rmse = math.sqrt(mean_squared_error(df['pred_diff'].values , df['diff'].values))
    mae = mean_absolute_error(df['pred_diff'].values , df['diff'].values)
    r2 = r2_score(df['pred_diff'].values , df['diff'].values)
    #naive result: predict "rank does not change"
    n_correct = df[df['startrank']==df['endrank']]
    acc_naive = len(n_correct)/len(df)
    mae_naive = np.mean(np.abs(df['diff'].values))
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)
    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}, {mae_naive1}')
    #print(f'pred: acc={acc}, mae={mae}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, {%d}\n \
            naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2, len(df),
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
     )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    #return acc, mae, rmse, r2
def get_evalret_shortterm(df):
    """
    Evaluate short-term rank predictions, focusing on the race leader (rank 0),
    against the naive (rank-unchanged) baseline.

    input:
        df ; DataFrame with columns startlap, startrank/endrank,
             pred_endrank, diff, sign/pred_sign
    return:
        2x5 array [[acc, mae, rmse, r2, signacc],
                   [acc_naive, mae_naive, rmse_naive, r2_naive, naive_signacc]]
    """
    maxlap = np.max(df['startlap'].values)
    minlap = np.min(df['startlap'].values)
    # leader-prediction accuracy: of the laps where the model predicts a
    # leader, how often is it right
    top1 = df[df['endrank']==0]
    top1_pred = df[df['pred_endrank']==0]
    correct = top1_pred[top1_pred['pred_endrank']==top1_pred['endrank']]
    #acc = len(correct)/len(top1_pred)
    acc = len(correct)/(len(top1_pred) + 1e-10)
    rmse = math.sqrt(mean_squared_error(df['pred_endrank'].values , df['endrank'].values))
    mae = mean_absolute_error(df['pred_endrank'].values , df['endrank'].values)
    r2 = r2_score(df['pred_endrank'].values , df['endrank'].values)
    # per-lap normalized absolute error (legacy, not reported)
    mae1 = np.sum(np.abs(df['pred_endrank'].values - df['endrank'].values))
    mae1 = mae1/ (maxlap -minlap +1)
    #naive result: predict "rank does not change"
    top1_naive = df[df['startrank']==0]
    n_correct = top1_naive[top1_naive['startrank']==top1_naive['endrank']]
    # robustness fix: guard against an empty top1_naive (no window started at
    # rank 0), mirroring the divide-by-zero guard used for acc above
    acc_naive = len(n_correct)/len(top1_naive) if len(top1_naive) > 0 else 0.0
    mae_naive = np.mean(np.abs(df['diff'].values))
    mae_naive1 = np.sum(np.abs(df['diff'].values))
    mae_naive1 = mae_naive1 / (maxlap - minlap + 1)
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)
    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}, {mae_naive1}')
    #print(f'pred: acc={acc}, mae={mae}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    # sign accuracy over all rows; naive predicts "no change" (sign == 0)
    correct = df[df['sign']==df['pred_sign']]
    signacc = len(correct)/len(df)
    naive_signcorrect = df[df['sign'] == 0]
    naive_signacc = len(naive_signcorrect) / len(df)
    print('testset size:', len(df), 'minlap:', minlap, 'maxlap:', maxlap)
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, top1_pred: {%d}, top1_naive: {%d}\n \
            naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, top1: {%d}'%(
        acc, mae, rmse, r2, len(top1_pred), len(top1_naive),
        acc_naive, mae_naive, rmse_naive, r2_naive, len(top1)
        )
     )
    return np.array([[acc, mae, rmse, r2, signacc],[acc_naive, mae_naive, rmse_naive, r2_naive, naive_signacc]])
#
# configuration
#
# model path:  <_dataset_id>/<_task_id>-<trainid>/
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'
_test_event = 'Indy500-2018'
#_test_event = 'Indy500-2019'
# length of the training split / of the training split at test time (laps)
_train_len = 40
_test_train_len = 40
# which dynamic features are attached to each series (see FEATURE_* constants)
_feature_mode = FEATURE_STATUS
_context_ratio = 0.
# the three commented groups below are alternative (task, target column,
# experiment id) configurations; exactly one group is active
#_task_id = 'timediff'  # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF   #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank'  #rank, laptime, laptim2rank, timediff2rank...
#
#_task_id = 'lapstatus'  # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS   #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus'  #rank, laptime, laptim2rank, timediff2rank...
_task_id = 'laptime'  # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME   #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank'  #rank, laptime, laptim2rank, timediff2rank...
_inlap_status = 1
_force_endpit_align = False
_include_endpit = False
#_use_mean = False   # mean or median to get prediction from samples
_use_mean = True   # mean or median to get prediction from samples
# joint train the target of (rank, lapstatus)
_joint_train = False
_pitmodel_bias = 0
# In[16]:
# module-level state populated by init()
global_start_offset = {}
global_carids = {}
laptime_data = None
freq = "1min"
decode_carids = {}
_trim = 0
# turn to use gvar
#years = ['2013','2014','2015','2016','2017','2018','2019']
#events = [f'Indy500-{x}' for x in years]
#events_id={key:idx for idx, key in enumerate(events)}
#dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
_lags_seq = [1]
| 85,855 | 33.929211 | 230 | py |
rankpredictor | rankpredictor-master/src/indycar/model/stint_simulator_shortterm_pitmodel_newfeatureoraclepass.py | #!/usr/bin/env python
# coding: utf-8
# ### stint simulator
# based on: Stint-Predictor-Fastrun
#
#
# long term predictor by continuously regressive forecasting at each pitstop
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random, math
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from indycar.model.pitmodel import PitModelSimple, PitModelMLP
from indycar.model.deeparw import DeepARWeightEstimator
# In[2]:
import os
# notebook-export leftovers: seed the RNG from system entropy; the cwd call
# result is discarded
random.seed()
os.getcwd()
#GPUID = 1
# ### global constants
# In[3]:
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# row indices of the per-car feature matrix (features x laps)
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8
# share the memory
#COL_LAPSTATUS_PRED = 8 # for dynamic lapstatus predictions
#LAPSTATUS SAVED in forecast_et
COL_LAPSTATUS_SAVE = 0 #laptime no use
COL_CAUTION_LAPS_INSTINT_SAVE=7
COL_LAPS_INSTINT_SAVE= 8
# added new features
COL_LEADER_PITCNT = 9
# feature-selection bit flags (combinable)
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
FEATURE_LEADERPITCNT = 8
# oracle mode
MODE_ORACLE = 1024 # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2
# oracle mode for training
MODE_NOLAP = 1
MODE_NOTRACK = 2
# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32
# disturbe analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256
# flag value -> printable name (see get_modestr); flags are bitmasks and can
# be OR-ed together
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
        MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
        MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
        MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
        MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
        MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
#
# interface with QuickTest
#
def set_laptimedata(newdata):
    """Replace the module-level laptime_data (hook used by QuickTest)."""
    global laptime_data
    print('Set a new global laptime_data')
    laptime_data = newdata
#
#
#
def load_data(event, year=0):
    """
    Load the final lap-by-lap CSV for one event and build derived views.

    input:
        event ; event name, e.g. 'Indy500-2018' (file ../data/final/C_<event>.csv)
        year  ; optional numeric year; when > 0 the file name becomes
                C_<event>-<year>.csv
    return:
        alldata  ; raw records for all cars
        rankdata ; records deduplicated per (car_number, completed_laps),
                   ordered by elapsed time
        acldata  ; per-(car, lap) view of all cars (see make_cl_data)
    """
    #inputfile = '../data/final/C_'+ event +'-' + year + '-final.csv'
    if year > 0:
        # bug fix: the previous `'-' + year` raised TypeError for the numeric
        # year that the `year > 0` comparison implies; format instead
        inputfile = f'../data/final/C_{event}-{year}.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'
    #outputprefix = year +'-' + event + '-'
    dataset = pd.read_csv(inputfile)
    #dataset.info(verbose=True)
    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1
    # get records for the cars that finish the race
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)
    #print('count of completed cars:', completed_car_count)
    #print('completed cars:', completed_car_numbers)
    #make a copy
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    # keep the first record per (car, lap) in elapsed-time order
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # note: the cl-view of completed cars only (make_cl_data(dataset)) was
    # computed here before but never used; removed
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """
    Build the per-(car, lap) view: one row per car and completed lap with rank,
    elapsed time, per-lap rank/time deltas and status columns.
    """
    # pick up data with valid rank
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    #uni_ds = uni_ds.drop(["unique_id", "best_lap", "current_status", "track_status", "lap_status",
    #                      "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
    #                      "last_pitted_lap","start_position","laps_led"], axis=1)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
                          "last_pitted_lap","start_position","laps_led"], axis=1)
    carnumber = set(uni_ds['car_number'])
    #print('cars:', carnumber)
    #print('#cars=', len(carnumber))
    # faster solution , uni_ds already sorted by car_number and lap
    # per-lap deltas; zero out the first row of each car (where the shifted
    # car_number differs, i.e. a car boundary)
    # NOTE(review): these masked item assignments rely on pandas
    # chained-assignment behavior — confirm they still take effect on the
    # pandas version in use
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'][mask] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['time_diff'][mask] = 0
    #df = uni_ds[['car_number','completed_laps','rank','elapsed_time','rank_diff','time_diff']]
    df = uni_ds[['car_number','completed_laps','rank','elapsed_time',
                 'rank_diff','time_diff',"current_status", "track_status", "lap_status"]]
    return df
# In[5]:
def nan_helper(y):
    """Helper to handle indices and logical indices of NaNs.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, logical indices of NaNs
        - index, a function, with signature indices= index(logical_indices),
          to convert logical indices of NaNs to 'equivalent' indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x= nan_helper(y)
        >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
    """
    nan_mask = np.isnan(y)

    def to_indices(logical):
        # positions where the logical mask is True
        return logical.nonzero()[0]

    return nan_mask, to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
def get_modestr(a):
    """Render the names of all mode bits set in *a* as 'NAME,NAME,...'."""
    names = ['%s,' % name for flag, name in _mode_map.items() if test_flag(a, flag)]
    return ''.join(names)
# endpos -> vector of prediction_length
_track_pred = {}
_track_true = {}
def init_track_model():
    """Reset the per-endpos caches of predicted and true track status."""
    global _track_pred, _track_true
    _track_pred, _track_true = {}, {}
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """
    return the predicted track status

    Samples a yellow-flag duration from an empirical model, subtracts the
    caution laps already observed just before the prediction window, and
    returns a 0/1 vector of length prediction_length. Results are cached per
    endpos (one shared prediction for all cars at the same split point).
    """
    global _track_pred,_track_true
    # this is the perfect track model for Indy500 2018
    track_model = [6,4,4,5,6,6,4]
    if endpos in _track_pred:
        return _track_pred[endpos]
    else:
        #get yflag lap count from the start pred point
        # count consecutive caution laps immediately before the window
        yflaplen = 0
        for i in range(1, context_len):
            if track_rec[- prediction_length - i] == 1:
                yflaplen += 1
            else:
                break
        #laps remain, fill into the future
        trackpred = np.array([0 for x in range(prediction_length)])
        yflap_pred = random.choice(track_model)
        # only extend an already-running caution; a fresh one is not predicted
        if yflaplen > 0 and yflap_pred > yflaplen:
            trackpred[:(yflap_pred - yflaplen)] = 1
        _track_pred[endpos] = trackpred
        # keep the ground truth alongside for later comparison
        _track_true[endpos] = track_rec[- prediction_length:].copy()
        return trackpred
# endpos -> vector of prediction_length
_track_adjust = {}
def init_adjust_track_model():
    """Clear the per-endpos cache of adjusted track-status vectors."""
    global _track_adjust
    _track_adjust = dict()
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """
    Randomly perturb the end of the caution-lap run inside the prediction
    window by -1/0/+1 laps; cached per endpos.

    input:
        tailpos  ; <0 end pos of 1  (negative index of the last caution lap)
    return the predicted track status
    """
    global _track_adjust
    # this is the perfect track model for Indy500 2018
    # -1 shorten, 0 keep, +1 extend the caution run
    track_model = [-1,0,1]
    if endpos in _track_adjust:
        return _track_adjust[endpos]
    else:
        yflap_adjust = random.choice(track_model)
        #laps remain, fill into the future
        trackadjust = track_rec[-prediction_length:].copy()
        if yflap_adjust == -1:
            trackadjust[tailpos] = 0
        elif yflap_adjust == 1:
            # NOTE(review): this clears the tail lap and sets the following
            # one, i.e. it shifts rather than extends the caution run — confirm
            # this is the intended "+1" behavior
            trackadjust[tailpos] = 0
            if (tailpos + 1) <= -1:
                trackadjust[tailpos+1] = 1
        _track_adjust[endpos] = trackadjust
        return trackadjust
# carno -> lap_status
_lap_adjust = {}
_empirical_model = {}
def init_adjust_pitmodel():
    """
    Reset the per-car adjusted-lapstatus cache and the empirical statistics of
    the pit-position shifts that were applied.
    """
    # bug fix: _empirical_model was missing from the global declaration, so
    # the assignment below created a function-local and the module-level
    # statistics were never reset
    global _lap_adjust, _empirical_model
    _lap_adjust = {}
    _empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """
    init the lapstatus for each car, save it for future reference

    Each pit lap (value 1) is moved by a random shift drawn from
    _adjust_model; with force=True the draw is retried until the shifted
    position is inside the array. The adjusted vector is cached per car so
    repeated calls return the same adjustment.

    input:
        carno;
        lapstatus  ; the trueth (0/1 pit-lap vector)
    return:
        the cached adjusted lap-status vector for this car
    """
    if carno not in _lap_adjust:
        #adjust it
        lapadjust = lapstatus.copy()
        for pos in range(0, len(lapstatus)):
            if lapadjust[pos] == 1:
                success = False
                while(not success):
                    # adjust this pit lap position
                    pos_adjust = get_random_choice(_adjust_model)
                    new_pos = pos + pos_adjust
                    if new_pos >= 0 and new_pos < len(lapstatus):
                        #valid
                        lapadjust[pos] = 0
                        lapadjust[new_pos] = 1
                        success = True
                        #add statistics: histogram of applied shifts
                        if pos_adjust not in _empirical_model:
                            _empirical_model[pos_adjust] = 1
                        else:
                            _empirical_model[pos_adjust] += 1
                    # with force=False an out-of-range draw leaves the pit
                    # where it was instead of retrying
                    if force==False:
                        break
        _lap_adjust[carno] = lapadjust
    return _lap_adjust[carno]
def build_random_model(modeldict):
    """
    Build a sampling table from a {value: weight} dict.

    input:
        modeldict ; {val: probability}  (weights need not sum to 1)
    return:
        model ; (n, 2) array: column 0 the values in ascending order,
                column 1 the normalized cumulative distribution
    """
    vals = sorted(modeldict.keys())
    table = np.zeros((len(vals), 2))
    running = 0
    for row, v in enumerate(vals):
        running = running + modeldict[v]
        table[row, 0] = v
        table[row, 1] = running
    # normalize the cdf so the last entry is 1.0
    table[:, 1] = table[:, 1] / running
    return table
def print_model(model, iscdf=True):
    """
    Pretty-print a sampling table as 'value:probability' pairs.

    input:
        model ; (n, 2) array [val, cdf] (or [val, weight] when iscdf=False)
    """
    by_val = model[np.argsort(model[:, 0])]
    # cdf tables convert by differencing; weight tables by normalizing
    denom = 1. if iscdf else np.sum(by_val[:, 1])
    pairs = []
    prev = 0
    for entry in by_val:
        pairs.append((entry[0], (entry[1] - prev) / denom))
        if iscdf:
            prev = entry[1]
    #output
    print(['%d:%.3f' % (val, prob) for val, prob in pairs])
def get_random_choice(model):
    """
    Draw one value from a sampling table.

    input:
        model ; (n, 2) array [val, cdf]
    return:
        a value sampled according to its cdf probability
    """
    u = np.random.rand()
    # number of cdf entries below u == index of the selected bucket
    row = int(np.sum(model[:, 1] < u))
    return int(model[row, 0])
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
# distribution of pit-lap shifts used by the disturbance analysis:
# key = shift in laps, value = weight (normalized by build_random_model)
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
_adjust_model = build_random_model(_modeldict)
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """
    Randomly shift every pit lap inside the prediction window using the
    _adjust_model distribution (non-cached variant of get_adjust_lapstatus).

    input:
        lap_rec ; 0/1 lap-status vector; only the last prediction_length
                  entries are considered
        force   ; True -> redraw until the shifted position is in range
    return the predicted lap status (a copy of the window, pits shifted)
    """
    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            success = False
            while(not success):
                # adjust this pit lap position
                pos_adjust = get_random_choice(_adjust_model)
                new_pos = pos + pos_adjust
                if new_pos >= 0 and new_pos < prediction_length:
                    #valid
                    # NOTE(review): a pit moved forward (new_pos > pos) is
                    # visited again by the loop and may be shifted once more —
                    # confirm this cascading is intended
                    lapadjust[pos] = 0
                    lapadjust[new_pos] = 1
                    success = True
                if force==False:
                    break
    return lapadjust
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
    """
    Randomly shift each pit lap in the prediction window by -1/0/+1 positions
    (shifts that would leave the window are dropped, keeping the pit in place).

    input:
        lap_rec ; 0/1 lap-status vector; only the last prediction_length
                  entries are considered
        endpos  ; unused; kept for interface compatibility
        prediction_length ; window length at the tail of lap_rec
    return:
        a copy of the last prediction_length entries with pits shifted
    """
    adjust_model = [-1,0,1]
    # bug fix: removed a dead `lap_adjust = random.choice(adjust_model)` draw
    # whose result was never used
    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            # adjust this pit lap position
            pos_adjust = random.choice(adjust_model)
            if pos_adjust == -1:
                if (pos - 1 >= 0):
                    lapadjust[pos] = 0
                    lapadjust[pos - 1] = 1
            elif pos_adjust == 1:
                if (pos + 1 < prediction_length):
                    # NOTE(review): a pit moved forward is re-examined on the
                    # next iteration and can drift further right — confirm
                    lapadjust[pos] = 0
                    lapadjust[pos + 1] = 1
    return lapadjust
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
    """
    Sample a pit-stop prediction for the next prediction_length laps.

    A stint length is drawn from an empirical Indy500-2018 model (two lists,
    selected by the caution-lap count of the current stint); if the sampled
    stint ends inside the prediction window, the corresponding lap is marked 1.

    return:
        0/1 vector of length prediction_length
    """
    # this is the perfect empirical pit model for Indy500 2018
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
            [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
            [46, 45, 43, 48, 46, 45, 45, 43]]
    pit_model = pit_model_all
    if cuation_laps_instint > 10:
        # many caution laps -> sample from the first (shorter-stint) list
        stint_len = random.choice(pit_model[0])
    else:
        stint_len = random.choice(pit_model[1])
    # mark the predicted pit lap if it falls inside the window
    window = np.array([0] * prediction_length)
    if laps_instint < stint_len <= laps_instint + prediction_length:
        window[stint_len - laps_instint - 1] = 1
    return window
def make_dataset_byevent(runs, prediction_length, freq,
                       useeid = False,
                       run_ts= COL_LAPTIME,
                       test_event = 'Indy500-2018',
                       test_cars = [],
                       use_global_dict = True,
                       oracle_mode = MODE_ORACLE,
                       half_moving_win = 0,
                       train_ratio=0.8,
                       log_transform = False,
                       context_ratio = 0.,
                       verbose = False
                ):
    """
    split the ts to train and test part by the ratio

    Builds GluonTS ListDatasets from the global laptime_data: series of the
    test event become (many) rolling-window test records, all other events go
    to the train set whole. The run_ts/test_event/feature-mode parameters are
    overridden by the module globals _run_ts/_test_event/_feature_mode at the
    top of the body.

    input:
        runs ; index into laptime_data, or <0 for all events
        oracle_mode: false to simulate prediction in real by
                     set the covariates of track and lap status as nan in the testset
                     (bitmask of MODE_* flags controlling how the track/lap
                     covariates in the prediction window are filled)
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen  (step between
                     consecutive test split points)
    return:
        train_ds, test_ds (ListDataset), train_set, test_set (record lists)
    NOTE: the mutable default test_cars=[] is read-only here, so it is safe.
    """
    # module-level configuration overrides the keyword arguments
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    init_track_model()
    init_adjust_track_model()
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []
    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out: only the test event is processed by this function
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10
        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            # rec_raw keeps the unmodified covariates for the mae statistics
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            #check carno in test_cars
            if len(test_cars)>0 and carno not in test_cars:
                continue
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)
            # selection of features: zero out disabled covariates
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                    'start': start,
                    'feat_static_cat': static_cat,
                    'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                           rec[COL_LAPSTATUS,:]]
                     }
                  )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1
                #bug fix, fixed the split point for all cars/ts
                for endpos in range(max_len, context_len+prediction_length,
                        step):
                    #check if enough for this ts
                    if endpos > totallen:
                        continue
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    # stint state at the last observed lap before the window
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode: fill the prediction window of the covariates
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec
                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec
                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec]
                             }
                          )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                             }
                          )
                    test_rec_cnt += 1
            #add one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
        train_set.extend(_train)
        test_set.extend(_test)
    print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)
    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile, freq, prediction_length, cardinality, train_ds, test_ds):
    """Serialize a dataset bundle to *datafile* via pickle.

    The payload written is the list
    [freq, prediction_length, cardinality, train_ds, test_ds],
    which load code is expected to unpack in that order.
    """
    payload = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as fout:
        # highest protocol: fastest/most compact representation
        pickle.dump(payload, fout, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[6]:
def predict(test_ds, predictor):
    """Run *predictor* over *test_ds* and return (tss, forecasts).

    Draws 100 sample paths per series; both generators returned by
    make_evaluation_predictions are fully materialized into lists.
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,        # test dataset
        predictor=predictor,    # predictor
        num_samples=100,        # number of sample paths we want for evaluation
    )
    # materialize forecasts first, then the ground-truth series,
    # matching the original consumption order
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name, trainid):
    """Load (or construct) a predictor for *model_name* and forecast *test_ds*.

    Serialized deepAR variants are deserialized from
    ../models/remote/<_dataset_id>/<_task_id>-<trainid>/; the baselines
    (naive/zero/arima) are built in-process.

    Returns [tss, forecasts] on success, [] for an unknown model name.
    """
    with mx.Context(mx.gpu(7)):
        pred_ret = []
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'

        # All serialized deepAR variants share the same load path;
        # map the model name to its on-disk directory name.
        serialized = {
            'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
            'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
            'oracle': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
            'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
            'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
            'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
        }

        if model_name in serialized:
            modeldir = rootdir + serialized[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # naive: repeat the last observed value
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq=freq, prediction_length=prediction_length)
        # zero: predicts no change, i.e. keeps the rank unchanged
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq=freq, prediction_length=prediction_length)
        # arima via the R bridge
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq=freq,
                                           prediction_length=prediction_length, trunc_length=60)
        else:
            print(f'error: model {model_name} not support yet!')
            return pred_ret

        tss, forecasts = predict(test_ds, predictor)
        pred_ret = [tss, forecasts]
        return pred_ret
def load_model(prediction_length, model_name, trainid, epochs=1000, exproot='../models/remote'):
    """Load (or construct) a predictor for *model_name* without running it.

    Serialized models live under <exproot>/<_dataset_id>/<_task_id>-<trainid>/.
    Only 'oracle' honors the *epochs* parameter (it maps to the weighted
    deepARW model directory).

    Returns the predictor, or None for an unknown model name.

    Bug fix: the original fell through to `return predictor` with
    `predictor` unbound for unknown names, raising UnboundLocalError.
    """
    with mx.Context(mx.gpu(7)):
        rootdir = f'{exproot}/{_dataset_id}/{_task_id}-{trainid}/'

        # Serialized models that share the standard load/print path.
        serialized = {
            'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
            'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
            'weighted-oracle': f'deepARW-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
            'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
            'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
            'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
        }

        if model_name in serialized:
            modeldir = rootdir + serialized[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        elif model_name == 'oracle':
            # NOTE: 'oracle' currently maps to the *weighted* deepARW model
            # (debug for the weighted model) and is the only variant that
            # uses the epochs parameter.
            model = f'deepARW-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
        # naive: repeat the last observed value
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq=freq, prediction_length=prediction_length)
        # zero: keeps the rank unchanged
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq=freq, prediction_length=prediction_length)
        # arima via the R bridge
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq=freq,
                                           prediction_length=prediction_length, trunc_length=60)
        else:
            print(f'error: model {model_name} not support yet!')
            return None

        return predictor
# In[7]:
#calc rank
def eval_rank_bytimediff(test_ds, tss, forecasts, prediction_length):
    """
    Evaluate rank prediction for timediff models.

    works for one event only

    input:
        test_ds ; test set of a single event
        tss, forecasts ; output of predict()
        prediction_length ; expected forecast horizon
    return:
        rank_ret ; list of [lap, time_diff, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [true_timediff, pred_timediff]}}

    Bug fix: the length-mismatch message was missing its f-string prefix and
    printed the literal '{prediction_length}:{prediction_len}'.
    """
    carlist = []

    # forecast-start lap -> {carno -> [true, predicted] timediff arrays}
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global static category id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # NOTE(review): error path returns [] while the success path
            # returns a 2-tuple; callers that unpack will fail here
            return []

        # point forecast: median over the sample paths
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))

        timediff_array = tss[idx].values.copy()

        # save the prediction, keyed by the lap where the forecast starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                                               forecast_laptime_mean.copy()]

    # calc rank per forecast-start lap
    rank_ret = []
    for lap in forecasts_et.keys():
        # get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}

        # [0]=true, [1]=predicted
        time_diff = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        # rank = double argsort (smaller timediff -> better rank)
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, time_diff, true_rank, pred_rank])

    return rank_ret, forecasts_et
#calc rank
def eval_laptime(test_ds, tss, forecasts, prediction_length, start_offset):
    """
    evaluate laptime forecasting directly (no rank conversion)

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                  contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event
                  (unused here; kept for signature parity with eval_rank)
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, lap_time, true_laptime, pred_laptime]
        forecasts_et ; {[completed_laps][carno]} ->(laptime, laptime_pred)

    Bug fix: the length-mismatch message was missing its f-string prefix.
    """
    carlist = []

    # forecast-start lap -> {carno -> [true, predicted] laptime arrays}
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global static category id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []

        # point forecast: mean over the sample paths
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))

        laptime_array = tss[idx].values.copy()
        laptime_array_hat = tss[idx].values.copy()
        laptime_array_hat[-prediction_len:] = forecast_laptime_mean

        # save the prediction, keyed by the lap where the forecast starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
                                               laptime_array_hat[-prediction_len:].copy()]

    # assemble per-lap matrices
    rank_ret = []
    for lap in forecasts_et.keys():
        # get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}

        # [0]=true, [1]=predicted
        lap_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        true_laptime = lap_time[0]
        pred_laptime = lap_time[1]

        rank_ret.append([lap, lap_time, true_laptime, pred_laptime])

    return rank_ret, forecasts_et
#calc rank
def eval_rank(test_ds, tss, forecasts, prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                  contain features to identify the eventid
        start_offset ; DataFrame with the elapsed time at lap0 per car_number ->
                  laptime forecasts are accumulated into elapsed time
                  ("laptime2rank" mode); any non-DataFrame value -> the target
                  series is ranked directly
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)

    Bug fix: the length-mismatch message was missing its f-string prefix.
    """
    carlist = []

    # forecast-start lap -> {carno -> [true, predicted] elapsed-time arrays}
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global static category id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        # elapsed time of lap0 for this car (laptime2rank mode only)
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number'] == carno)].elapsed_time.values[0]

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []

        # point forecast: mean or median of the sample paths (module switch)
        if _use_mean:
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))
        else:
            forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))

        if isinstance(start_offset, pd.core.frame.DataFrame):
            # laptime2rank: accumulate laptimes into elapsed time
            laptime_array = tss[idx].values.copy()
            elapsed_time = np.cumsum(laptime_array) + offset

            laptime_array = tss[idx].values.copy()
            laptime_array[-prediction_len:] = forecast_laptime_mean
            elapsed_time_hat = np.cumsum(laptime_array) + offset
        else:
            # rank-direct: the target series is already the ranking metric
            elapsed_time = tss[idx].values.copy()
            elapsed_time_hat = tss[idx].values.copy()
            elapsed_time_hat[-prediction_len:] = forecast_laptime_mean

        # save the prediction, keyed by the lap where the forecast starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]

    # calc rank per forecast-start lap
    rank_ret = []
    for lap in forecasts_et.keys():
        # get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}

        # [0]=true, [1]=predicted
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        # rank = double argsort (smaller elapsed time -> better rank)
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])

    return rank_ret, forecasts_et
def get_acc(rank_ret, prediction_length, verbose=False):
    """
    Aggregate ranking metrics over a list of forecast records.

    input:
        rank_ret: list of [lap, elapsed_time, true_rank, pred_rank];
                  only the rank columns ([2] and [3]) are used
    return:
        ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
         (record count per metric ...))
        the paired counts let the caller compute micro/macro averages
    """
    top1acc, top1acc_farmost = 0, 0
    top5acc, top5acc_farmost = 0, 0
    tau = 0
    rmse = 0.
    mae = 0.

    for rec in rank_ret:
        trueRank, predRank = rec[2], rec[3]

        # top1: correctly predicted leaders (rank==0); "farmost" only
        # considers the last step of the forecast horizon
        top1acc += np.sum((trueRank == 0) & (predRank == 0))
        top1acc_farmost += np.sum((trueRank[:, -1] == 0) & (predRank[:, -1] == 0))
        # top5 analogue
        top5acc += np.sum((trueRank < 5) & (predRank < 5))
        top5acc_farmost += np.sum((trueRank[:, -1] < 5) & (predRank[:, -1] < 5))

        # rank correlation and error metrics
        tao, _ = stats.kendalltau(trueRank, predRank)
        tau += tao
        rmse += mean_squared_error(predRank, trueRank)
        mae += np.sum(np.abs(predRank - trueRank))

    recnt = len(rank_ret)
    if recnt > 0:
        top1acc = top1acc * 1.0 / (recnt * prediction_length)
        top1acc_farmost = top1acc_farmost * 1.0 / recnt
        top5acc = top5acc * 1.0 / (5 * recnt * prediction_length)
        top5acc_farmost = top5acc_farmost * 1.0 / (5 * recnt)
        tau = tau / recnt
        rmse = rmse / recnt
        mae = mae / recnt

    # debug only: report mae in the tau slot when ranking lap status
    if _run_ts == COL_LAPSTATUS:
        tau = mae

    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', top1acc,
              'top1acc_farmost=', top1acc_farmost,
              'top5acc=', top5acc,
              'top5acc_farmost=', top5acc_farmost,
              )
        print('tau = ', tau,
             'rmse = ', rmse,
             'mae = ', mae)

    return ((top1acc,top1acc_farmost,top5acc,top5acc_farmost,tau,rmse),
            (recnt*prediction_length,recnt,5*recnt*prediction_length,5*recnt,recnt,recnt))
# In[ ]:
# In[8]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc, datamode='', models=[]):
    """
    Run <runs> repetitions of the experiment grid (half x plen x trainid)
    and average the metrics across runs.

    input:
        runs ; number of repetitions to average over
        plens ; prediction lengths, e.g. [2,5,10]
        half ; half-moving-window modes, e.g. [False]
        trainids ; e.g. ["r0.5"] or ["indy500-r0.2","indy500-r0.4","indy500"]
        train_ratio ;
        testfunc ; a callable such as run_exp_predpit/run_exp_predtrack, or a
                   string, in which case run_exp is invoked with datamode/models
    return:
        dfret ; average result of multiple runs
            dataframe['model' , 'prediction_length', 'halfmode','datamode','trainid',
             'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse',
             'top1acc_std','top1acc_farmost_std','top5acc_std','top5acc_farmost_std','tau_std','rmse_std']
        alldata_ret ; for debug
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
                pred_ret[model] -> [tss, forecasts]
                test_ds[model] -> test_ds
                rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
                    forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)

    On invalid input, prints an error and returns (None, None).
    (Bug fix: the original returned bare None, which broke the conventional
    `df, dataret = run_test(...)` unpacking at call sites.)
    """
    if plens == [] or half == [] or trainids == []:
        print("error with empty settings")
        return None, None

    # a string testfunc means run_exp, which requires datamode and models
    if isinstance(testfunc, str) and (datamode == '' or models == []):
        print("error with testfunc")
        return None, None

    allret = []
    alldata_ret = []
    for runid in range(runs):
        exp_data = []
        exp_result = []
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('='*10)
                    if not isinstance(testfunc, str):
                        pred_ret, test_ds, rank_ret, metric_ret = testfunc(plen, halfmode,
                                                      train_ratio=train_ratio,
                                                      trainid=trainid)
                    else:
                        pred_ret, test_ds, rank_ret, metric_ret = run_exp(plen, halfmode,
                                                      train_ratio=train_ratio,
                                                      trainid=trainid,
                                                      datamode=datamode,
                                                      models=models)
                    # save the raw run output for debugging
                    exp_data.append((pred_ret, test_ds, rank_ret))
                    exp_result.extend(metric_ret)

        # one metrics dataframe per repetition
        result = pd.DataFrame(exp_result, columns = ['model' , 'prediction_length', 'halfmode',
                                                     'datamode','trainid',
                                                     'top1acc','top1acc_farmost','top5acc',
                                                     'top5acc_farmost','tau','rmse'])
        allret.append(result)
        alldata_ret.append(exp_data)

    # stack the 6 metric columns of every run and reduce across runs
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :, :] = ret[['top1acc','top1acc_farmost','top5acc',
                                    'top5acc_farmost','tau','rmse']].values

    averagemat = np.mean(metrics[:,:,:], axis=0)
    stdmat = np.std(metrics[:,:,:], axis=0)

    dfhead = allret[0][['model' , 'prediction_length', 'halfmode', 'datamode','trainid']]
    dfaverage = pd.DataFrame(averagemat, columns = ['top1acc','top1acc_farmost','top5acc',
                                                    'top5acc_farmost','tau','rmse'])
    dfstd = pd.DataFrame(stdmat, columns = ['top1acc_std','top1acc_farmost_std','top5acc_std',
                                            'top5acc_farmost_std','tau_std','rmse_std'])
    dfret = pd.concat([dfhead, dfaverage, dfstd], axis=1)

    return dfret, alldata_ret
def checkret_status(dataret, runid=0, idx=0, model='oracle'):
    """
    Print total yellow-flag and pit counts inside the forecast window of a
    saved test set.

    alldata_ret layout (see run_test):
        [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
            pred_ret[model] -> [tss, forecasts]
            test_ds[model]  -> test_ds
    """
    # recover the prediction length from the stored forecast samples
    _, plen = dataret[runid][idx][0][model][1][0].samples.shape
    test_ds = dataret[runid][idx][1][model]

    ds_iter = iter(test_ds)
    yfcnt = 0
    pitcnt = 0
    for _ in range(len(test_ds)):
        rec = next(ds_iter)
        carno = decode_carids[rec['feat_static_cat'][0]]
        track_rec, lap_rec = rec['feat_dynamic_real']
        # counts accumulated over the forecast window only
        yfcnt += np.sum(track_rec[-plen:])
        pitcnt += np.sum(lap_rec[-plen:])

    print('yfcnt:', yfcnt, 'pitcnt:', pitcnt)
def get_ref_oracle_testds(plens, halfs, train_ratio=0.8, test_cars=[]):
    """
    Build oracle-mode test datasets for every (prediction_length, half) pair.

    return:
        {'<prediction_length>-<half_moving_win>': test_ds}
    """
    testset = {}
    for plen in plens:
        for half_win in halfs:
            _, ref_test_ds, _, _ = make_dataset_byevent(events_id[_test_event], plen, freq,
                                                        oracle_mode=MODE_ORACLE,
                                                        run_ts=_run_ts,
                                                        test_cars=test_cars,
                                                        half_moving_win=half_win,
                                                        train_ratio=train_ratio)
            # key encodes both grid dimensions
            testset['%d-%d' % (plen, half_win)] = ref_test_ds
    return testset
def checkret_confusionmat(dataret, ref_testset, runid= 0, testid = '', model='oracle'):
    """
    output the accuracy metrics split by <trackstatus, lapstatus> type
    ('00','10','01','11', plus 'aa' for all records combined)

    input:
        dataret ; debug output of run_test (alldata_ret)
        ref_testset ; '<plen>-<half>' -> oracle test ds (see get_ref_oracle_testds)
        runid ; which repetition of dataret to inspect
        testid ; label copied into the output rows
        model ; model name to select inside dataret
    return:
        dfacc ; dataframe ['testid','plen','type','reccnt',
                           'top1acc','top1acc_farmost','top5acc',
                           'top5acc_farmost','tau','rmse']
    """
    plen_length = len(dataret[runid])
    dflist = []
    for idx in range(plen_length):
        # recover the prediction length from the stored forecast samples
        _, plen = dataret[runid][idx][0][model][1][0].samples.shape
        test_ds = dataret[runid][idx][1][model]
        rank_ret = dataret[runid][idx][2][model][0]

        # reference oracle test set of the same prediction length
        # (half mode hard-wired to 0 here)
        key = '%d-%d'%(plen,0)
        if key not in ref_testset:
            print(f'error, {key} not found in ref_testset')
            continue
        ref_oracle_testds = ref_testset[key]
        if len(ref_oracle_testds) != len(test_ds):
            print('error, size of testds mismatch', len(ref_oracle_testds), len(test_ds))
            continue

        # confusion matrix for <trackstatus, lapstatus> type: 00,01,10,11
        # lap (start lap of prediction) -> (yellow-flag count, pit count),
        # accumulated over every car starting its forecast at that lap
        lapmap = {}
        ds_iter = iter(ref_oracle_testds)
        for recid in range(len(ref_oracle_testds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            track_rec,lap_rec = test_rec['feat_dynamic_real']
            # counts restricted to the forecast window
            yfcnt = np.sum(track_rec[-plen:])
            pitcnt = np.sum(lap_rec[-plen:])
            lap = len(track_rec) - plen + 1
            if lap not in lapmap:
                lapmap[lap] = (yfcnt, pitcnt)
            else:
                oldtype = lapmap[lap]
                lapmap[lap] = (yfcnt + oldtype[0], pitcnt + oldtype[1])

        # split the rank_ret by laptype and score each subset
        types=['00','10','01','11']
        acc_ret = []
        for laptype in types:
            check_ret = []
            for item in rank_ret:
                typecnt = lapmap[item[0]]
                # '1' in a position means any yellow flag / any pit occurred
                thetype = ('0' if typecnt[0]==0 else '1') + ('0' if typecnt[1]==0 else '1')
                if thetype == laptype:
                    check_ret.append(item)
            # get acc for this subset
            metrics = get_acc(check_ret,plen)
            recret = [testid, plen, laptype, len(check_ret)]
            recret.extend(metrics[0])
            acc_ret.append(recret)

        # add one row scoring all records together ('aa')
        metrics = get_acc(rank_ret,plen)
        recret = [testid, plen, 'aa', len(rank_ret)]
        recret.extend(metrics[0])
        acc_ret.append(recret)

        _dfacc = pd.DataFrame(acc_ret, columns = ['testid','plen',
                'type','reccnt','top1acc','top1acc_farmost','top5acc',
                'top5acc_farmost','tau','rmse'])
        dflist.append(_dfacc)

    dfacc = pd.concat(dflist, axis=0)
    return dfacc
# In[9]:
def check_testds(datamode, test_cars=[]):
    """
    report mae, etc (printed inside make_dataset_byevent)

    NOTE: relies on the module globals `plens`, `half` and `train_ratio`
    for the sweep configuration.
    """
    for plen in plens:
        for half_win in half:
            # datasets are built only for their side effects (printed stats)
            make_dataset_byevent(events_id[_test_event], plen, freq,
                                 oracle_mode=datamode,
                                 run_ts=_run_ts,
                                 test_cars=test_cars,
                                 half_moving_win=half_win,
                                 train_ratio=train_ratio)
def dotest(config):
    """
    Run every (model, testfunc) combination described by *config* and
    collect both the averaged metrics and the confusion-split accuracies.

    input:
        config ; {model_name: {testfunc_or_name: datamode}}
    return:
        (dfret, dfacc) ; concatenated run_test results and
        checkret_confusionmat splits

    NOTE: relies on the module globals `runs`, `plens`, `half`, `trainids`,
    `train_ratio` and `ref_testset`.
    """
    acclist = []
    dflist = []
    for model, conf in config.items():
        for teststr, datamode in conf.items():
            df, dataret = run_test(runs, plens, half, trainids,
                                   train_ratio, teststr,
                                   datamode=datamode, models=[model])
            # per-<track,lap>-type accuracy breakdown for the same run
            acc = checkret_confusionmat(dataret, ref_testset,
                                        testid=teststr, model=model)
            dflist.append(df)
            acclist.append(acc)

    dfret = pd.concat(dflist, axis=0)
    dfacc = pd.concat(acclist, axis=0)
    return dfret, dfacc
#
# simulator
#
def get_pitlaps(verbose = True, prediction_length=2):
    """
    collect pitlaps info from COL_LAPSTATUS

    input:
        laptime_data ; module global, per-event data tensors
        _test_event ; module global, name of the event to scan
        events ; module global, event id -> event name
        _train_len ; minimum laps for a ts(otherwise, discard)
        global_carids ; carno-> carid mapping
    return:
        ret_pitlaps ; sorted, de-duplicated list of laps at which any car pits
        all_pitlaps ; carno -> list of pit laps for that car
        max_lap ; maximum ts length over all cars of the event
    """
    run_ts = _run_ts
    all_pitlaps = {}  # carno -> pitstops
    max_lap = 0
    for _data in laptime_data:
        # only the configured test event is scanned
        if events[_data[0]] != _test_event:
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < _train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]

            # laps flagged as pit stops in the lap-status row
            lap_status = rec[COL_LAPSTATUS, :]
            pitstops = np.where(lap_status == 1)[0]

            # filter out inlaps (when _inlap_status > 0)
            # NOTE(review): the every-other-element slicing assumes the flags
            # come in strict (inlap, pitlap) pairs -- confirm against the
            # data encoding
            if _inlap_status !=0:
                if _inlap_status == 1:
                    #remove inlaps in previous of pit stops
                    pitstops_tmp = [pitstops[x] for x in range(1, len(pitstops), 2)]
                    pitstops = pitstops_tmp
                elif _inlap_status == 2:
                    #remove inlaps in next lap of pit stops
                    pitstops_tmp = [pitstops[x] for x in range(0, len(pitstops), 2)]
                    pitstops = pitstops_tmp

            all_pitlaps[carno] = list(pitstops)
            # append the end lap
            if _include_endpit:
                all_pitlaps[carno].append(totallen-1)

    # flatten, deduplicate and sort the union of pit laps over all cars
    allset = []
    for l in all_pitlaps.keys():
        allset.extend(all_pitlaps[l])
    ret_pitlaps = sorted(list(set(allset)))

    return ret_pitlaps, all_pitlaps, max_lap
def get_nextpit_raw(pitlaps, startlap):
    """
    input:
        pitlaps ; dict carno -> list of pit-stop laps (ascending)
        startlap ;
    return:
        (nextpit_map, max_next)
        nextpit_map ; carno -> first pit lap strictly after startlap
                      (cars with no later pit are omitted)
        max_next ; maximum over all cars; -1 stands in for cars
                   with no further pit
    """
    nextpit_map = {}
    nextpit = []
    for carno, laps in pitlaps.items():
        # first pit strictly after startlap, if any
        later = [lap for lap in laps if lap > startlap]
        if later:
            nextpit_map[carno] = later[0]
            nextpit.append(later[0])
        else:
            # placeholder keeps this car represented in the max() below
            nextpit.append(-1)
    return nextpit_map, max(nextpit)
def get_nextpit(pitlaps, startlap):
    """
    input:
        pitlaps ; dict carno -> list of pit-stop laps
        startlap ;
    return:
        (nextpit_map, maxpit)
        nextpit_map ; carno -> first pit lap strictly after startlap
                      (cars with no later pit are omitted)
        maxpit ; the largest such next-pit lap among the cars that pit
                 exactly at startlap ("hit" cars), or -1 when none of
                 them pits again
    """
    # cars whose pit stop falls exactly on startlap
    hits = [carno for carno, laps in pitlaps.items() if startlap in laps]

    # first pit strictly after startlap, per car
    nextpit_map = {}
    for carno, laps in pitlaps.items():
        later = [lap for lap in laps if lap > startlap]
        if later:
            nextpit_map[carno] = later[0]

    # restrict the horizon to the hit cars only
    maxpit = -1
    for carno in hits:
        if carno in nextpit_map:
            maxpit = max(maxpit, nextpit_map[carno])

    return nextpit_map, maxpit
def sim_init():
    """
    Snapshot the pit-related feature rows of the test event so the simulator
    can restore the ground truth later (see update_onets).

    Copies COL_LAPSTATUS, COL_CAUTION_LAPS_INSTINT and COL_LAPS_INSTINT into
    their *_SAVE companion rows, in place, inside laptime_data.
    """
    for _data in laptime_data:
        # only the configured test event is touched
        if events[_data[0]] != _test_event:
            continue
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber]
            rec = _data[2][rowid]
            rec[COL_LAPSTATUS_SAVE, :] = rec[COL_LAPSTATUS, :]
            rec[COL_CAUTION_LAPS_INSTINT_SAVE, :] = rec[COL_CAUTION_LAPS_INSTINT, :]
            rec[COL_LAPS_INSTINT_SAVE, :] = rec[COL_LAPS_INSTINT, :]
def update_lapstatus(startlap):
    """
    Re-simulate the lap status from *startlap* onward for every car of the
    test event (delegates the per-series work to update_onets).
    """
    for _data in laptime_data:
        # only the configured test event is updated
        if events[_data[0]] != _test_event:
            continue
        for rowid in range(_data[2].shape[0]):
            # _data[2][rowid] is the feature matrix, _data[1][rowid] the carno
            update_onets(_data[2][rowid], startlap, _data[1][rowid])
# pit prediction model used by the simulator (update_onets/debug_pitmodel);
# must be assigned elsewhere before those functions are called
_pitmodel = None
def update_onets(rec, startlap, carno):
    """
    update lapstatus after startlap based on rec by the pit prediction model

    input:
        rec ; a ts with multiple features COL_XXX (features x laps),
              modified in place
        startlap ; last lap whose ground truth is kept; simulation starts here
        carno ; car number (used for debug reporting only)
    return:
        None; rec is updated in place for COL_LAPSTATUS,
        COL_CAUTION_LAPS_INSTINT and COL_LAPS_INSTINT
    """
    # valid (non-nan) length of the target series
    nans, x= nan_helper(rec[_run_ts,:])
    nan_count = np.sum(nans)
    recx = rec[:, ~np.isnan(rec[_run_ts,:])]
    totallen = recx.shape[1]
    # nothing to simulate when the start lap lies beyond the series
    if startlap >= totallen:
        return

    # reset status: keep the saved ground truth up to startlap (inclusive),
    # clear the lap-status flags beyond it
    endpos = startlap + 1
    rec[COL_LAPSTATUS,:] = 0
    rec[COL_LAPSTATUS,:endpos] = rec[COL_LAPSTATUS_SAVE, :endpos]
    rec[COL_CAUTION_LAPS_INSTINT,:endpos] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :endpos]
    rec[COL_LAPS_INSTINT, :endpos] = rec[COL_LAPS_INSTINT_SAVE, :endpos]

    debug_report('start update_onets', rec[COL_LAPSTATUS], startlap, carno)

    # roll forward: repeatedly ask the pit model for the lap of the next stop
    curpos = startlap
    while True:
        caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, curpos])
        laps_instint = int(rec[COL_LAPS_INSTINT, curpos])

        # model returns the stint length; convert to an absolute lap index
        pred_pit_laps = _pitmodel.predict(caution_laps_instint, laps_instint)
        nextpos = curpos + pred_pit_laps - laps_instint

        if nextpos >= totallen:
            # predicted pit falls beyond the race: extend the in-stint
            # counters to the last lap and stop simulating
            nextpos = totallen - 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos+1] = caution_laps_instint
            for _pos in range(curpos+1, nextpos+1):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            break
        else:
            # a valid pit stop at nextpos
            rec[COL_LAPSTATUS, nextpos] = 1
            if _inlap_status != 0:
                # inlap handling: flag the neighboring lap as pit as well
                if _inlap_status == 1:
                    # inlap is the lap before the stop
                    rec[COL_LAPSTATUS, nextpos-1] = 1
                else:
                    # inlap is the lap after the stop
                    # NOTE(review): no boundary check on nextpos+1 here
                    rec[COL_LAPSTATUS, nextpos+1] = 1

            # carry the caution count through the stint, reset both
            # counters at the pit lap
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos] = caution_laps_instint
            rec[COL_CAUTION_LAPS_INSTINT, nextpos] = 0
            for _pos in range(curpos+1, nextpos):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            rec[COL_LAPS_INSTINT, nextpos] = 0

            # go forward from the new pit lap
            curpos = nextpos

    debug_report('after update_onets', rec[COL_LAPSTATUS], startlap, carno)
    return
def debug_pitmodel(startlap, carno, laps_instint, caution_laps_instint, samplecnt=1000):
    """
    Sample the pit model repeatedly to inspect its next-pit distribution.

    input:
        startlap ; current lap position
        carno    ; car number (unused, kept for symmetry with callers)
        laps_instint ; laps already run in the current stint
        caution_laps_instint ; caution laps in the current stint
        samplecnt ; number of samples to draw
    ret:
        list of predicted next-pit lap positions, one per sample
    """
    base = startlap - laps_instint
    return [base + _pitmodel.predict(caution_laps_instint, laps_instint)
            for _ in range(samplecnt)]
#debug tracking status
#status matrix : laps x ( endCol x 5 features)
#features: target, lapstatus, lap_instint, caution_instint, trackstatus
# NOTE(review): appears unused in this section of the file -- verify writers elsewhere
_status_mat = {} # stepid -> status matrix
def debug_report_mat(startlap, maxnext):
    """
    output the status of the simulation

    NOTE(review): this function looks unfinished -- it sets up formatting
    constants and iterates the test event's time series, but the loop body
    has no observable effect (nothing is printed or stored).
    """
    fixedWidth = 5
    endCol = 4
    run_ts = _run_ts
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        #header carno | lap#...
        #fixed width
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
# Car numbers whose intermediate state is dumped by the debug_* helpers;
# an empty list disables all debug output.
_debug_carlist = []
#_debug_carlist = [12]
def debug_report_ts(msg, rec, startlap, carno, col= COL_LAPSTATUS):
    """Print one feature row of `rec`, split at startlap, for watched cars only."""
    if carno in _debug_carlist:
        split = startlap + 1
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[col, : split])
        print('='*10)
        print(rec[col, split:])
def debug_report(msg, rec, startlap, carno):
    """Print a 1-D status vector, split at startlap, for watched cars only."""
    if carno in _debug_carlist:
        split = startlap + 1
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[: split])
        print('='*10)
        print(rec[split:])
def debug_print(msg):
    """Print `msg` only when at least one car is being debugged."""
    if _debug_carlist:
        print(msg)
# works on predicted lap status
def sim_onestep_pred(predictor, prediction_length, freq,
        startlap, endlap,
        oracle_mode = MODE_ORACLE,
        sample_cnt = 100,
        verbose = False
        ):
    """
    Autoregressive rolling forecast from startlap to endlap, on the
    (possibly re-simulated) predicted lap status.

    Windows of `prediction_length` laps are predicted one after another;
    each window's mean/median forecast is written back into the target row
    (row 2 of forecasts_et) so the next window conditions on the model's
    own predictions.

    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
        forecast_samples; save the samples, the farest samples
            {}, carno -> samplecnt of the target
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    # dummy start timestamp; only the frequency matters to GluonTS
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    forecasts_samples = {}
    # NOTE(review): .copy() is shallow -- the per-car arrays are shared with
    # laptime_data; rows are .copy()'d again below before use
    _laptime_data = laptime_data.copy()
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + prediction_length + 1):
    while(endpos <= endlap + prediction_length):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            #ipdb.set_trace()
            if verbose:
                print(f'after ====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}, cars={_data[2].shape[0]}')
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # skip cars that have already finished/retired before endpos
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # <3, totallen> -- initialized once per car on the first window
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    # row 0 keeps the saved ground-truth lap status
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS_SAVE, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                    # for p-risk
                    forecasts_samples[carno] = np.zeros((sample_cnt))
                # forecasts_et will be updated by forecasts (autoregression)
                target_val = forecasts_et[carno][2,:]
                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test mode: fabricate the covariates of the forecast window
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #        laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set
                #train real features -- pick covariate set by feature_mode
                real_features = {
                    FEATURE_STATUS:[track_rec,lap_rec],
                    FEATURE_PITAGE:[track_rec,lap_rec,pitage_rec],
                    FEATURE_LEADERPITCNT:[track_rec,lap_rec,rec[COL_LEADER_PITCNT,:endpos]]
                }
                _test.append({'target': target_val[:endpos].astype(np.float32),
                              'start': start,
                              'feat_static_cat': static_cat,
                              'feat_dynamic_real': real_features[feature_mode]
                              }
                             )
                test_rec_cnt += 1
                #debug
                #debug_report('simu_onestep', rec, startlap, carno, col= _run_ts)
                #debug_report(f'simu_onestep: {startlap}-{endlap}, endpos={endpos}', target_val[:endpos], startlap, carno)
        # end of for each ts
        # RUN Prediction here (one batch for all cars of this window)
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=sample_cnt,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            # point estimate: mean or median over the sample paths (_use_mean)
            if _use_mean:
                forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            else:
                forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_length))
            #update the forecasts , ready to use in the next prediction(regresive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
            #save the samples, the farest samples (overwritten each window; the
            #last window's final-lap samples survive)
            forecasts_samples[carno][:] = forecasts[idx].samples[:,-1].reshape(-1)
        #go forward
        endpos += prediction_length
    return forecasts_et, forecasts_samples
# works on lapstatus ground truth
def sim_onestep_ex(predictor, prediction_length, freq,
        startlap, endlap,
        oracle_mode = MODE_ORACLE,
        verbose = False
        ):
    """
    Autoregressive rolling forecast from startlap to endlap on the
    ground-truth lap status (near-duplicate of sim_onestep_pred, without
    sample collection and with fixed num_samples=100).

    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    # dummy start timestamp; only the frequency matters to GluonTS
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    # NOTE(review): shallow copy -- per-car arrays shared with laptime_data
    _laptime_data = laptime_data.copy()
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + 1):
    while(endpos <= endlap + prediction_length + 1):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # skip cars whose ts ends before this window
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # <3, totallen> -- initialized once per car on the first window
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    # row 0 keeps the (ground-truth) lap status here
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                    #forecasts_et[carno][2,:endpos] = rec[run_ts,:endpos].copy().astype(np.float32)
                # forecasts_et will be updated by forecasts (autoregression)
                target_val = forecasts_et[carno][2,:]
                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test mode: fabricate the covariates of the forecast window
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #        laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec]
                                  }
                                 )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                  }
                                 )
                test_rec_cnt += 1
        # end of for each ts
        # RUN Prediction here (one batch for all cars of this window)
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=100,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            #update the forecasts , ready to use in the next prediction(regresive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
        #go forward
        endpos += prediction_length
    #clear the unpred part (laps after endlap were never forecast)
    for carno in forecasts_et.keys():
        forecasts_et[carno][2, endlap+1:] = np.nan
    return forecasts_et
def sim_onestep(predictor, prediction_length, freq,
        startlap, endlap,
        oracle_mode = MODE_ORACLE,
        verbose = False
        ):
    """
    Autoregressive rolling forecast, car by car (older variant of
    sim_onestep_ex: the inner while loop predicts ONE car per GluonTS call
    instead of batching all cars per window).

    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    # dummy start timestamp; only the frequency matters to GluonTS
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    # NOTE(review): shallow copy -- per-car arrays shared with laptime_data
    _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _test = []
        if events[_data[0]] != test_event:
            #jump out
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()
            # <3, totallen>
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if True:
                #bug fix, fixed the split point for all cars/ts
                #for endpos in range(max_len, context_len+prediction_length,step):
                #step = prediction_length
                #for endpos in range(startlap + prediction_length, endlap, step):
                endpos = startlap + prediction_length
                while(endpos < endlap and endpos < totallen):
                    # RUN Prediction for single record
                    _test = []
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode: fabricate the covariates of the forecast window
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #todo
                        #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                        #        laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec]
                                      }
                                     )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                      }
                                     )
                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)
                    #get prediction result
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
                    #update target_val (autoregression) and the covariate rows
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
                    #save forecast
                    #save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    #print('car no:', carno, 'completed_laps:', completed_laps)
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
                    test_rec_cnt += 1
                    #go forward
                    endpos += prediction_length
            #one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
    return forecasts_et
# pred pit differs to true pit
def get_acc_onestint_pred(forecasts, startlap, nextpit, nextpit_pred, trim=2, currank = False):
    """
    Collect per-car rank-change records for the stint starting with the pit
    stop at `startlap`, where the predicted next pit may differ from the
    true next pit.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; eval for the stint start from startlap only
        nextpit ; {} carno -> true next pit lap (may be nan)
        nextpit_pred ; {} carno -> predicted next pit lap (may be nan)
        currank ; when True, the prediction is just the start rank (baseline)
    output:
        list of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign, pitpos, pitpos_pred]
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]

        # bugfix: guard the lap index BEFORE reading forecasts[carno][0, startlap].
        # The original computed lapstatus_cont first, so cars whose ts is
        # shorter than startlap raised IndexError instead of being skipped
        # (the old `startlap < lapnum` check came too late to help).
        if startlap >= lapnum:
            continue

        # lap status condition: the stint must start with a pit at startlap,
        # extended to the inlap/outlap depending on _inlap_status
        if _inlap_status == 0:
            lapstatus_cont = (forecasts[carno][0, startlap] == 1)
        elif _inlap_status == 1:
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap-1] == 1))
        else:
            # _inlap_status == 2 ('P' on the outlap); bugfix: bound-check
            # startlap+1 so the last lap cannot index past the array
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1)
                    and (startlap + 1 < lapnum)
                    and (forecasts[carno][0, startlap+1] == 1))

        if carno in _debug_carlist:
            _debug_msg = 'startlap=%d, total=%d, pitstop status = %s, nextpit=%s, nextpit_pred=%s'%(startlap, lapnum, lapstatus_cont,
                    'none' if (carno not in nextpit) else nextpit[carno],
                    'none' if (carno not in nextpit_pred) else nextpit_pred[carno],
                    )
            debug_print(_debug_msg)

        # check the lap status
        if lapstatus_cont == True:
            startrank = true_rank[startlap-trim]
            if not carno in nextpit:
                continue
            pitpos = nextpit[carno]
            if np.isnan(pitpos):
                continue
            #todo, use the true prediction that longer than maxlap
            if _force_endpit_align:
                # fall back to the true pit when no usable prediction exists
                if not carno in nextpit_pred:
                    #continue
                    pitpos_pred = pitpos
                else:
                    pitpos_pred = nextpit_pred[carno]
                    if np.isnan(pitpos_pred):
                        pitpos_pred = pitpos
            else:
                if not carno in nextpit_pred:
                    continue
                pitpos_pred = nextpit_pred[carno]
                if np.isnan(pitpos_pred):
                    #set prediction to the end
                    continue

            endrank = true_rank[pitpos-trim]
            #endrank_pred = true_rank[pitpos_pred-trim]
            diff = endrank - startrank
            sign = get_sign(diff)

            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                pred_endrank = pred_rank[pitpos_pred-trim]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)

            rankret.append([carno, startlap, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign,
                            pitpos, pitpos_pred
                            ])
    return rankret
# pred pit differs to true pit
def get_acc_onestep_shortterm(forecasts, startlap, endlap, trim=0, currank = False):
    """
    Collect per-car rank-change records between startlap and endlap for the
    short-term (fixed-horizon) evaluation.

    input:
        forecasts ; carno -> [5, totallen] matrix
                    (row 3 = true rank, row 4 = predicted rank)
        startlap  ; lap where the window starts
        endlap    ; lap where the change is measured (skipped when nan)
        trim      ; laps trimmed off before reading the rank
        currank   ; when True, the prediction is just the start rank (baseline)
    output:
        list of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    records = []
    for carno, mat in forecasts.items():
        total_laps = len(mat[1, :])
        # skip cars whose ts ended before startlap, and undefined end laps
        if startlap >= total_laps or np.isnan(endlap):
            continue
        actual = mat[3, :]
        predicted = mat[4, :]
        startrank = actual[startlap - trim]
        endrank = actual[endlap - trim]
        true_delta = endrank - startrank
        pred_endrank = startrank if currank else predicted[endlap - trim]
        pred_delta = pred_endrank - startrank
        records.append([carno, startlap, startrank,
                        endrank, true_delta, get_sign(true_delta),
                        pred_endrank, pred_delta, get_sign(pred_delta)
                        ])
    return records
# works when pred pitstop == true pitstop
def get_acc_onestint(forecasts, startlap, nextpit, trim=2, currank = False):
    """
    Collect per-car rank-change records for the stint that starts with the
    pit stop at `startlap` and ends at the car's (true) next pit stop.
    Applicable when the predicted pit laps equal the true pit laps.

    input:
        forecasts ; carno -> [5, totallen] matrix
                    (row 0 = lap_status, row 3 = true rank, row 4 = predicted rank)
        startlap  ; evaluate only cars whose lap_status at this lap is 1
        nextpit   ; carno -> next pit lap (may be nan)
        trim      ; steady laps before pit in/out used to read the rank
        currank   ; when True, the prediction is just the start rank (baseline)
    output:
        list of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    results = []
    for carno, mat in forecasts.items():
        lap_count = len(mat[1, :])
        # only cars that actually pit on startlap
        if startlap >= lap_count or mat[0, startlap] != 1:
            continue
        if carno not in nextpit:
            continue
        pitpos = nextpit[carno]
        if np.isnan(pitpos):
            continue
        actual = mat[3, :]
        predicted = mat[4, :]
        startrank = actual[startlap - trim]
        endrank = actual[pitpos - trim]
        true_delta = endrank - startrank
        pred_endrank = startrank if currank else predicted[pitpos - trim]
        pred_delta = pred_endrank - startrank
        results.append([carno, startlap, startrank,
                        endrank, true_delta, get_sign(true_delta),
                        pred_endrank, pred_delta, get_sign(pred_delta)
                        ])
    return results
#
# simulation
#
def run_simulation_stint(predictor, prediction_length, freq,
        carno, stintid, loopcnt,
        datamode = MODE_ORACLE):
    """
    Monte-Carlo simulation for ONE car at ONE specific stint.

    Repeats `loopcnt` times: re-simulates lap status with the pit model,
    rolls the predictor forward to the (predicted) next pit, and records
    the rank change for that stint.

    input:
        carno  ; car to evaluate
        stintid; index into pitmat[carno] selecting the stint's start pit lap
        loopcnt; number of Monte-Carlo repetitions
    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation
            2. eval stint performance
    return:
        (df, full_samples, full_tss, maxnext_pred) -- df has one row per
        repetition per qualifying car; full_samples/full_tss are left empty
        here (the eval_full_samples call is commented out)
    """
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    #init samples array
    full_samples = {}
    full_tss = {}
    #here, test only one stint for carno and stintid
    pitlap = pitmat[carno][stintid]
    for runid in range(loopcnt):
        #for pitlap in allpits:
        #1. update lap status (skipped when the pit model is the oracle)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        #2. get maxnext: next pits under the true and re-simulated status
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        #only for one car
        maxnext = nextpit[carno]
        maxnext_pred = nextpit_pred[carno]
        #debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                #print('nextpit:', nextpit[12], nextpit_pred[12], 'maxnext:', maxnext, maxnext_pred)
                #debugstr = f'nextpit: {nextpit[]}, {nextpit_pred[12]}, maxnext: {maxnext}, {maxnext_pred}'
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)
        #run one step sim from pitlap to maxnext
        #to get the forecast_sample, set max = mexnext_pred only,
        #rather than max(maxnext,maxnext_pred)
        #
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, maxnext_pred,
                oracle_mode = datamode,
                sample_cnt = 100
                )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result (conversion depends on the experiment id)
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            return
        ## evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        #add endlap
        #_ = [x.append(maxnext_pred) for x in ret]
        rankret.extend(ret)
        ## add to full_samples
        #eval_full_samples(maxnext_pred,
        #        forecast_samples, forecast,
        #        full_samples, full_tss)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                         'endrank', 'diff', 'sign',
                                         'pred_endrank', 'pred_diff', 'pred_sign',
                                         'endlap','pred_endlap'
                                         ])
    return df, full_samples, full_tss, maxnext_pred
def run_simulation_pred(predictor, prediction_length, freq,
        datamode = MODE_ORACLE, verbose = False):
    """
    Long-term simulation over ALL pit laps, using the pit prediction model
    to decide each car's next stint boundary.

    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation
            2. eval stint performance
    return:
        DataFrame with one row per (car, stint) rank-change record
    """
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    for pitlap in allpits:
        #1. update lap status (skipped when the pit model is the oracle)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        #2. get maxnext: next pits under the true and re-simulated status
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        #debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                #print('nextpit:', nextpit[12], nextpit_pred[12], 'maxnext:', maxnext, maxnext_pred)
                #debugstr = f'nextpit: {nextpit[]}, {nextpit_pred[12]}, maxnext: {maxnext}, {maxnext_pred}'
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)
        #run one step sim from pitlap to maxnext (covers both true and
        #predicted stint ends)
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, max(maxnext, maxnext_pred),
                oracle_mode = datamode,
                sample_cnt = 100,
                verbose = verbose
                )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result (conversion depends on the experiment id)
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        rankret.extend(ret)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                         'endrank', 'diff', 'sign',
                                         'pred_endrank', 'pred_diff', 'pred_sign',
                                         'endlap','pred_endlap'
                                         ])
    return df
#prediction of shorterm + pred pit model
def run_simulation_shortterm(predictor, prediction_length, freq,
datamode = MODE_ORACLE,
sample_cnt = 100):
"""
step:
1. init the lap status model
2. loop on each pit lap
1. onestep simulation
2. eval stint performance
"""
rankret = []
# the ground truth
allpits, pitmat, maxlap = get_pitlaps()
sim_init()
#init samples array
full_samples = {}
full_tss = {}
for pitlap in range(10, maxlap-prediction_length):
#1. update lap status
debug_print(f'start pitlap: {pitlap}')
if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
update_lapstatus(pitlap)
debug_print(f'update lapstatus done.')
#run one step sim from pitlap to maxnext
forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
pitlap, pitlap + prediction_length,
oracle_mode = datamode,
sample_cnt = sample_cnt,
verbose = True
)
debug_print(f'simulation done: {len(forecast)}')
# calc rank from this result
if _exp_id=='rank' or _exp_id=='timediff2rank':
forecasts_et = eval_stint_direct(forecast, prediction_length)
elif _exp_id=='laptime2rank':
forecasts_et = eval_stint_bylaptime(forecast, prediction_length, global_start_offset[_test_event])
else:
print(f'Error, {_exp_id} evaluation not support yet')
break
# evaluate for this stint
#ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred)
ret = get_acc_onestep_shortterm(forecasts_et, pitlap, pitlap+prediction_length)
rankret.extend(ret)
# add to full_samples
evalbyrank = False if _exp_id == 'laptime2rank' else True
eval_full_samples(pitlap + prediction_length,
forecast_samples, forecast,
full_samples, full_tss, evalbyrank=evalbyrank)
print('evalbyrank:', evalbyrank)
#add to df
df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
'endrank', 'diff', 'sign',
'pred_endrank', 'pred_diff', 'pred_sign',
])
return df, full_samples, full_tss
# oracle sim
def run_simulation(predictor, prediction_length, freq,
        datamode = MODE_ORACLE):
    """
    Oracle simulation over the true pit laps: for every actual pit lap,
    simulate forward to the latest next pit and score the stint.

    step:
    1. init the lap status model
    2. loop on each pit lap
        1. onestep simulation
        2. eval stint performance

    input:
        predictor ; trained forecasting model passed to sim_onestep_ex
        prediction_length ; forecast horizon in laps
        freq ; pandas frequency string for the synthetic time index
        datamode ; MODE_* bit flags selecting oracle/predicted covariates
    return:
        df ; DataFrame of per-(car, startlap) stint rank changes
    """
    rankret = []
    allpits, pitmat, maxlap = get_pitlaps()
    for pitlap in allpits:
        print(f'start pitlap: {pitlap}')
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        #run one step sim from pitlap to maxnext
        forecast = sim_onestep_ex(predictor, prediction_length, freq,
                pitlap, maxnext,
                oracle_mode = datamode
                )
        print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        # NOTE(review): prediction_length argument is hard-coded to 2 in
        # these eval calls -- confirm that is intentional
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this stint
        ret = get_acc_onestint(forecasts_et, pitlap, nextpit)
        rankret.extend(ret)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
       'endrank', 'diff', 'sign',
       'pred_endrank', 'pred_diff', 'pred_sign',
       ])
    return df
#
# ------------
#
def longterm_predict(predictor, runs, prediction_length, freq,
                 useeid = False,
                 run_ts= COL_LAPTIME,
                 test_event = 'Indy500-2018',
                 test_cars = [],
                 use_global_dict = True,
                 oracle_mode = MODE_ORACLE,
                 half_moving_win = 0,
                 train_ratio=0.8,
                 log_transform = False,
                 verbose = False
                ):
    """
    split the ts to train and test part by the ratio

    Rolling-window long-term prediction over the test event: for each car's
    time series, repeatedly forecast `prediction_length` laps at successive
    end positions, feeding covariates (track status, pit/lap status, pit age)
    that are either oracle or predicted/disturbed depending on `oracle_mode`.

    input:
        oracle_mode: false to simulate prediction in real by
            set the covariates of track and lap status as nan in the testset
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
    """
    # NOTE(review): the run_ts/test_event parameters are immediately
    # overwritten by the module globals below -- the parameters are dead
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    init_track_model()
    init_adjust_track_model()
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    train_set = []
    test_set = []
    forecasts_et = {}
    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus
    # mae[0]: accumulated track-status disturbance, mae[1]: lap-status
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        # only the configured test event is processed; everything else is skipped
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10
        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            # keep pristine copies of the covariates; the prediction loop
            # below overwrites rec and restores from these each iteration
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()
            # <3, totallen>
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            # NOTE(review): test_mode is always True here (other events hit
            # `continue` above), so this branch is effectively dead
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                    'start': start,
                    'feat_static_cat': static_cat,
                    'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                           rec[COL_LAPSTATUS,:]]
                     }
                  )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = int(prediction_length/2)
                elif half_moving_win == 2:
                    step = prediction_length
                else:
                    step = 1
                #bug fix, fixed the split point for all cars/ts
                #for endpos in range(max_len, context_len+prediction_length,step):
                for endpos in range(context_len+prediction_length, max_len, step):
                    #check if enough for this ts
                    if endpos > totallen:
                        break
                    # RUN Prediction for single record
                    _test = []
                    # check pitstop(stint) in the last prediction
                    # use ground truth of target before the last pitstop
                    if np.sum(lap_status[endpos-2*prediction_length:endpos-prediction_length]) > 0:
                        # pit found
                        # adjust endpos
                        pitpos = np.where(lap_status[endpos-2*prediction_length:endpos-prediction_length] == 1)
                        endpos = endpos-2*prediction_length + pitpos[0][0] + prediction_length + 1
                        #print('endpos:',endpos,pitpos)
                        #check if enough for this ts
                        if endpos > totallen:
                            break
                    #reset target, status
                    target_val = rec[run_ts,:].copy().astype(np.float32)
                    rec[COL_LAPSTATUS, :] = lap_status
                    rec[COL_TRACKSTATUS, :] = track_status
                    rec[COL_LAPS_INSTINT, :] = pitage_status
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    # stint state at the last observed lap (just before the
                    # prediction window), fed to the pit model
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec
                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec
                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                              )
                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)
                    #get prediction result
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
                    #update target_val
                    # the predicted window becomes history for the next step
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
                    #save forecast
                    #save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    #print('car no:', carno, 'completed_laps:', completed_laps)
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
                    test_rec_cnt += 1
            #one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
        #train_set.extend(_train)
        #test_set.extend(_test)
    #print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    #train_ds = ListDataset(train_set, freq=freq)
    #test_ds = ListDataset(test_set, freq=freq)
    return forecasts_et
# In[12]:
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset):
    """
    evaluate stint rank by laptime forecasting

    The true/predicted laptime rows are converted to elapsed time (cumulative
    sum plus the car's lap-0 offset) and ranked across cars lap by lap.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target (laptime)
            2,: -> pred target (laptime)
            3,: -> true rank (filled here)
            4,: -> pred rank (filled here)
        prediction_length ; unused, kept for interface compatibility
        start_offset ; DataFrame with 'car_number'/'elapsed_time' columns
            giving the elapsed time for lap0; anything else means no per-car
            offset (0 is used)
    return:
        forecasts_et with rows 3/4 filled in-place
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan
    for carno in forecasts_et.keys():
        # bugfix: offset was left unbound (NameError) whenever start_offset
        # was not a DataFrame; default to no offset instead
        offset = 0
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        lapnum = len(forecasts_et[carno][1,:])
        # true elapsed time
        elapsed = np.cumsum(forecasts_et[carno][1,:]) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed
        # predicted elapsed time
        elapsed = np.cumsum(forecasts_et[carno][2,:]) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed
    #calculate rank, support nan
    # double argsort turns elapsed time into per-lap rank; NaN tails sort last
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]
    return forecasts_et
#
def eval_full_samples_old(lap, forecast_samples, forecast, full_samples, full_tss, maxlap=200):
    """
    Merge one lap's forecast samples into the running per-car rank arrays.

    input:
        lap ; lap number being filled in
        forecast_samples ; {} carno -> 1-d sample array of the pred target
        forecast ; {} carno -> 5 x totallen matrix (row 1 = true target)
        full_samples ; {} carno -> (samplecnt, maxlap) pred-rank samples,
            created on first use and updated in-place
        full_tss ; {} carno -> (maxlap,) true-rank series, updated in-place
        maxlap ; width of the output arrays (default 200 laps)
    return:
        None (results accumulate in full_samples / full_tss)
    """
    cars = list(forecast.keys())
    car_row = {c: i for i, c in enumerate(cars)}
    n_samples = len(forecast_samples[cars[0]])
    # per-car true targets (one row each) and sampled predictions, NaN-padded
    truth = np.full((len(cars), maxlap), np.nan)
    sampled = np.full((len(cars), n_samples), np.nan)
    for c in cars:
        ts_len = len(forecast[c][1, :])
        truth[car_row[c], :ts_len] = forecast[c][1, :]
        sampled[car_row[c], :] = forecast_samples[c]
    # double argsort converts values to per-column ranks; NaNs sort last
    true_rank = np.argsort(np.argsort(truth, axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(sampled, axis=0), axis=0)
    for c in cars:
        if c not in full_tss:
            # first sighting of this car: allocate NaN-filled outputs
            full_tss[c] = np.full(maxlap, np.nan)
            full_samples[c] = np.full((n_samples, maxlap), np.nan)
        full_tss[c][:lap + 1] = true_rank[car_row[c]][:lap + 1]
        full_samples[c][:, lap] = pred_rank[car_row[c], :]
    return
#
def eval_full_samples(lap, forecast_samples, forecast, full_samples, full_tss, maxlap=200, evalbyrank = True):
    """
    Merge one lap's forecast samples into the running per-car arrays.

    input:
        lap ; lap number being filled in
        forecast_samples ; {} carno -> 1-d sample array of the pred target
        forecast ; {} carno -> 5 x totallen matrix (row 1 = true target)
        full_samples ; {} carno -> (samplecnt, maxlap), created on first use
        full_tss ; {} carno -> (maxlap,), updated in-place
        maxlap ; width of the output arrays (default 200 laps)
        evalbyrank ; True -> store ranks across cars, False -> store the raw
            target values unchanged
    return:
        None (results accumulate in full_samples / full_tss)
    """
    cars = list(forecast.keys())
    car_row = {c: i for i, c in enumerate(cars)}
    n_samples = len(forecast_samples[cars[0]])
    # per-car true targets (one row each) and sampled predictions, NaN-padded
    truth = np.full((len(cars), maxlap), np.nan)
    sampled = np.full((len(cars), n_samples), np.nan)
    for c in cars:
        ts_len = len(forecast[c][1, :])
        truth[car_row[c], :ts_len] = forecast[c][1, :]
        sampled[car_row[c], :] = forecast_samples[c]
    if evalbyrank:
        # double argsort converts values to per-column ranks; NaNs sort last
        true_rank = np.argsort(np.argsort(truth, axis=0), axis=0)
        pred_rank = np.argsort(np.argsort(sampled, axis=0), axis=0)
    else:
        # keep raw target values (e.g. elapsed time for laptime2rank)
        true_rank = truth
        pred_rank = sampled
    for c in cars:
        if c not in full_tss:
            # first sighting of this car: allocate NaN-filled outputs
            full_tss[c] = np.full(maxlap, np.nan)
            full_samples[c] = np.full((n_samples, maxlap), np.nan)
        full_tss[c][:lap + 1] = true_rank[car_row[c]][:lap + 1]
        full_samples[c][:, lap] = pred_rank[car_row[c], :]
    return
def eval_stint_direct(forecasts_et, prediction_length):
    """
    evaluate rank by timediff forecasting

    Rows 1 (true) and 2 (pred) are ranked directly across cars, lap by lap,
    and the resulting ranks are written back into rows 3 and 4.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3,: -> true rank (filled here)
            4,: -> pred rank (filled here)
        prediction_length ; unused, kept for interface compatibility
    return:
        forecasts_et with rows 3/4 filled in-place
    """
    cars = list(forecasts_et.keys())
    row_of = {c: i for i, c in enumerate(cars)}
    #todo, Indy500 - > 200 max laps
    maxlap = 200
    # stack every car's true/pred target, NaN-padded to maxlap columns
    diff_time = np.full((2, len(cars), maxlap), np.nan)
    for c in cars:
        n = len(forecasts_et[c][1, :])
        diff_time[0, row_of[c], :n] = forecasts_et[c][1, :]
        diff_time[1, row_of[c], :n] = forecasts_et[c][2, :]
    # double argsort converts values to per-lap ranks; NaN tails sort last
    true_rank = np.argsort(np.argsort(diff_time[0], axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(diff_time[1], axis=0), axis=0)
    for c in cars:
        n = len(forecasts_et[c][1, :])
        forecasts_et[c][3, :] = true_rank[row_of[c], :n]
        forecasts_et[c][4, :] = pred_rank[row_of[c], :n]
    return forecasts_et
#calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    Laptimes (rows 1/2) are converted to elapsed time via cumulative sum plus
    the per-car lap-0 offset, then ranked across cars for every lap.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            1,: -> true laptime, 2,: -> pred laptime
            3,: / 4,: -> true/pred rank (filled here)
        prediction_length ; unused, kept for interface compatibility
        start_offset ; DataFrame with 'car_number'/'elapsed_time' columns
            giving the elapsed time for lap0; anything else means no per-car
            offset (0 is used)
    return:
        forecasts_et with rows 3/4 filled in-place
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan
    for carno in forecasts_et.keys():
        # bugfix: offset was left unbound (NameError) whenever start_offset
        # was not a DataFrame; default to no offset instead
        offset = 0
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        lapnum = len(forecasts_et[carno][1,:])
        # true elapsed time
        elapsed = np.cumsum(forecasts_et[carno][1,:]) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed
        # predicted elapsed time
        elapsed = np.cumsum(forecasts_et[carno][2,:]) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed
    #calculate rank, support nan
    # double argsort turns elapsed time into per-lap rank; NaN tails sort last
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]
    return forecasts_et
# In[13]:
def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
              test_event='Indy500-2018', test_cars = [],
              datamode = MODE_ORACLE,model = 'oracle'):
    """
    dependency: test_event, test on one event only

    Load the trained model and run long-term prediction on the configured
    test event (_test_event), returning the per-car forecast matrices.

    input:
        prediction_length ; forecast horizon in laps
        half_moving_win ; rolling-window step mode, forwarded to longterm_predict
        train_ratio, trainid ; model/training-split selectors
        test_event ; NOTE(review): shadowed by the global _test_event inside
            longterm_predict, effectively unused here
        test_cars ; forwarded to longterm_predict
            (NOTE(review): mutable default argument)
        datamode ; MODE_* bit flags
        model ; model name passed to load_model
    return:
        forecasts ; carno -> 5 x totallen matrix (see longterm_predict)
    """
    # NOTE(review): these four accumulators are never used in this function
    retdf = []
    pred_ret = {}
    ds_ret = {}
    rank_result = {}
    predictor = {}
    #for model in models:
    print('exp:',inspect.stack()[0][3],'model:', model,
            'datamode:', get_modestr(datamode),'eval:', _exp_id )
    predictor[model] = load_model(prediction_length, model,
            trainid=trainid)
    ### create test dataset
    forecasts = longterm_predict(predictor[model],
                      events_id[_test_event], prediction_length,freq,
                      oracle_mode=datamode,
                      run_ts = _run_ts,
                      test_cars=test_cars,
                      half_moving_win= half_moving_win,
                      train_ratio=train_ratio
                      )
    #forecasts = eval_stint_rank(forecasts_et, prediction_length,
    #        global_start_offset[test_event])
    return forecasts
# In[14]:
def get_sign(diff):
    """Map a rank delta to its sign: 1 for positive, -1 for negative, 0 otherwise."""
    if diff == 0:
        return 0
    return 1 if diff > 0 else -1
def get_stint_acc(forecasts, trim=2, currank = False):
    """
    Build a per-stint rank-change table from the forecast matrices.

    A stint is the stretch between two pit stops; rank is sampled `trim`
    laps before each pit lap to skip the unstable pit in/out laps.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status (1 on pit laps)
            3; true_rank
            4; pred_rank
        currank ; True emulates the naive model (predicted end rank equals
            the start rank)
    output:
        DataFrame with columns
        carno, stintid, startrank, endrank, diff, sign,
        pred_endrank, pred_diff, pred_sign
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]
        # laps flagged as pit stops
        pitpos_list = np.where(forecasts[carno][0,:] == 1)[0]
        stintid = 0
        startrank = true_rank[0]
        for pitpos in pitpos_list:
            # NOTE(review): pitpos < trim indexes from the array end
            endrank = true_rank[pitpos-trim]
            diff = endrank - startrank
            sign = get_sign(diff)
            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
            else:
                pred_endrank = pred_rank[pitpos-trim]
            pred_diff = pred_endrank - startrank
            pred_sign = get_sign(pred_diff)
            rankret.append([carno, stintid, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                            ])
            stintid += 1
            startrank = true_rank[pitpos-trim]
        # final stint after the last pit; bugfix: a car with no pit stop at
        # all (empty pitpos_list) used to raise IndexError on pitpos_list[-1]
        if pitpos_list.size == 0 or pitpos_list[-1] < lapnum - 1:
            endrank = true_rank[-1]
            diff = endrank - startrank
            sign = get_sign(diff)
            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
            else:
                pred_endrank = pred_rank[-1]
            pred_diff = pred_endrank - startrank
            pred_sign = get_sign(pred_diff)
            rankret.append([carno, stintid, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                            ])
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'stintid', 'startrank',
                                         'endrank', 'diff', 'sign',
                                         'pred_endrank', 'pred_diff', 'pred_sign',
                                         ])
    return df
#
# configuration
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'    # dataset the models were trained on
_test_event = 'Indy500-2018'     # event held out for evaluation
#_test_event = 'Indy500-2019'
_train_len = 40
_feature_mode = FEATURE_STATUS   # covariates fed to the model (status only)
_context_ratio = 0.              # 0. -> derive context_len from prediction_length
# --- task selection: which target ts the model predicts and how to score it ---
#_task_id = 'timediff' # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
#
#_task_id = 'lapstatus' # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus' #rank, laptime, laptim2rank, timediff2rank...
_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
# pit in-lap flag variant baked into the dataset name (see dbid below)
_inlap_status = 1
_force_endpit_align = False
_include_endpit = False
#_use_mean = False # mean or median to get prediction from samples
_use_mean = True # mean or median to get prediction from samples
# In[16]:
# module-level state populated by init()
global_start_offset = {}   # event -> DataFrame of lap-0 elapsed time per car
global_carids = {}         # carno -> carid
laptime_data = None        # per-event record matrices loaded from pickle
laptime_data_save = None
freq = "1min"              # synthetic ts frequency for gluonts
decode_carids = {}         # carid -> carno (inverse of global_carids)
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
def init(pitmodel = ''):
    """
    Load race data plus the laptime pickle and configure the global pit model.

    input:
        pitmodel ; '' or a model file path -> PitModelMLP,
                   'oracle' -> keep the string (ground-truth pit laps used),
                   a non-string (e.g. 0/1) -> PitModelSimple (top8 when 0)
    side effects:
        fills global_start_offset, global_carids, laptime_data,
        laptime_data_save, decode_carids, dbid and _pitmodel
    raises:
        FileNotFoundError when the laptime pickle for dbid is missing
    """
    global global_carids, laptime_data, global_start_offset, decode_carids,_pitmodel
    # bugfix: laptime_data_save was assigned below without a global
    # declaration, so the module-level variable silently stayed None
    global dbid, _inlap_status, laptime_data_save
    dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
    stagedata = {}
    for event in events:
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata = stagedata[event]
        #offset: elapsed time at lap 0 for every car of this event
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]
    # start from here
    import pickle
    laptimefile = f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
    with open(laptimefile, 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    # keep a pristine module-level copy before any in-place edits
    laptime_data_save = laptime_data
    decode_carids={carid:carno for carno, carid in global_carids.items()}
    print(f'init: load dataset {laptimefile} with {len(laptime_data)} races, {len(global_carids)} cars')
    if not isinstance(pitmodel, str):
        _pitmodel = PitModelSimple(top8=(True if pitmodel==0 else False))
        print(f'init pitmodel as PitModelSimple')
    elif pitmodel=='oracle':
        _pitmodel = pitmodel
    else:
        _pitmodel = PitModelMLP(modelfile = pitmodel)
        print(f'init pitmodel as PitModelMLP(pitmodel)')
def runtest(modelname, model, datamode, naivemode, trainid= "2018"):
    """
    Run one experiment configuration end-to-end and report stint metrics.

    input:
        modelname ; label of the configuration (for reporting only)
        model ; model name forwarded to run_exp/load_model
        datamode ; MODE_* bit flags
        naivemode ; True scores the naive model (end rank = start rank)
        trainid ; training-split identifier of the saved model
    return:
        (acc, mae, rmse, r2) of the predicted stint rank changes, or
        (0, 0, 0, 0) when _exp_id is unsupported
    """
    forecast = run_exp(2,2, train_ratio =0.1 , trainid = trainid,
            datamode=datamode, model=model)
    if _exp_id=='rank' or _exp_id=='timediff2rank':
        forecasts_et = eval_stint_direct(forecast, 2)
    elif _exp_id=='laptime2rank':
        forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
    else:
        print(f'Error, {_exp_id} evaluation not support yet')
        return 0,0, 0,0
    df = get_stint_acc(forecasts_et, currank = naivemode, trim= _trim)
    correct = df[df['sign']==df['pred_sign']]
    acc = len(correct)/len(df)
    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values))/len(df)
    # NOTE(review): named rmse but mean_squared_error returns MSE here
    # (no sqrt), unlike get_evalret which applies math.sqrt -- confirm
    rmse = mean_squared_error(df['pred_diff'].values , df['diff'].values)
    mae = mean_absolute_error(df['pred_diff'].values , df['diff'].values)
    r2 = r2_score(df['pred_diff'].values , df['diff'].values)
    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}')
    return acc, mae, rmse, r2
def get_evalret(df):
    """
    Score predicted stint rank changes against the naive baseline.

    input:
        df ; stint table with columns startrank, endrank, diff, sign,
             pred_diff, pred_sign (see get_stint_acc)
    return:
        2x4 ndarray [[acc, mae, rmse, r2], [acc_naive, mae_naive,
        rmse_naive, r2_naive]]; also prints a summary line
    """
    # model metrics: sign-agreement accuracy plus error stats on the deltas
    model_hits = df[df['sign']==df['pred_sign']]
    acc = len(model_hits) / len(df)
    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values)) / len(df)
    pred_delta = df['pred_diff'].values
    true_delta = df['diff'].values
    rmse = math.sqrt(mean_squared_error(pred_delta, true_delta))
    mae = mean_absolute_error(pred_delta, true_delta)
    r2 = r2_score(pred_delta, true_delta)
    # naive baseline: predict the rank never changes within a stint
    naive_hits = df[df['startrank']==df['endrank']]
    acc_naive = len(naive_hits) / len(df)
    mae_naive = np.mean(np.abs(true_delta))
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, {%d}\n \
            naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2, len(df),
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
    )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
def get_evalret_shortterm(df):
    """
    Score short-term rank forecasts, with leader (rank 0) accuracy.

    input:
        df ; per-(car, startlap) table with columns startlap, startrank,
             endrank, pred_endrank, diff (see the short-term simulation)
    return:
        2x4 ndarray [[acc, mae, rmse, r2], [acc_naive, mae_naive,
        rmse_naive, r2_naive]]; also prints a summary line
    """
    maxlap = np.max(df['startlap'].values)
    minlap = np.min(df['startlap'].values)
    top1 = df[df['endrank']==0]
    top1_pred = df[df['pred_endrank']==0]
    correct = top1_pred[top1_pred['pred_endrank']==top1_pred['endrank']]
    #acc = len(correct)/len(top1_pred)
    # epsilon guards against an empty leader-prediction set
    acc = len(correct)/(len(top1_pred) + 1e-10)
    rmse = math.sqrt(mean_squared_error(df['pred_endrank'].values , df['endrank'].values))
    mae = mean_absolute_error(df['pred_endrank'].values , df['endrank'].values)
    r2 = r2_score(df['pred_endrank'].values , df['endrank'].values)
    # per-lap averaged absolute error (computed but not printed)
    mae1 = np.sum(np.abs(df['pred_endrank'].values - df['endrank'].values))
    mae1 = mae1/ (maxlap -minlap +1)
    #naive result
    top1_naive = df[df['startrank']==0]
    n_correct = top1_naive[top1_naive['startrank']==top1_naive['endrank']]
    # bugfix: apply the same epsilon guard as `acc` above; an empty
    # top1_naive set used to raise ZeroDivisionError
    acc_naive = len(n_correct)/(len(top1_naive) + 1e-10)
    mae_naive = np.mean(np.abs(df['diff'].values))
    mae_naive1 = np.sum(np.abs(df['diff'].values))
    mae_naive1 = mae_naive1 / (maxlap - minlap + 1)
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, {%d}\n \
            naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2, len(top1_pred),
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
    )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
#
# In[20]:
def mytest():
    """
    Run the full (model, datamode) evaluation matrix and cache the result.

    Each configuration is scored via runtest(); the metrics table is saved
    to a csv whose name encodes the experiment parameters, and the run is
    skipped when that file already exists.

    return:
        DataFrame of per-configuration metrics, or None when the cached
        result already existed
    """
    savefile = f'stint-evluate-{_exp_id}-d{_dataset_id}-t{_test_event}-c{_context_ratio}_trim{_trim}.csv'
    # cached-result short-circuit
    if os.path.exists(savefile):
        print(f'{savefile} already exists, bye')
        retdf = pd.read_csv(savefile)
        return
    # configname -> [model, datamode bit flags, naivemode]
    config = {'fulloracle':['oracle',MODE_ORACLE,False],
              'laponly':['oracle',MODE_ORACLE_LAPONLY,False],
              'notracklap':['oracle',MODE_NOTRACK + MODE_NOLAP,False],
              'fullpred':['oracle',MODE_PREDTRACK + MODE_PREDPIT,False],
              'curtrack':['oracle',MODE_TESTCURTRACK,False],
              'zerotrack':['oracle',MODE_TESTZERO,False],
              'predtrack':['oracle',MODE_PREDTRACK + MODE_ORACLE_TRACKONLY,False],
              'predpit':['oracle',MODE_PREDPIT + MODE_ORACLE_LAPONLY,False],
              'deepAR':['deepAR',MODE_ORACLE,False],
              'naive':['zero',MODE_ORACLE, True],
             }
    cols = ['runid','acc','mae', 'rmse', 'r2']
    result = []
    for modelname in config.keys():
        acc, mae, rmse, r2 = runtest(modelname, config[modelname][0],
                config[modelname][1],config[modelname][2])
        result.append([modelname, acc, mae, rmse, r2])
    retd = pd.DataFrame(result,columns=cols)
    # bugfix: write to `savefile` (the name checked above); previously the
    # csv was written under a name without the _trim suffix, so the cache
    # check at the top could never hit
    retd.to_csv(savefile, float_format='%.3f')
    return retd
# In[ ]:
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # cmd argument parser
    # NOTE(review): usage string names a different script
    # (stint_predictor_fastrun.py) -- confirm
    usage = 'stint_predictor_fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    parser.add_option("--contextratio", dest="contextratio", default=0.)
    parser.add_option("--trim", dest="trim", type=int, default=0)
    opt, args = parser.parse_args()
    #set global parameters
    _dataset_id = opt.datasetid
    _test_event = opt.testevent
    _trim = opt.trim
    # map the task to the (target row, evaluation mode) pair used globally
    if opt.taskid == 'laptime':
        _task_id = 'laptime' # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
        _exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff' # rank,laptime, the trained model's task
        _run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
        _exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'rank':
        _task_id = 'rank' # rank,laptime, the trained model's task
        _run_ts = COL_RANK #COL_LAPTIME,COL_RANK
        _exp_id='rank' #rank, laptime, laptim2rank, timediff2rank...
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)
    if _dataset_id=='' or _test_event=='':
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)
    # datasets trained with the pit-age covariate carry 'pitage' in their id
    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE
    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
            _task_id)
    init()
    mytest()
| 158,233 | 36.222771 | 310 | py |
rankpredictor | rankpredictor-master/src/indycar/model/evaluate_fulltest_fastrun_paper.py | #!/usr/bin/env python
# coding: utf-8
# ## evaluate-fulltest-fastrun
#
# based on: evaluate-fulltest
#
# + support different models and test set
#
# + rank prediction directly
# + rank prediction by laptime2rank
# + laptime prediction
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
logger = logging.getLogger(__name__)
# In[2]:
import os
random.seed()
os.getcwd()
#GPUID = 1
# ### global constants
# In[3]:
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# Row indices into the per-car data cube rec[feature, lap] used throughout
# this module (see make_dataset_byevent / laptime_data).
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8
# oracle mode: bit flags combined into the ``oracle_mode`` mask.
MODE_ORACLE = 1024 # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2
# oracle mode for training (aliases of the two low bits above: which
# covariate is zeroed out when building the training set)
MODE_NOLAP = 1
MODE_NOTRACK = 2
#MODE_NOPITAGE = 512
# predicting mode: how the unknown forecast window of the covariates is filled
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32
# disturbance analysis: perturb the oracle track/lap status
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256
# Flag value -> human-readable name, used by get_modestr for logging.
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
             MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
             MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
             MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
             MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
             MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
# In[ ]:
# In[4]:
def load_data(event, year=0):
    """Load one race's lap-by-lap CSV and derive per-(car, lap) tables.

    input:
        event ; event name, e.g. 'Indy500-2018' (file ../data/final/C_<event>.csv)
        year  ; optional int; when > 0 the file name is C_<event>-<year>.csv
    return:
        alldata  ; the raw csv as a DataFrame
        rankdata ; alldata ordered by elapsed time, one record per (car, lap)
        acldata  ; completed-laps table for all cars (see make_cl_data)
    """
    if year > 0:
        # bug fix: ``year`` is an int here; the original concatenated it with
        # str ('-' + year) which raised TypeError.  Use an f-string instead.
        inputfile = f'../data/final/C_{event}-{year}.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'

    dataset = pd.read_csv(inputfile)

    final_lap = max(dataset.completed_laps)

    # cars that completed the final lap (kept for reference; only the full
    # table is used downstream)
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values

    # make a copy and order by race position at each point in time
    alldata = dataset.copy()
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # the original also built a completed-cars-only cl table here, but it was
    # never used or returned; removed as dead work.
    acldata = make_cl_data(alldata)

    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-(car, lap) completed-laps table.

    Keeps the first record per (car_number, completed_laps) ordered by elapsed
    time, then adds lap-over-lap deltas.

    input:
        dataset ; raw race DataFrame (must contain the columns dropped below)
    return:
        DataFrame with columns [car_number, completed_laps, rank, elapsed_time,
        rank_diff, time_diff, current_status, track_status, lap_status]
    """
    # pick up data with valid rank: order by elapsed time (original row order
    # breaks ties), keep the first record per (car, lap)
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # resort by car_number, lap and drop bookkeeping columns
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade", "laps_behind_prec", "overall_rank", "pit_stop_count",
                          "last_pitted_lap", "start_position", "laps_led"], axis=1)

    # lap-over-lap deltas; zero them on each car's first row so a car's delta
    # never spans into the previous car's series.
    # bug fix: use .loc instead of the original chained assignment
    # (uni_ds['rank_diff'][mask] = 0), which raises SettingWithCopyWarning and
    # is a silent no-op under pandas copy-on-write.
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    first_row_of_car = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds.loc[first_row_of_car, 'rank_diff'] = 0
    uni_ds.loc[first_row_of_car, 'time_diff'] = 0

    df = uni_ds[['car_number', 'completed_laps', 'rank', 'elapsed_time',
                 'rank_diff', 'time_diff', "current_status", "track_status", "lap_status"]]
    return df
# In[6]:
def nan_helper(y):
    """Locate NaNs in a 1-d array and provide an index converter.

    Input:
        - y, 1d numpy array that may contain NaNs
    Output:
        - mask, boolean array marking the NaN positions
        - to_indices, a function mapping a logical mask to integer indices

    Example (linear interpolation over the NaNs):
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)

    def to_indices(logical):
        return logical.nonzero()[0]

    return mask, to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
def get_modestr(a):
    """Render the oracle-mode bitmask *a* as a comma-terminated name list."""
    names = ['%s,' % (_mode_map[flag]) for flag in _mode_map if test_flag(a, flag)]
    return ''.join(names)
# Caches for the simulated track-status model (reset by init_track_model):
#   _track_pred: endpos -> predicted 0/1 track-status vector (len prediction_length)
#   _track_true: endpos -> ground-truth vector for the same forecast window
_track_pred = {}
_track_true = {}
def init_track_model():
    """Reset the per-endpos track-status prediction caches."""
    global _track_pred, _track_true
    _track_pred, _track_true = {}, {}
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """
    return the predicted track status

    Simulates a caution ("yellow flag") length model: draws a total caution
    length from the empirical samples in ``track_model`` and, if a caution is
    already in progress at the forecast start, fills the remaining caution
    laps into the forecast window.  Results are cached per ``endpos`` in the
    module globals so every car at the same split point sees the same
    prediction; call init_track_model() to clear between runs.

    input:
        track_rec ; 0/1 track-status vector up to endpos
        endpos ; split position (cache key)
        prediction_length ; forecast window length
        context_len ; how many laps before the window to scan for an ongoing caution
    """
    global _track_pred,_track_true
    # this is the perfect track model for Indy500 2018
    # (empirical caution lengths, in laps)
    track_model = [6,4,4,5,6,6,4]
    if endpos in _track_pred:
        return _track_pred[endpos]
    else:
        #get yflag lap count from the start pred point
        # (count consecutive caution laps immediately before the window)
        yflaplen = 0
        for i in range(1, context_len):
            if track_rec[- prediction_length - i] == 1:
                yflaplen += 1
            else:
                break
        #laps remain, fill into the future
        trackpred = np.array([0 for x in range(prediction_length)])
        yflap_pred = random.choice(track_model)
        # only extend an ongoing caution; a fresh window stays all-green
        if yflaplen > 0 and yflap_pred > yflaplen:
            trackpred[:(yflap_pred - yflaplen)] = 1
        _track_pred[endpos] = trackpred
        # keep the ground truth alongside for later error analysis
        _track_true[endpos] = track_rec[- prediction_length:].copy()
    return trackpred
# Cache for the track-status disturbance model (reset by
# init_adjust_track_model): endpos -> adjusted 0/1 vector of prediction_length
_track_adjust = {}
def init_adjust_track_model():
    """Reset the per-endpos adjusted track-status cache."""
    global _track_adjust
    _track_adjust = dict()
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """
    Randomly shift the end of a caution period by -1/0/+1 laps (disturbance
    analysis of the oracle track status).  Results are cached per ``endpos``
    so all cars at the same split point see the same disturbance.

    input:
        track_rec ; 0/1 track-status vector up to endpos
        endpos ; split position (cache key)
        prediction_length ; forecast window length
        tailpos ; <0 end pos of 1 (negative index of the last caution lap
                  inside the window)
    return the predicted track status (adjusted window of prediction_length)
    """
    global _track_adjust
    # this is the perfect track model for Indy500 2018
    # (-1: shorten by one lap, 0: keep, +1: shift the tail lap forward)
    track_model = [-1,0,1]
    if endpos in _track_adjust:
        return _track_adjust[endpos]
    else:
        yflap_adjust = random.choice(track_model)
        #laps remain, fill into the future
        trackadjust = track_rec[-prediction_length:].copy()
        if yflap_adjust == -1:
            trackadjust[tailpos] = 0
        elif yflap_adjust == 1:
            trackadjust[tailpos] = 0
            # only move the caution lap forward if it stays inside the window
            if (tailpos + 1) <= -1:
                trackadjust[tailpos+1] = 1
        _track_adjust[endpos] = trackadjust
    return trackadjust
# Per-car caches for the pit-lap disturbance model (reset by
# init_adjust_pitmodel):
#   _lap_adjust: carno -> adjusted 0/1 lap(pit)-status vector
#   _empirical_model: applied pit-shift offset -> observed count (statistics)
_lap_adjust = {}
_empirical_model = {}
def init_adjust_pitmodel():
    """Reset the pit-lap disturbance caches.

    Bug fix: the original declared only ``_lap_adjust`` as global, so
    ``_empirical_model = {}`` created a discarded local and the module-level
    statistics dict was never actually cleared between runs.
    """
    global _lap_adjust, _empirical_model
    _lap_adjust = {}
    _empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """
    init the lapstatus for each car, save it for future reference

    Each pit lap in the ground-truth vector is shifted by a random offset
    drawn from ``_adjust_model``; the result is cached in ``_lap_adjust`` so
    the same car always gets the same disturbed vector within a run.

    input:
        carno;
        lapstatus ; the trueth (0/1 pit-status vector, not modified)
        force ; True: retry until the shifted position is in range,
                False: give up after one draw (pit may stay in place)
    return:
        the cached adjusted 0/1 vector for this car
    """
    if carno not in _lap_adjust:
        #adjust it
        lapadjust = lapstatus.copy()
        for pos in range(0, len(lapstatus)):
            if lapadjust[pos] == 1:
                success = False
                while(not success):
                    # adjust this pit lap position
                    pos_adjust = get_random_choice(_adjust_model)
                    new_pos = pos + pos_adjust
                    if new_pos >= 0 and new_pos < len(lapstatus):
                        #valid
                        lapadjust[pos] = 0
                        lapadjust[new_pos] = 1
                        success = True
                        #add statistics (histogram of applied offsets)
                        if pos_adjust not in _empirical_model:
                            _empirical_model[pos_adjust] = 1
                        else:
                            _empirical_model[pos_adjust] += 1
                    if force==False:
                        break
        _lap_adjust[carno] = lapadjust
    return _lap_adjust[carno]
def build_random_model(modeldict):
    """Turn a {value: weight} mapping into a normalized CDF table.

    input:
        modeldict ; {value: probability weight}
    return:
        (N, 2) ndarray of rows (value, cumulative_probability), sorted by
        value, with the final cumulative probability equal to 1.
    """
    table = np.zeros((len(modeldict), 2))
    running = 0.0
    for row, key in enumerate(sorted(modeldict)):
        running += modeldict[key]
        table[row, 0] = key
        table[row, 1] = running
    # normalize so the last entry is exactly 1
    table[:, 1] = table[:, 1] / running
    return table
def print_model(model, iscdf=True):
    """Pretty-print a (value, probability) model as 'value:prob' pairs.

    input:
        model ; (N, 2) array of (value, cdf) rows when iscdf is True,
                otherwise (value, raw weight) rows
    """
    ordered = model[np.argsort(model[:, 0])]
    # total mass: cdf tables are already normalized; raw weights are summed
    total = 1. if iscdf else np.sum(ordered[:, 1])
    pairs = []
    prev_cdf = 0
    for row in ordered:
        pairs.append((row[0], (row[1] - prev_cdf) / total))
        if iscdf:
            prev_cdf = row[1]
    #output
    print(['%d:%.3f'%(x[0],x[1]) for x in pairs])
def get_random_choice(model):
    """Draw a value from a (value, cdf) table according to its probability.

    input:
        model ; (N, 2) array of (value, cdf) rows, cdf ascending to 1
    return:
        the sampled value as int
    """
    u = np.random.rand()
    # index of the first row whose cdf reaches the uniform draw
    row = np.sum(model[:, 1] < u)
    return int(model[row, 0])
# Pit-lap shift distribution for the disturbance analysis: lap offset -> weight.
# The zero offset is deliberately down-weighted (0.05 vs the commented-out
# 0.4) so most pit stops actually get moved.
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
_adjust_model = build_random_model(_modeldict)
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """
    Randomly shift each pit lap inside the forecast window by an offset drawn
    from ``_adjust_model`` (disturbance analysis of the oracle pit status).

    input:
        lap_rec ; full 0/1 lap(pit)-status vector
        prediction_length ; forecast window length
        force ; True: retry until the shifted position lands inside the
                window, False: a single out-of-range draw leaves the pit lap
                where it is
    return the predicted lap status (adjusted window of prediction_length)
    """
    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            success = False
            while(not success):
                # adjust this pit lap position
                pos_adjust = get_random_choice(_adjust_model)
                new_pos = pos + pos_adjust
                if new_pos >= 0 and new_pos < prediction_length:
                    #valid
                    lapadjust[pos] = 0
                    lapadjust[new_pos] = 1
                    success = True
                if force==False:
                    break
    return lapadjust
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
    """
    Randomly shift each pit lap in the last *prediction_length* laps by a
    fixed -1/0/+1 offset, keeping the shifted lap inside the window.

    input:
        lap_rec ; full 0/1 lap(pit)-status vector
        endpos ; unused, kept for signature compatibility with callers
        prediction_length ; forecast window length
    return the predicted lap status (adjusted window of prediction_length)
    """
    adjust_model = [-1, 0, 1]
    # bug fix: the original drew one extra random value here
    # (lap_adjust = random.choice(adjust_model)) that was never used.
    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            # adjust this pit lap position
            pos_adjust = random.choice(adjust_model)
            if pos_adjust == -1:
                if (pos - 1 >= 0):
                    lapadjust[pos] = 0
                    lapadjust[pos - 1] = 1
            elif pos_adjust == 1:
                if (pos + 1 < prediction_length):
                    lapadjust[pos] = 0
                    lapadjust[pos + 1] = 1
    return lapadjust
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
    """
    Predict the pit-stop lap within the forecast window.

    Draws a stint length from the empirical Indy500-2018 pit model and, if
    the predicted pit falls inside the next *prediction_length* laps, marks
    that lap with a 1.

    input:
        cuation_laps_instint ; caution laps seen in the current stint
        laps_instint ; laps already driven in the current stint
        prediction_length ; forecast window length
    return:
        int 0/1 vector of length prediction_length (at most one 1)
    """
    # this is the perfect empirical pit model for Indy500 2018
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
                     [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
                      [46, 45, 43, 48, 46, 45, 45, 43]]
    pit_model = pit_model_all

    # many caution laps in the stint -> the shorter-stint ("low") samples,
    # otherwise the longer-stint samples
    source = pit_model[0] if cuation_laps_instint > 10 else pit_model[1]
    pred_pit_laps = random.choice(source)

    # mark the predicted pit lap if it lands inside the window
    pitpred = np.zeros(prediction_length, dtype=int)
    lap_in_window = pred_pit_laps - laps_instint
    if 0 < lap_in_window <= prediction_length:
        pitpred[lap_in_window - 1] = 1
    return pitpred
#dynamical/static feature configure
#FEATURE_CARID = 1
# Selectors for the dynamic real-valued covariates fed to the forecasting
# model (see make_dataset_byevent):
#   FEATURE_STATUS    : track status + lap(pit) status
#   FEATURE_PITAGE    : track + lap status + laps since last pit
#   FEATURE_TRACKONLY : track status only
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
FEATURE_TRACKONLY = 8
def make_dataset_byevent(runs, prediction_length, freq,
                       useeid = False,
                       run_ts= COL_LAPTIME,
                       test_event = 'Indy500-2018',
                       test_cars = [],
                       use_global_dict = True,
                       oracle_mode = MODE_ORACLE,
                       feature_mode = FEATURE_STATUS,
                       half_moving_win = 0,
                       train_ratio=0.8,
                       context_ratio = 0.,
                       verbose = False
                ):
    """
    Split the time series into train/test parts and build GluonTS datasets.

    input:
        runs ; index into the global ``laptime_data`` (<0 means all events)
        prediction_length ; forecast horizon in laps
        freq ; pandas frequency string for the synthetic timestamps
        useeid ; add the event id as a second static categorical feature
        run_ts ; which feature row of the data cube is the prediction target
        test_event ; event whose series go to the test set (others skipped)
        test_cars ; restrict the test set to these car numbers ([] = all).
                Read-only here, so the mutable default is safe.
        use_global_dict ; map car numbers through the global ``global_carids``
        oracle_mode: bit flags (MODE_*) controlling how the covariates of the
                forecast window are filled (oracle / zero / current-value /
                model-predicted) or disturbed
        feature_mode ; FEATURE_* selector (NOTE: overridden by the module
                global _feature_mode below)
        half_moving_win ; rolling-window step: 0 -> -1, 1 -> -plen/2, 2 -> -plen
        train_ratio ; minimum-length ratio; shorter series are dropped
        context_ratio ; context length ratio (overridden by _context_ratio)
    return:
        train_ds, test_ds ; gluonts ListDataset
        train_set, test_set ; the underlying record lists
    """
    # NOTE: the module-level run settings deliberately override the parameters
    feature_mode = _feature_mode
    context_ratio = _context_ratio

    # reset the caches of the simulated/disturbed track-status models
    init_track_model()
    init_adjust_track_model()

    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    train_set = []
    test_set = []

    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()

    # disturbance statistics over the forecast windows:
    # mae[0] = track status abs error, mae[1] = lap status abs error
    mae = [0,0]

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []

        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out: this builder only keeps the test event
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)

        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        if verbose:
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            #check carno in test_cars
            if len(test_cars)>0 and carno not in test_cars:
                continue

            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            # adjust for disturbance analysis (per-car cached pit shifts)
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)

            # selection of features: zero out covariates excluded by the mode
            # (bug fix: the MODE_NOLAP check was duplicated verbatim in the
            # original; the redundant copy has been removed)
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                # NOTE(review): the train records always carry exactly two
                # dynamic features, while FEATURE_PITAGE test records carry
                # three — confirm trained models match the test feature set.
                _train.append({'target': rec[run_ts,:].astype(np.float32),
                        'start': start,
                        'feat_static_cat': static_cat,
                        'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                               rec[COL_LAPSTATUS,:]]
                         }
                      )
            else:
                # multiple test ts (rolling window over the lap axis)
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1

                # the split points are fixed for all cars/ts
                for endpos in range(max_len, context_len+prediction_length,
                                    step):

                    #check if enough for this ts
                    if endpos > totallen:
                        continue

                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    # stint state at the last observed lap before the window
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                    # test mode: fill the unknown forecast window
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as zero
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                        laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    # disturbance analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec

                    # end of adjustments; accumulate the disturbance error
                    # (rec_raw vs. track_rec, lap_rec)
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))

                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': rec[run_ts,:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': rec[run_ts,:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_TRACKONLY:
                        _test.append({'target': rec[run_ts,:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec]
                                 }
                              )

                    test_rec_cnt += 1

            #add one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

        train_set.extend(_train)
        test_set.extend(_test)

    print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)

    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile, freq, prediction_length, cardinality, train_ds, test_ds):
    """Pickle the dataset bundle to *datafile*.

    The file contains the list
    [freq, prediction_length, cardinality, train_ds, test_ds].
    """
    bundle = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as f:
        # use the highest pickle protocol available
        pickle.dump(bundle, f, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[7]:
def predict(test_ds, predictor):
    """Run *predictor* over *test_ds* and materialize the results.

    return:
        tss ; list of ground-truth series
        forecasts ; list of sample-path forecast objects (100 samples each)
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,
        predictor=predictor,
        num_samples=100,
    )
    forecasts = [f for f in forecast_it]
    tss = [t for t in ts_it]
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name, trainid):
    """Load the named model and forecast over *test_ds*.

    Refactor: the original repeated the same deserialize/predict block six
    times with only the model directory name changing; the directory name
    templates are now table-driven (strings unchanged).

    input:
        test_ds ; gluonts test dataset
        prediction_length ; forecast horizon in laps
        model_name ; one of the deepAR variants below, or 'naive'/'zero'/'arima'
        trainid ; training run id used in the model directory path
    return:
        [tss, forecasts] on success, [] for an unknown model name
    """
    # serialized deepAR-family models: model name -> directory name template
    deep_model_dirs = {
        'curtrack': 'deepAR-Oracle-{task}-curtrack-indy-f1min-t{plen}-e1000-r1_curtrack_t{plen}',
        'zerotrack': 'deepAR-Oracle-{task}-nolap-zerotrack-indy-f1min-t{plen}-e1000-r1_zerotrack_t{plen}',
        'oracle': 'deepAR-Oracle-{task}-all-indy-f1min-t{plen}-e1000-r1_oracle_t{plen}',
        'oracle-laponly': 'deepAR-Oracle-{task}-all-indy-f1min-t{plen}-e1000-r1_oracle-laponly_t{plen}',
        'oracle-trackonly': 'deepAR-Oracle-{task}-all-indy-f1min-t{plen}-e1000-r1_oracle-trackonly_t{plen}',
        'deepAR': 'deepAR-{task}-all-indy-f1min-t{plen}-e1000-r1_deepar_t{plen}',
    }

    with mx.Context(mx.gpu(7)):
        pred_ret = []

        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'

        if model_name in deep_model_dirs:
            model = deep_model_dirs[model_name].format(task=_task_id, plen=prediction_length)
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]

        # naive: repeat the last observed value
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq=freq, prediction_length=prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]

        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq=freq, prediction_length=prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]

        # arima via the R bridge
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq=freq,
                                           prediction_length=prediction_length, trunc_length=60)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]

        else:
            print(f'error: model {model_name} not support yet!')

        return pred_ret
# In[8]:
def load_model(model_name, trainid, prediction_length):
    """Load/construct the named predictor (no forecasting here).

    Refactor: the original repeated the deserialize block eight times; the
    directory name templates are now table-driven (strings unchanged).
    Bug fix: an unknown *model_name* used to hit ``return predictor`` with
    ``predictor`` unbound and raise NameError; it now returns None after
    printing the error.

    input:
        model_name ; deepAR variant below, or 'naive'/'zero'/'arima'
        trainid ; training run id used in the model directory path
        prediction_length ; forecast horizon in laps
    return:
        the predictor, or None for an unknown model name
    """
    # serialized deepAR-family models: model name -> directory name template
    deep_model_dirs = {
        'curtrack': 'deepAR-Oracle-{task}-curtrack-indy-f1min-t{plen}-e1000-r1_curtrack_t{plen}',
        'zerotrack': 'deepAR-Oracle-{task}-nolap-zerotrack-indy-f1min-t{plen}-e1000-r1_zerotrack_t{plen}',
        'oracle': 'deepAR-Oracle-{task}-all-indy-f1min-t{plen}-e1000-r1_oracle_t{plen}',
        'oracle-laponly': 'deepAR-Oracle-{task}-all-indy-f1min-t{plen}-e1000-r1_oracle-laponly_t{plen}',
        'oracle-trackonly': 'deepAR-Oracle-{task}-all-indy-f1min-t{plen}-e1000-r1_oracle-trackonly_t{plen}',
        'deepAR': 'deepAR-{task}-all-indy-f1min-t{plen}-e1000-r1_deepar_t{plen}',
        'deepARW': 'deepARW-{task}-all-indy-f1min-t{plen}-e1000-r1_deepar_t{plen}',
        'deepARW-oracle': 'deepARW-Oracle-{task}-all-indy-f1min-t{plen}-e1000-r1_oracle_t{plen}',
    }

    with mx.Context(mx.gpu(7)):
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'
        predictor = None

        if model_name in deep_model_dirs:
            model = deep_model_dirs[model_name].format(task=_task_id, plen=prediction_length)
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')

        # naive: repeat the last observed value
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq=freq, prediction_length=prediction_length)

        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq=freq, prediction_length=prediction_length)

        # arima via the R bridge
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq=freq,
                                           prediction_length=prediction_length, trunc_length=60)

        else:
            print(f'error: model {model_name} not support yet!')

        return predictor
#
#calc rank
def eval_rank_bytimediff(test_ds, tss, forecasts, prediction_length):
    """
    Calculate true and predicted rank from time-difference forecasts.

    Works for one event only: cars are grouped by the lap at which each
    forecast window starts, and rank is the argsort of the (true/predicted)
    time difference across cars at every step of the window.

    input:
        test_ds ; test dataset (used to recover the car number per series)
        tss, forecasts ; output of predict()
        prediction_length ; expected forecast horizon
    return:
        rank_ret ; list of [lap, time_diff, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [true window, pred window]}}
        (returns [] on a horizon mismatch)
    """
    carlist = []

    # completed_laps -> {carno: [true timediff window, predicted window]}
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the static categorical feature back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: the original message lacked the f prefix, so the
            # values were never interpolated
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []

        # point forecast = per-step median over the sample paths
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))

        timediff_array = tss[idx].values.copy()

        # save the prediction, keyed by the lap where the window starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                                               forecast_laptime_mean.copy()]

    # calc rank per split lap
    rank_ret = []
    for lap in forecasts_et.keys():
        # cars present at this split lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}

        # fill in data: [0]=true, [1]=predicted
        time_diff = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        # rank = argsort of argsort along the car axis
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)

        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, time_diff, true_rank, pred_rank])

    return rank_ret, forecasts_et
#calc rank
def eval_rank_bylaptime(test_ds, tss, forecasts, prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event
                (DataFrame with car_number and elapsed_time columns)
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
        (returns [] on a horizon mismatch)
    """
    carlist = []

    # completed_laps -> {carno: [elapsed_time, elapsed_time_pred]}
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the static categorical feature back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        # per-car elapsed time at lap 0
        offset = start_offset[(start_offset['car_number'] == carno)].elapsed_time.values[0]

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: the original message lacked the f prefix, so the
            # values were never interpolated
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []

        # point forecast = per-step mean over the sample paths
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))

        # ground-truth elapsed time = cumulative lap time + lap-0 offset
        laptime_array = tss[idx].values.copy()
        elapsed_time = np.cumsum(laptime_array) + offset

        # predicted elapsed time: splice the forecast into the last window
        laptime_array = tss[idx].values.copy()
        laptime_array[-prediction_len:] = forecast_laptime_mean
        elapsed_time_hat = np.cumsum(laptime_array) + offset

        # save the prediction, keyed by the lap where the window starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]

    # calc rank per split lap
    rank_ret = []
    for lap in forecasts_et.keys():
        # cars present at this split lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}

        # fill in data: [0]=true, [1]=predicted
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0]
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1]

        # rank = argsort of argsort along the car axis
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)

        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])

    return rank_ret, forecasts_et
def eval_laptime(test_ds, tss, forecasts, prediction_length, start_offset):
    """
    Evaluate laptime forecasting directly (no elapsed-time / rank conversion).

    input:
        test_ds       ; must be the test set of a single event (test_ds itself
                        carries no feature that identifies the eventid)
        start_offset  ; unused here; kept for signature parity with eval_rank*
        tss, forecasts; ground-truth series and forecast objects (gluonts output)
        prediction_length ; expected forecast horizon; must match the forecasts
    return:
        rank_ret     ; list of [lap, lap_time, true_laptime, pred_laptime]
        forecasts_et ; {completed_laps: {carno: [laptime, laptime_pred]}}
    """
    carlist = []

    # completed_laps -> {carno: [true laptimes, predicted laptimes]}
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # decode_carids is a module-level global: static_cat id -> car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: the 'f' prefix was missing, so the braces printed literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []

        # point forecast: mean over the sample paths
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))

        # true laptimes, and a copy with the horizon overwritten by the forecast
        laptime_array = tss[idx].values.copy()
        laptime_array_hat = tss[idx].values.copy()
        laptime_array_hat[-prediction_len:] = forecast_laptime_mean

        # first lap of the forecast window for this record
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
                                               laptime_array_hat[-prediction_len:].copy()]

    # collect per-window laptime matrices (no ranking for this task)
    rank_ret = []
    for lap in forecasts_et.keys():
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}

        # [0]=true, [1]=predicted laptime, per car, per horizon step
        lap_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        true_laptime = lap_time[0]
        pred_laptime = lap_time[1]
        rank_ret.append([lap, lap_time, true_laptime, pred_laptime])

    return rank_ret, forecasts_et
#calc rank
def eval_rank(test_ds, tss, forecasts, prediction_length, start_offset):
    """
    Evaluate rank prediction, either directly or via laptime-to-rank conversion.

    input:
        test_ds       ; must be the test set of a single event (test_ds itself
                        carries no feature that identifies the eventid)
        start_offset  ; DataFrame of lap0 elapsed time per car -> laptime2rank
                        mode; any non-DataFrame (e.g. 0) -> series are ranks
        tss, forecasts; ground-truth series and forecast objects (gluonts output)
        prediction_length ; expected forecast horizon; must match the forecasts
    return:
        rank_ret     ; list of [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [elapsed_time, elapsed_time_pred]}}
    """
    carlist = []

    # completed_laps -> {carno: [true series, predicted series]}
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # decode_carids is a module-level global: static_cat id -> car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        # laptime2rank mode: need the elapsed-time base at lap 0
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number'] == carno)].elapsed_time.values[0]

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: the 'f' prefix was missing, so the braces printed literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []

        # point forecast: mean over the sample paths
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))

        if isinstance(start_offset, pd.core.frame.DataFrame):
            # laptime2rank: convert laptime series into elapsed time
            laptime_array = tss[idx].values.copy()
            elapsed_time = np.cumsum(laptime_array) + offset

            laptime_array = tss[idx].values.copy()
            laptime_array[-prediction_len:] = forecast_laptime_mean
            elapsed_time_hat = np.cumsum(laptime_array) + offset
        else:
            # rank-direct: the series already hold the values to be ranked
            elapsed_time = tss[idx].values.copy()
            elapsed_time_hat = tss[idx].values.copy()
            elapsed_time_hat[-prediction_len:] = forecast_laptime_mean

        # first lap of the forecast window for this record
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]

    # derive ranks per forecast window
    rank_ret = []
    for lap in forecasts_et.keys():
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}

        # [0]=true, [1]=predicted, per car, per horizon step
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        # rank = argsort of argsort across cars (0 == leader)
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])

    return rank_ret, forecasts_et
def get_acc_ex(rank_ret, prediction_length, verbose=False):
    """
    Aggregate rank-prediction accuracy metrics (extended variant with MAE).

    input:
        rank_ret: list of [lap, elapsed_time, true_rank, pred_rank]; only the
                  true_rank/pred_rank columns ([2]/[3]) are used
        prediction_length: forecast horizon; normalizes the per-step hit counts
    return:
        ((top1acc_farmost, mae, top1acc, top5acc, top5acc_farmost, tau, rmse),
         (matching record counts per metric))
        the result can be used to calculate micro/macro metrics
    """
    # evaluate
    # top1 accuracy
    top1acc = 0
    top1acc_farmost = 0
    top5acc = 0
    top5acc_farmost = 0
    tau = 0
    rmse = 0.
    mae = 0.
    reccnt = 0  # total car-rows across all windows (denominator for mae)
    for rec in rank_ret:
        trueRank = rec[2]
        predRank = rec[3]
        # top1 , rank = 0, first col is not prediction
        top1acc += np.sum((trueRank==0) & (predRank==0))
        # farmost: only the last horizon step of each window
        top1acc_farmost += np.sum((trueRank[:,-1]==0) & (predRank[:,-1]==0))
        # top5
        top5acc += np.sum((trueRank<5) & (predRank<5))
        top5acc_farmost += np.sum((trueRank[:,-1]<5) & (predRank[:,-1]<5))
        # Kendall's tau rank-correlation between true and predicted ranks
        tao, _ = stats.kendalltau(trueRank, predRank)
        tau += tao
        # rmse
        rmse += mean_squared_error(predRank,trueRank)
        mae += np.sum(np.abs(predRank - trueRank))
        reccnt += len(trueRank)
    recnt = len(rank_ret)
    if recnt > 0:
        # normalize hit counts into rates; tau/rmse are averaged per window
        top1acc = top1acc *1.0/ (recnt*prediction_length)
        top1acc_farmost = top1acc_farmost *1.0/ recnt
        top5acc = top5acc *1.0/ (5*recnt*prediction_length)
        top5acc_farmost = top5acc_farmost *1.0/ (5*recnt)
        tau = tau/recnt
        rmse = rmse/recnt
        mae = mae / reccnt
    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', top1acc,
                'top1acc_farmost=', top1acc_farmost,
                'top5acc=', top5acc,
                'top5acc_farmost=', top5acc_farmost,
                )
        print('tau = ', tau,
             'rmse = ', rmse)
    # NOTE: metric order differs from get_acc (top1acc_farmost and mae first)
    #return ((top1acc,top1acc_farmost,top5acc,top5acc_farmost,tau,rmse),
    return ((top1acc_farmost,mae, top1acc, top5acc,top5acc_farmost,tau,rmse),
            (recnt, recnt, recnt*prediction_length,5*recnt*prediction_length,5*recnt,recnt,recnt))
def get_acc(rank_ret, prediction_length, verbose=False):
    """
    Aggregate rank-prediction accuracy metrics over all forecast windows.

    input:
        rank_ret: list of [lap, elapsed_time, true_rank, pred_rank]; only the
                  true_rank/pred_rank columns ([2]/[3]) are used
        prediction_length: forecast horizon; normalizes the per-step hit counts
    return:
        ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
         (matching record counts per metric))
        the result can be used to calculate micro/macro metrics
    """
    n_windows = len(rank_ret)

    top1acc = 0
    top1acc_farmost = 0
    top5acc = 0
    top5acc_farmost = 0
    tau = 0
    rmse = 0.

    for _lap, _et, trueRank, predRank in rank_ret:
        # top-1: rank value 0 marks the leader
        top1acc += np.sum((trueRank == 0) & (predRank == 0))
        # farmost: only the last horizon step of each window
        top1acc_farmost += np.sum((trueRank[:, -1] == 0) & (predRank[:, -1] == 0))
        # top-5
        top5acc += np.sum((trueRank < 5) & (predRank < 5))
        top5acc_farmost += np.sum((trueRank[:, -1] < 5) & (predRank[:, -1] < 5))
        # Kendall's tau rank-correlation between true and predicted ranks
        kendall, _p = stats.kendalltau(trueRank, predRank)
        tau += kendall
        rmse += mean_squared_error(predRank, trueRank)

    if n_windows > 0:
        # normalize hit counts into rates; tau/rmse are averaged per window
        top1acc = top1acc * 1.0 / (n_windows * prediction_length)
        top1acc_farmost = top1acc_farmost * 1.0 / n_windows
        top5acc = top5acc * 1.0 / (5 * n_windows * prediction_length)
        top5acc_farmost = top5acc_farmost * 1.0 / (5 * n_windows)
        tau = tau / n_windows
        rmse = rmse / n_windows

    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', top1acc,
                'top1acc_farmost=', top1acc_farmost,
                'top5acc=', top5acc,
                'top5acc_farmost=', top5acc_farmost,
                )
        print('tau = ', tau,
             'rmse = ', rmse)

    return ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
            (n_windows * prediction_length, n_windows, 5 * n_windows * prediction_length,
             5 * n_windows, n_windows, n_windows))
# In[9]:
def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
                test_cars = [],
                datamode = MODE_ORACLE,models = ['oracle']):
    """
    Run one experiment (dataset build + prediction + evaluation) on _test_event.

    dependency: test_event, test on one event only; also uses module globals
    _run_ts, _exp_id, global_start_offset, events_id, freq.

    returns: (pred_ret, ds_ret, rank_result, retdf) where retdf rows are
    [model, prediction_length, half_moving_win, modestr, trainid, *metrics].
    """
    retdf = []
    pred_ret = {}
    ds_ret = {}
    rank_result = {}

    ### create test dataset
    train_ds, test_ds,_,_ = make_dataset_byevent(events_id[_test_event], prediction_length,freq,
                                         oracle_mode=datamode,
                                         run_ts = _run_ts,
                                         test_event = _test_event,
                                         test_cars=test_cars,
                                         half_moving_win= half_moving_win,
                                         train_ratio=train_ratio)

    for model in models:
        # inspect.stack()[0][3] is this function's name, printed for traceability
        print('exp:',inspect.stack()[0][3],'model:', model,
                'datamode:', get_modestr(datamode),'eval:', _exp_id )
        tss, forecasts = run_prediction_ex(test_ds, prediction_length, model,
                                           trainid=trainid)
        pred_ret[model] = [tss, forecasts]
        ds_ret[model] = test_ds

        # dispatch evaluation by the module-level experiment id
        if _exp_id=='rank':
            #rank prediction
            rank_ret, forecast_ret = eval_rank(test_ds,tss,forecasts,prediction_length,
                                               0)
        elif _exp_id=='laptime2rank':
            rank_ret, forecast_ret = eval_rank(test_ds,tss,forecasts,prediction_length,
                                               global_start_offset[_test_event])
        elif _exp_id=='timediff2rank':
            rank_ret, forecast_ret = eval_rank_bytimediff(test_ds,tss,forecasts,prediction_length)
        elif _exp_id=='laptime':
            #laptime instead
            rank_ret, forecast_ret = eval_laptime(test_ds,tss,forecasts,prediction_length,
                                                  global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break

        metrics = get_acc(rank_ret,prediction_length)
        ret = [model, prediction_length, half_moving_win,get_modestr(datamode),trainid]
        ret.extend(metrics[0])
        retdf.append(ret)

        rank_result[model] = (rank_ret,forecast_ret)

    return pred_ret, ds_ret, rank_result, retdf
# In[10]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc, datamode='', models=[]):
    """
    Repeat experiments over all (halfmode, plen, trainid) combinations for
    `runs` runs, then average the metrics across runs.

    input:
        plens=[2,5,10]
        half=[False]
        #trainids = ["indy500-r0.2","indy500-r0.4","indy500"]
        trainids = ["r0.5"]
        #half=[True,False]
        #plens=[2]
        runs = 5
        train_ratio=0.5
        exp_id='mean-splitbystage-predpit'
        testfunc ; either a callable (run_exp_predpit, run_exp_predtrack, ...)
                   or a string label, in which case run_exp is called with
                   datamode/models
    return:
        dfret  ; average result of multiple runs
             dataframe['model' , 'prediction_length', 'halfmode','datamode','trainid',
                  'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse',
                  'top1acc_std','top1acc_farmost_std','top5acc_std','top5acc_farmost_std','tau_std','rmse_std']
        alldata_ret ; for debug
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
               pred_ret[model] -> [tss, forecasts]
               test_ds[model] -> test_ds
               rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
                   forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    # guard: refuse empty sweeps
    if plens == [] or half == [] or trainids == []:
        print("error with empty settings")
        return

    #testfunc or (datamode & models)
    if isinstance(testfunc,str) and (datamode == '' or models == []):
        print("error with testfunc")
        return

    allret = []
    alldata_ret = []
    for runid in range(runs):
        exp_data = []
        exp_result = []
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('='*10)
                    # a callable testfunc overrides the default run_exp path
                    if not isinstance(testfunc,str):
                        pred_ret, test_ds, rank_ret, metric_ret = testfunc(plen, halfmode,
                                train_ratio=train_ratio,
                                trainid=trainid)
                    else:
                        pred_ret, test_ds, rank_ret, metric_ret = run_exp(plen, halfmode,
                                train_ratio=train_ratio,
                                trainid=trainid,
                                datamode=datamode,
                                models=models)

                    #save
                    exp_data.append((pred_ret, test_ds, rank_ret))
                    exp_result.extend(metric_ret)

        #save result
        result = pd.DataFrame(exp_result, columns = ['model' , 'prediction_length', 'halfmode',
                                'datamode','trainid',
                                'top1acc','top1acc_farmost','top5acc',
                                'top5acc_farmost','tau','rmse'])
        #result['runid'] = [runid for x in range(len(result))]
        allret.append(result)
        alldata_ret.append(exp_data)

    #final: stack the 6 metric columns of every run into (runs, rows, 6)
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :,:] = ret[['top1acc','top1acc_farmost','top5acc',
                                'top5acc_farmost','tau','rmse']].values

    #average and std-dev across runs
    averagemat = np.mean(metrics[:,:,:], axis=0)
    stdmat = np.std(metrics[:,:,:], axis=0)

    dfhead = allret[0][['model' , 'prediction_length', 'halfmode', 'datamode','trainid']]
    dfaverage = pd.DataFrame(averagemat, columns = ['top1acc','top1acc_farmost','top5acc',
                                'top5acc_farmost','tau','rmse'])
    dfstd = pd.DataFrame(stdmat, columns = ['top1acc_std','top1acc_farmost_std','top5acc_std',
                                'top5acc_farmost_std','tau_std','rmse_std'])

    dfret = pd.concat([dfhead, dfaverage, dfstd], axis=1)

    #if exp_id != '':
    #    dfret.to_csv(f'laptime2rank-evaluate-indy500-{exp_id}-result.csv', float_format='%.3f')

    return dfret, alldata_ret
def checkret_status(dataret, runid = 0, idx = 0, model='oracle'):
    """
    Check the test_ds track and lap status by counting yellow-flag and pit laps
    inside the forecast horizon of every record, then print the totals.

    alldata_ret ; for debug
        [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
           pred_ret[model] -> [tss, forecasts]
           test_ds[model] -> test_ds
           rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
               forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    # horizon length, recovered from the first forecast's samples shape
    _, plen = dataret[runid][idx][0][model][1][0].samples.shape

    test_ds = dataret[runid][idx][1][model]

    ds_iter = iter(test_ds)
    yfcnt = 0   # yellow-flag laps inside the horizon, summed over records
    pitcnt = 0  # pit laps inside the horizon, summed over records
    for recid in range(len(test_ds)):
        test_rec = next(ds_iter)
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        # feat_dynamic_real is assumed to carry exactly (track, lap) status rows
        track_rec, lap_rec = test_rec['feat_dynamic_real']

        yfcnt += np.sum(track_rec[-plen:])
        pitcnt += np.sum(lap_rec[-plen:])

    print('yfcnt:', yfcnt, 'pitcnt:', pitcnt)
def get_ref_oracle_testds(plens, halfs, train_ratio=0.8, test_cars = []):
    """
    Build oracle-mode reference test datasets for every (prediction_length,
    half_moving_win) combination.

    return: dict keyed by '<plen>-<halfmode>' -> test_ds; used later by
    checkret_confusionmat as the ground-truth feature source.
    """
    testset = {}
    for prediction_length in plens:
        for half_moving_win in halfs:
            # uses module globals: events_id, _test_event, _run_ts, freq
            train_ds, test_ds,_,_ = make_dataset_byevent(events_id[_test_event], prediction_length,freq,
                                         oracle_mode=MODE_ORACLE,
                                         run_ts = _run_ts,
                                         test_event = _test_event,
                                         test_cars=test_cars,
                                         half_moving_win= half_moving_win,
                                         train_ratio = train_ratio
                                         )
            # get key
            key = '%d-%d'%(prediction_length,half_moving_win)
            testset[key] = test_ds

    return testset
def checkret_confusionmat(dataret, ref_testset, runid= 0, testid = '', model='oracle'):
    """
    Output accuracy split by <trackstatus, lapstatus> combination ('00','10',
    '01','11') plus an all-windows row ('aa'), for every prediction length in
    this run.

    input:
        dataret     ; alldata_ret from run_test
        ref_testset ; oracle test datasets from get_ref_oracle_testds, used to
                      recover the true track/pit features per window
    return: DataFrame['testid','plen','type','reccnt', *get_acc metrics]
    """
    plen_length = len(dataret[runid])

    dflist = []
    for idx in range(plen_length):
        # horizon length, recovered from the forecast samples shape
        _, plen = dataret[runid][idx][0][model][1][0].samples.shape
        test_ds = dataret[runid][idx][1][model]
        rank_ret = dataret[runid][idx][2][model][0]

        # ref_testset keys are '<plen>-<halfmode>'; halfmode 0 assumed here
        key = '%d-%d'%(plen,0)
        if key not in ref_testset:
            print(f'error, {key} not found in ref_testset')
            continue
        ref_oracle_testds = ref_testset[key]

        if len(ref_oracle_testds) != len(test_ds):
            print('error, size of testds mismatch', len(ref_oracle_testds), len(test_ds))
            continue

        # confusion matrix for <trackstatus, lapstatus> type: 00,01,10,11
        # lap(start lap of prediction) -> (yellow-flag count, pit count)
        lapmap = {}

        ds_iter = iter(ref_oracle_testds)
        for recid in range(len(ref_oracle_testds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            #track_rec,lap_rec = test_rec['feat_dynamic_real']
            dyna_feats = test_rec['feat_dynamic_real']
            track_rec = dyna_feats[0]
            lap_rec = dyna_feats[1]

            yfcnt = np.sum(track_rec[-plen:])
            pitcnt = np.sum(lap_rec[-plen:])
            #laptype = ('0' if yfcnt==0 else '1') + ('0' if pitcnt==0 else '1')
            lap = len(track_rec) - plen + 1
            if lap not in lapmap:
                #lapmap[lap] = laptype
                lapmap[lap] = (yfcnt, pitcnt)
            else:
                # accumulate counts across cars starting at the same lap
                oldtype = lapmap[lap]
                lapmap[lap] = (yfcnt + oldtype[0], pitcnt + oldtype[1])

        #split the rank_ret by laptype
        types=['00','10','01','11']
        acc_ret = []
        for laptype in types:
            check_ret = []
            for item in rank_ret:
                typecnt = lapmap[item[0]]
                thetype = ('0' if typecnt[0]==0 else '1') + ('0' if typecnt[1]==0 else '1')
                if thetype == laptype:
                    check_ret.append(item)

            # get acc for the windows of this <track,pit> type only
            metrics = get_acc(check_ret,plen)
            recret = [testid, plen, laptype, len(check_ret)]
            recret.extend(metrics[0])
            acc_ret.append(recret)

        #add all test ('aa' row aggregates every window)
        metrics = get_acc(rank_ret,plen)
        recret = [testid, plen, 'aa', len(rank_ret)]
        recret.extend(metrics[0])
        acc_ret.append(recret)

        _dfacc = pd.DataFrame(acc_ret, columns = ['testid','plen',
                            'type','reccnt','top1acc','top1acc_farmost','top5acc',
                            'top5acc_farmost','tau','rmse'])
        dflist.append(_dfacc)

    dfacc = pd.concat(dflist, axis=0)
    return dfacc
# In[11]:
def check_testds(datamode, test_cars=[]):
    """
    report mae, etc

    NOTE(review): this builds the datasets for every (plen, halfmode)
    combination but never uses or returns them — presumably an unfinished
    stub; confirm before relying on it. Uses module globals plens, half,
    train_ratio, events_id, _test_event, _run_ts, freq.
    """
    for prediction_length in plens:
        for half_moving_win in half:
            train_ds, test_ds,_,_ = make_dataset_byevent(events_id[_test_event], prediction_length,freq,
                                         oracle_mode=datamode,
                                         run_ts = _run_ts,
                                         test_event = _test_event,
                                         test_cars=test_cars,
                                         half_moving_win= half_moving_win,
                                         train_ratio=train_ratio)
def dotest(config):
    """
    Run every (model, test-mode) pair described in *config* and collect the
    averaged metrics plus the confusion-matrix accuracy tables.

    config: {model_name: {test_label: datamode_bits}}
    return: (dfret, dfacc) — concatenated run_test results and
            checkret_confusionmat tables.
    Uses module globals: runs, plens, half, trainids, train_ratio, ref_testset.
    """
    result_frames = []
    acc_frames = []
    for model, modeconf in config.items():
        for teststr, datamode in modeconf.items():
            # teststr is passed as a string label, so run_test falls back to
            # run_exp with the given datamode
            df, dataret = run_test(runs, plens, half, trainids,
                    train_ratio, teststr, datamode=datamode, models=[model])
            acc = checkret_confusionmat(dataret, ref_testset,
                    testid=teststr, model=model)
            result_frames.append(df)
            acc_frames.append(acc)

    dfret = pd.concat(result_frames, axis=0)
    dfacc = pd.concat(acc_frames, axis=0)
    return dfret, dfacc
# ### init
# In[12]:
#
# configuration
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
_dataset_id = 'indy2013-2018-nocarid'
#_dataset_id = 'indy2013-2018'
_test_event = 'Indy500-2019'
_feature_mode = FEATURE_STATUS
_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
# per-event DataFrame of lap0 elapsed time per car, filled by init()
global_start_offset = {}
# carno -> static_cat id, loaded from the pickle in init()
global_carids = {}
laptime_data = None
freq = "1min"
# inverse of global_carids: static_cat id -> carno, filled by init()
decode_carids = {}
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
#dbid = f'Indy500_{years[0]}_{years[-1]}'
dbid = f'Indy500_{years[0]}_{years[-1]}_v9'
def init():
    """
    Load per-event race data and the pre-built laptime pickle, populating the
    module globals global_carids, laptime_data, global_start_offset and
    decode_carids.
    """
    global global_carids, laptime_data, global_start_offset, decode_carids

    stagedata = {}
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)

        alldata, rankdata, acldata = stagedata[event]
        #offset: elapsed time of every car at lap 0, used as cumsum base later
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]

    # start from here
    import pickle
    #with open('laptime_rank_timediff_fulltest-oracle-%s.pickle'%year, 'rb') as f:
    with open(f'laptime_rank_timediff_pit-oracle-{dbid}.pickle', 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')

    decode_carids={carid:carno for carno, carid in global_carids.items()}
# In[15]:
#useeid = False
#interpolate = False
#ipstr = '-ip' if interpolate else '-noip'
#ipstr = '%s-%s'%('ip' if interpolate else 'noip', 'eid' if useeid else 'noeid')
#if useeid:
# cardinality = [len(global_carids), len(laptime_data)]
#else:
# cardinality = [len(global_carids)]
# ### oracle test
# In[16]:
### test sweep configuration (module globals used by run_test/dotest/mytest)
plens=[2,5,10]         # prediction lengths to evaluate
half=[0]               # halfwin modes (0:no, 1:halfwin, 2:continuous)
trainids = ["2018"]    # trained-model identifiers
#trainids = ["r0.5","r0.6"]
runs = 1               # number of repeated runs to average over
ref_testset = None     # filled by mytest() via get_ref_oracle_testds
_context_ratio = 0.
train_ratio = 0.4
def mytest():
    """
    Main evaluation entry: run every model/datamode combination and persist the
    averaged metrics and contingency tables to CSV. If the output CSV already
    exists, reload and return it instead of re-running.

    return: (dfacc, dfret) on cache hit; (dfacc, dfret) computed otherwise
    (note: dotest returns (dfret, dfacc) — order differs between the paths).
    """
    global ref_testset
    #half=[True, False]
    #plens=[2,5,10,20,30]
    acc_output = f'{_exp_id}-evaluate-mean-splitbyevent-fulltest-contigency-d{_dataset_id}-t{_test_event}-r{runs}-c{_context_ratio}-result.csv'
    ret_output = f'{_exp_id}-evaluate-mean-splitbyevent-fulltest-all-d{_dataset_id}-t{_test_event}-r{runs}-c{_context_ratio}-result.csv'

    if os.path.exists(ret_output):
        print(f'{ret_output} already exists, bye')
        dfacc = pd.read_csv(acc_output)
        dfret = pd.read_csv(ret_output)
        return dfacc, dfret

    # model -> {test label -> datamode bit combination}
    config = {'oracle':
                  {# features in train and test
                   'fulloracle':MODE_ORACLE,
                   'notracklap':MODE_NOTRACK + MODE_NOLAP,
                   'laponly':MODE_ORACLE_LAPONLY,
                   'trackonly':MODE_ORACLE_TRACKONLY,
                   # features in test
                   'fullpred':MODE_PREDTRACK + MODE_PREDPIT,
                   'predtrack':MODE_PREDTRACK + MODE_ORACLE_TRACKONLY,
                   'predpit':MODE_PREDPIT + MODE_ORACLE_LAPONLY,
                   'curtrack':MODE_TESTCURTRACK,
                   'zerotrack':MODE_TESTZERO
                  },
              'deepAR':{'deepAR':MODE_ORACLE},
              'naive':{'naive':MODE_ORACLE},
              'zero':{'zero':MODE_ORACLE}
             }

    # oracle reference datasets for the confusion-matrix split
    ref_testset = get_ref_oracle_testds(plens, half, train_ratio=train_ratio)

    dfret, dfacc = dotest(config)

    dfret.to_csv(ret_output, float_format='%.3f')
    dfacc.to_csv(acc_output, float_format='%.3f')

    #dfacc[dfacc['type']=='aa']
    return dfacc, dfret
# In[ ]:
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)

    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))

    # cmd argument parser
    usage = 'evaluate-fulltest-fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    parser.add_option("--contextratio", dest="contextratio", default=0.)

    opt, args = parser.parse_args()

    #set global parameters from the command line
    _dataset_id = opt.datasetid
    _test_event = opt.testevent

    # taskid selects which time-series column and evaluation path to use
    if opt.taskid == 'laptime':
        _task_id = 'laptime' # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
        _exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff' # rank,laptime, the trained model's task
        _run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
        _exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'rank':
        _task_id = 'rank' # rank,laptime, the trained model's task
        _run_ts = COL_RANK #COL_LAPTIME,COL_RANK
        _exp_id='rank' #rank, laptime, laptim2rank, timediff2rank...
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)

    if _dataset_id=='' or _test_event=='':
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)

    # dataset ids containing 'pitage' switch to the pit-age feature mode
    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE

    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
                _task_id)

    init()
    mytest()
| 75,410 | 35.911894 | 310 | py |
rankpredictor | rankpredictor-master/src/indycar/model/stint_simulator_shortterm_pitmodel_v0.py | #!/usr/bin/env python
# coding: utf-8
# ### stint simulator
# based on: Stint-Predictor-Fastrun
#
#
# long term predictor by continuously regressive forecasting at each pitstop
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random, math
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from indycar.model.pitmodel import PitModelSimple, PitModelMLP
# In[2]:
import os
# seed the PRNG from system entropy; simulation draws are non-reproducible
random.seed()
# notebook leftover: returns the cwd, result unused
os.getcwd()
#GPUID = 1
# ### global constants
# In[3]:
#
#
# column indices into the per-car time-series matrix (laptime_data)
#
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8

# share the memory
#COL_LAPSTATUS_PRED = 8 # for dynamic lapstatus predictions
#LAPSTATUS SAVED in forecast_et
COL_LAPSTATUS_SAVE = 0 #laptime no use
COL_CAUTION_LAPS_INSTINT_SAVE=7
COL_LAPS_INSTINT_SAVE= 8

# feature-mode selectors
FEATURE_STATUS = 2
FEATURE_PITAGE = 4

# oracle mode (bit flags, combined with +; tested via test_flag)
MODE_ORACLE = 1024  # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2

# oracle mode for training
MODE_NOLAP = 1
MODE_NOTRACK = 2

# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32

# disturbance analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256

# bit flag -> human-readable name, used by get_modestr
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
             MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
             MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
             MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
             MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
             MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
# In[ ]:
# In[4]:
def load_data(event, year=0):
    """
    Load one event's lap-by-lap CSV and derive the per-lap datasets.

    params:
        event ; event name, e.g. 'Indy500-2018'
        year  ; optional; when > 0 it is appended to the filename
    return: (alldata, rankdata, acldata)
        alldata  ; raw records for all cars
        rankdata ; first record per (car, lap), ordered by elapsed time
        acldata  ; per-lap diffs for all cars (see make_cl_data)
    """
    #inputfile = '../data/final/C_'+ event +'-' + year + '-final.csv'
    if year > 0:
        # bug fix: year may be an int; str() avoids a TypeError on concatenation
        inputfile = '../data/final/C_' + event + '-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'
    dataset = pd.read_csv(inputfile)

    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1

    # get records for the cars that finish the race
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)

    # make a copy before filtering down to finishers
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]

    # one record per (car, lap): the earliest by elapsed time
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    cldata = make_cl_data(dataset)
    acldata = make_cl_data(alldata)

    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """
    Build the per-lap "completed laps" dataset:
    car_number, completed_laps, rank, elapsed_time, rank_diff, time_diff,
    current_status, track_status, lap_status.

    rank_diff/time_diff are per-car first-order differences, reset to 0 at
    each car boundary.
    """
    # keep the first record of each (car, lap), ordered by elapsed time
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # resort by car_number, lap so diff() runs within each car's laps
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade", "laps_behind_prec", "overall_rank", "pit_stop_count",
                          "last_pitted_lap", "start_position", "laps_led"], axis=1)

    # True at each car's first row: the diff there crosses a car boundary
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)

    # bug fix: use .loc instead of chained assignment, which pandas may apply
    # to a temporary copy (SettingWithCopyWarning) and silently drop
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[mask, 'rank_diff'] = 0

    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[mask, 'time_diff'] = 0

    df = uni_ds[['car_number', 'completed_laps', 'rank', 'elapsed_time',
                 'rank_diff', 'time_diff', "current_status", "track_status", "lap_status"]]
    return df
# In[5]:
def nan_helper(y):
    """Locate NaNs in a 1-D array and provide an index converter.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, logical indices of NaNs
        - index, a function with signature indices = index(logical_indices),
          converting logical indices of NaNs to 'equivalent' integer indices

    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    nan_mask = np.isnan(y)

    def to_indices(logical):
        return logical.nonzero()[0]

    return nan_mask, to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
def get_modestr(a):
    """Render the mode bits set in *a* as a comma-terminated string of names."""
    parts = []
    for flag, label in _mode_map.items():
        if test_flag(a, flag):
            parts.append('%s,' % label)
    return ''.join(parts)
# memoization caches for the track-status model:
# endpos -> predicted status vector of length prediction_length
_track_pred = {}
# endpos -> matching ground-truth status vector (for later comparison)
_track_true = {}

def init_track_model():
    """Clear the track-model caches before a new simulation run."""
    global _track_pred, _track_true
    _track_pred = {}
    _track_true = {}
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """
    Predict the future track status (yellow-flag) vector for one horizon.

    Results are memoized per *endpos* in the module-level _track_pred cache,
    and the matching ground-truth window is stored in _track_true.
    """
    global _track_pred, _track_true

    # this is the perfect yellow-flag duration model for Indy500 2018
    track_model = [6, 4, 4, 5, 6, 6, 4]

    if endpos in _track_pred:
        return _track_pred[endpos]

    # count consecutive yellow laps immediately before the forecast start
    yflaplen = 0
    for back in range(1, context_len):
        if track_rec[- prediction_length - back] == 1:
            yflaplen += 1
        else:
            break

    # sample a flag duration; fill its remaining laps into the future
    trackpred = np.zeros(prediction_length, dtype=int)
    yflap_pred = random.choice(track_model)
    if yflaplen > 0 and yflap_pred > yflaplen:
        trackpred[:(yflap_pred - yflaplen)] = 1

    _track_pred[endpos] = trackpred
    _track_true[endpos] = track_rec[- prediction_length:].copy()
    return trackpred
# cache keyed by endpos -> vector of length prediction_length
_track_adjust = {}

def init_adjust_track_model():
    """Reset the per-endpos cache of adjusted track-status vectors."""
    global _track_adjust
    _track_adjust = {}
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """
    input:
        tailpos ; <0 end pos of 1
    return the predicted track status

    Randomly perturbs the end of the caution phase inside the prediction
    window by -1/0/+1 laps; the result is cached per *endpos* in the
    module-level ``_track_adjust``.
    """
    global _track_adjust
    # this is the perfect track model for Indy500 2018
    track_model = [-1,0,1]
    if endpos in _track_adjust:
        return _track_adjust[endpos]
    else:
        yflap_adjust = random.choice(track_model)
        #laps remain, fill into the future
        trackadjust = track_rec[-prediction_length:].copy()
        if yflap_adjust == -1:
            # shorten the caution by one lap
            trackadjust[tailpos] = 0
        elif yflap_adjust == 1:
            # NOTE(review): this moves the last caution lap one position
            # later (clears tailpos, sets tailpos+1) rather than extending
            # the caution by one lap — confirm intended.
            trackadjust[tailpos] = 0
            if (tailpos + 1) <= -1:
                trackadjust[tailpos+1] = 1
        _track_adjust[endpos] = trackadjust
        return trackadjust
# caches: carno -> adjusted lap_status; shift offset -> observed count
_lap_adjust = {}
_empirical_model = {}

def init_adjust_pitmodel():
    """Reset the per-car adjusted lap-status cache and the empirical
    statistics of applied pit-position shifts.

    Bug fix: ``_empirical_model`` was previously assigned without a
    ``global`` declaration, creating a useless local instead of
    clearing the module-level dict.
    """
    global _lap_adjust, _empirical_model
    _lap_adjust = {}
    _empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """
    init the lapstatus for each car, save it for future reference

    Each pit lap (value 1) is moved by a random offset drawn from
    ``_adjust_model``; with ``force`` the draw is retried until the new
    position is inside the vector, otherwise only one attempt is made.
    The adjusted vector is cached per *carno* in ``_lap_adjust`` and the
    applied offsets are tallied in ``_empirical_model``.

    input:
        carno;
        lapstatus ; the trueth
    """
    if carno not in _lap_adjust:
        #adjust it
        lapadjust = lapstatus.copy()
        for pos in range(0, len(lapstatus)):
            if lapadjust[pos] == 1:
                success = False
                while(not success):
                    # adjust this pit lap position
                    pos_adjust = get_random_choice(_adjust_model)
                    new_pos = pos + pos_adjust
                    if new_pos >= 0 and new_pos < len(lapstatus):
                        #valid
                        lapadjust[pos] = 0
                        lapadjust[new_pos] = 1
                        success = True
                        #add statistics
                        if pos_adjust not in _empirical_model:
                            _empirical_model[pos_adjust] = 1
                        else:
                            _empirical_model[pos_adjust] += 1
                    if force==False:
                        # best-effort: give up after a single draw
                        break
        _lap_adjust[carno] = lapadjust
    return _lap_adjust[carno]
def build_random_model(modeldict):
    """
    Build a sampling table from a discrete distribution.

    input:
        modeldict ; {value: probability/weight}
    return:
        (N, 2) array; column 0 holds the sorted values, column 1 the
        normalized cumulative probabilities (last entry is 1.0)
    """
    table = np.zeros((len(modeldict), 2))
    running = 0
    for row, value in enumerate(sorted(modeldict.keys())):
        running += modeldict[value]
        table[row, 0] = value
        table[row, 1] = running
    # normalize the cumulative column by the total weight
    table[:, 1] = table[:, 1] / running
    return table
def print_model(model, iscdf=True):
    """
    Pretty-print a sampling table as 'value:probability' pairs.

    input:
        model ; rows of [value, cdf] (or [value, weight] if iscdf=False)
    """
    ordered = model[np.argsort(model[:, 0])]
    # with raw weights, normalize by their sum; cdf rows are already scaled
    total = 1. if iscdf else np.sum(ordered[:, 1])
    pairs = []
    prev_cdf = 0
    for value, weight in ordered:
        pairs.append((value, (weight - prev_cdf) / total))
        if iscdf:
            # probability of a value is the increment over the previous cdf
            prev_cdf = weight
    print(['%d:%.3f'%(v, p) for v, p in pairs])
def get_random_choice(model):
    """
    Draw a value from a [value, cdf] sampling table.

    A uniform sample in [0, 1) is located in the cdf column; the row
    whose cumulative probability first covers it supplies the value.
    """
    u = np.random.rand()
    row = np.sum(model[:, 1] < u)
    return int(model[row, 0])
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
# Pit-lap shift distribution: offset (laps) -> weight.  The weight at 0
# is deliberately small so that most pit laps get displaced.
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
# Pre-built [value, cdf] sampling table consumed by get_random_choice().
_adjust_model = build_random_model(_modeldict)
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """
    Randomly displace each pit lap inside the prediction window.

    Returns a copy of the last ``prediction_length`` entries of
    *lap_rec* where every pit lap (value 1) has been moved by a random
    offset drawn from ``_adjust_model``.  With ``force`` the draw is
    retried until the shifted position is valid; otherwise a single
    attempt is made.
    """
    window = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if window[pos] != 1:
            continue
        done = False
        while not done:
            # draw a shift for this pit lap
            shift = get_random_choice(_adjust_model)
            target = pos + shift
            if 0 <= target < prediction_length:
                window[pos] = 0
                window[target] = 1
                done = True
            if not force:
                # best-effort: give up after a single draw
                break
    return window
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
    """
    Displace each pit lap in the prediction window by -1/0/+1 laps.

    input:
        lap_rec ; per-lap pit status (1 = pit lap)
        endpos ; unused, kept for call compatibility with adjust_pit_model
        prediction_length ; size of the window at the tail of lap_rec
    return the adjusted lap status window (a copy)

    Fix: removed the unused local ``lap_adjust`` that consumed an RNG
    draw before the loop without affecting the result.
    """
    adjust_model = [-1, 0, 1]
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            # draw a shift for this pit lap; out-of-range shifts are dropped
            pos_adjust = random.choice(adjust_model)
            if pos_adjust == -1:
                if (pos - 1 >= 0):
                    lapadjust[pos] = 0
                    lapadjust[pos - 1] = 1
            elif pos_adjust == 1:
                if (pos + 1 < prediction_length):
                    lapadjust[pos] = 0
                    lapadjust[pos + 1] = 1
    return lapadjust
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
    """
    Predict the pit status for the coming prediction window.

    A stint length is sampled from an empirical model (Indy500 2018);
    when the sampled stint ends inside the window, the matching position
    of the returned 0/1 vector is set to 1.

    input:
        cuation_laps_instint ; caution laps seen in the current stint
        laps_instint ; laps already driven in the current stint
        prediction_length ; length of the forecast window
    """
    # empirical stint lengths: row 0 = caution-heavy stints, row 1 = green stints
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
                     [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
                      [46, 45, 43, 48, 46, 45, 45, 43]]
    pit_model = pit_model_all
    if cuation_laps_instint > 10:
        # many caution laps -> sample from the shorter stint distribution
        pred_pit_laps = random.choice(pit_model[0])
    else:
        pred_pit_laps = random.choice(pit_model[1])
    # fill the remaining laps of the stint into the future window
    pitpred = np.array([0] * prediction_length)
    remaining = pred_pit_laps - laps_instint
    if 0 < remaining <= prediction_length:
        pitpred[remaining - 1] = 1
    return pitpred
def make_dataset_byevent(runs, prediction_length, freq,
                useeid = False,
                run_ts= COL_LAPTIME,
                test_event = 'Indy500-2018',
                test_cars = [],
                use_global_dict = True,
                oracle_mode = MODE_ORACLE,
                half_moving_win = 0,
                train_ratio=0.8,
                log_transform = False,
                context_ratio = 0.,
                verbose = False
        ):
    """
    split the ts to train and test part by the ratio

    input:
        runs ; index into ``laptime_data`` to select one event, <0 for all
        prediction_length ; forecast horizon in laps
        freq ; pandas frequency string for the synthetic timestamps
        useeid ; add the event id as a second static categorical feature
        test_cars ; unused here (kept for call compatibility); NOTE(review):
            mutable default argument — harmless only because it is never used
        use_global_dict ; map car numbers through ``global_carids``
        oracle_mode: false to simulate prediction in real by
            set the covariates of track and lap status as nan in the testset
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
        log_transform ; apply log(x+1) to the target
        context_ratio ; fraction of max ts length kept as test context
    return:
        train_ds, test_ds (gluonts ListDataset) plus the raw record lists

    NOTE(review): the ``run_ts`` and ``test_event`` parameters are
    immediately overridden by the module globals ``_run_ts``/``_test_event``
    below, so the passed values are ignored — confirm intended.
    """
    # module-level configuration wins over the parameters
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    # reset the per-endpos caches used by the track-status predictors
    init_track_model()
    init_adjust_track_model()
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    train_set = []
    test_set = []
    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out
            # NOTE(review): only the test event is processed; every other
            # event is skipped entirely, so _train stays empty — confirm.
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            # enforce a minimum context for very short horizons
            context_len = 10
        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                        'start': start,
                        'feat_static_cat': static_cat,
                        'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                rec[COL_LAPSTATUS,:]]
                         }
                      )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1
                #bug fix, fixed the split point for all cars/ts
                for endpos in range(max_len, context_len+prediction_length,
                        step):
                    #check if enough for this ts
                    if endpos > totallen:
                        continue
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    # stint state at the last observed lap before the window
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        # NOTE(review): start_pitage is never incremented for
                        # consecutive green laps, so they all receive the same
                        # pit age (compare the TESTCURTRACK branch above) —
                        # confirm intended.
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec
                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec
                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    # accumulate the deviation of the adjusted covariates
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                              )
                    test_rec_cnt += 1
            #add one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
        train_set.extend(_train)
        test_set.extend(_test)
    print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)
    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile,freq, prediction_length, cardinality, train_ds, test_ds):
    """Pickle the dataset bundle to *datafile*.

    The file holds the list
    [freq, prediction_length, cardinality, train_ds, test_ds]
    written with the highest pickle protocol available.
    """
    bundle = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as fout:
        pickle.dump(bundle, fout, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[6]:
def predict(test_ds,predictor):
    """Run *predictor* over *test_ds* and materialize the results.

    Returns (tss, forecasts): the ground-truth series and the sampled
    forecast objects, both as lists.
    """
    forecast_iter, truth_iter = make_evaluation_predictions(
        dataset=test_ds,
        predictor=predictor,
        num_samples=100,  # sample paths drawn per series for evaluation
    )
    # consume the forecasts first, then the ground truth (paired generators)
    forecasts = list(forecast_iter)
    tss = list(truth_iter)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name,trainid):
    """
    Load the requested predictor and run it over *test_ds*.

    input:
        test_ds ; gluonts dataset to evaluate
        prediction_length ; forecast horizon; also encoded in the model path
        model_name ; one of curtrack/zerotrack/oracle/oracle-laponly/
            oracle-trackonly/deepAR/naive/zero/arima
        trainid ; training-run identifier used in the model directory name
    return:
        [tss, forecasts] on success, [] for an unknown model_name

    NOTE(review): this duplicates the model-resolution chain of
    load_model() below — keep the two in sync.
    NOTE(review): the GPU id 7 is hard-coded.
    """
    with mx.Context(mx.gpu(7)):
        pred_ret = []
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'
        # deepAR-Oracle
        if model_name == 'curtrack':
            model=f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        elif model_name == 'zerotrack':
            model=f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        # deepAR-Oracle
        elif model_name == 'oracle':
            model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        # deepAR-Oracle
        elif model_name == 'oracle-laponly':
            model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        # deepAR-Oracle
        elif model_name == 'oracle-trackonly':
            model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        # deepAR
        elif model_name == 'deepAR':
            model=f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        # naive
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        # arima
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                    prediction_length = prediction_length,trunc_length=60)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        else:
            print(f'error: model {model_name} not support yet!')
        return pred_ret
def load_model(prediction_length, model_name,trainid):
    """
    Resolve and load the predictor for *model_name* without running it.

    input:
        prediction_length ; forecast horizon; also encoded in the model path
        model_name ; one of curtrack/zerotrack/oracle/oracle-laponly/
            oracle-trackonly/deepAR/naive/zero/arima
        trainid ; training-run identifier used in the model directory name
    return:
        the loaded/constructed predictor

    NOTE(review): an unknown model_name falls through to ``return predictor``
    with the variable unbound (UnboundLocalError) — confirm intended.
    NOTE(review): duplicates the resolution chain of run_prediction_ex();
    keep the two in sync.  The GPU id 7 is hard-coded.
    """
    with mx.Context(mx.gpu(7)):
        pred_ret = []
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'
        # deepAR-Oracle
        if model_name == 'curtrack':
            model=f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        elif model_name == 'zerotrack':
            model=f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # deepAR-Oracle
        elif model_name == 'oracle':
            model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # deepAR-Oracle
        elif model_name == 'oracle-laponly':
            model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # deepAR-Oracle
        elif model_name == 'oracle-trackonly':
            model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # deepAR
        elif model_name == 'deepAR':
            model=f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # naive
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
        # arima
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                    prediction_length = prediction_length,trunc_length=60)
        else:
            print(f'error: model {model_name} not support yet!')
        return predictor
# In[7]:
#calc rank
def eval_rank_bytimediff(test_ds,tss,forecasts,prediction_length):
    """
    timediff models
    works for one event only

    Groups the per-car forecasts by split point (completed laps), then
    ranks cars at each predicted lap by true vs. predicted time diff.

    return:
        rank_ret ; list of [lap, time_diff, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [true, predicted]}}

    Fix: the error print below was missing the f-prefix, so the
    placeholders were printed literally instead of the actual lengths.
    """
    carlist = []
    # carno-lap# -> elapsed_time[] array
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the static categorical feature back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        # calc elapsed time
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []
        # median over the sample paths is used as the point forecast
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        timediff_array = tss[idx].values.copy()
        #save the prediction, grouped by the split point
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                                       forecast_laptime_mean.copy()]
    # calc rank
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}
        #fill in data: [0]=true, [1]=predicted
        time_diff = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        #calculate rank: argsort twice converts values into rank positions
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, time_diff, true_rank, pred_rank])
    return rank_ret,forecasts_et
#calc rank
def eval_laptime(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event (unused here)
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, lap_time, true_laptime, pred_laptime]
        forecasts_et ; {completed_laps: {carno: [true, predicted]}}

    Fix: the error print below was missing the f-prefix, so the
    placeholders were printed literally instead of the actual lengths.
    """
    carlist = []
    # carno-lap# -> laptime array, grouped by split point
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the static categorical feature back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        # calc elapsed time
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []
        # mean over the sample paths is used as the point forecast
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        laptime_array = tss[idx].values.copy()
        laptime_array_hat = tss[idx].values.copy()
        laptime_array_hat[-prediction_len:] = forecast_laptime_mean
        #save the prediction, grouped by the split point
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
                                       laptime_array_hat[-prediction_len:].copy()]
    # collect per-lap matrices
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}
        #fill in data: [0]=true, [1]=predicted
        lap_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        true_laptime = lap_time[0]
        pred_laptime = lap_time[1]
        rank_ret.append([lap, lap_time, true_laptime, pred_laptime])
    return rank_ret,forecasts_et
#calc rank
def eval_rank(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event; when a
                DataFrame is passed the target is treated as lap time and
                converted to elapsed time, otherwise the target is ranked directly
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [elapsed_time, elapsed_time_pred]}}

    Fix: the error print below was missing the f-prefix, so the
    placeholders were printed literally instead of the actual lengths.
    """
    carlist = []
    # carno-lap# -> elapsed_time[] array, grouped by split point
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the static categorical feature back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        # lap0 elapsed time for this car (laptime2rank mode only)
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        # calc elapsed time
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []
        # mean over the sample paths is used as the point forecast
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        if isinstance(start_offset, pd.core.frame.DataFrame):
            # laptime2rank: accumulate lap times into elapsed time
            laptime_array = tss[idx].values.copy()
            elapsed_time = np.cumsum(laptime_array) + offset
            laptime_array = tss[idx].values.copy()
            laptime_array[-prediction_len:] = forecast_laptime_mean
            elapsed_time_hat = np.cumsum(laptime_array) + offset
        else:
            # rank-direct: the target already orders the cars
            elapsed_time = tss[idx].values.copy()
            elapsed_time_hat = tss[idx].values.copy()
            elapsed_time_hat[-prediction_len:] = forecast_laptime_mean
        #save the prediction, grouped by the split point
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                       elapsed_time_hat[-prediction_len:].copy()]
    # calc rank
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}
        #fill in data: [0]=true, [1]=predicted
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        #calculate rank: argsort twice converts values into rank positions
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])
    return rank_ret,forecasts_et
def get_acc(rank_ret,prediction_length, verbose = False):
    """
    input:
        rank_ret: [lap, elapsed_time, true_rank, pred_rank], use [2][3] columns
    return:
        ((metrics...)
        (record count...))
        the result can be used to calculate micro/macro metrics

    Metrics: top-1/top-5 accuracy over the whole window and at the
    farthest predicted lap, Kendall tau, RMSE and MAE of the ranks,
    each averaged over the records in rank_ret.
    """
    # evaluate
    #top1 accuracy
    top1acc = 0
    top1acc_farmost = 0
    top5acc = 0
    top5acc_farmost = 0
    tau = 0
    rmse = 0.
    mae = 0.
    for rec in rank_ret:
        trueRank = rec[2]
        predRank = rec[3]
        #top1 , rank = 0, first col is not prediction
        top1acc += np.sum((trueRank==0) & (predRank==0))
        top1acc_farmost += np.sum((trueRank[:,-1]==0) & (predRank[:,-1]==0))
        #top5
        top5acc += np.sum((trueRank<5) & (predRank<5))
        top5acc_farmost += np.sum((trueRank[:,-1]<5) & (predRank[:,-1]<5))
        # tau (scipy.stats; NOTE(review): kendalltau can return nan for
        # constant rankings, which would propagate into the average)
        tao, _ = stats.kendalltau(trueRank, predRank)
        tau += tao
        #rmse (presumably sklearn.metrics.mean_squared_error — imported elsewhere)
        rmse += mean_squared_error(predRank,trueRank)
        #mae
        mae += np.sum(np.abs(predRank - trueRank))
    recnt = len(rank_ret)
    if recnt > 0:
        # normalize by the number of scored cells/records
        top1acc = top1acc *1.0/ (recnt*prediction_length)
        top1acc_farmost = top1acc_farmost *1.0/ recnt
        top5acc = top5acc *1.0/ (5*recnt*prediction_length)
        top5acc_farmost = top5acc_farmost *1.0/ (5*recnt)
        tau = tau/recnt
        rmse = rmse/recnt
        mae = mae/recnt
    #debug only
    # NOTE(review): when the global target is lap status, tau is silently
    # replaced by mae — remember this when reading reported taus.
    if _run_ts == COL_LAPSTATUS:
        tau = mae
    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', top1acc,
              'top1acc_farmost=', top1acc_farmost,
              'top5acc=', top5acc,
              'top5acc_farmost=', top5acc_farmost,
             )
        print('tau = ', tau,
              'rmse = ', rmse,
              'mae = ', mae)
    return ((top1acc,top1acc_farmost,top5acc,top5acc_farmost,tau,rmse),
            (recnt*prediction_length,recnt,5*recnt*prediction_length,5*recnt,recnt,recnt))
# In[ ]:
# In[8]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc, datamode='', models=[]):
    """
    input:
        plens=[2,5,10]
        half=[False]
        #trainids = ["indy500-r0.2","indy500-r0.4","indy500"]
        trainids = ["r0.5"]
        #half=[True,False]
        #plens=[2]
        runs = 5
        train_ratio=0.5
        exp_id='mean-splitbystage-predpit'
        testfunc ; run_exp_predpit, run_exp_predtrack, run_exp ... ; when a
            string is passed, run_exp is used with datamode/models instead
    return:
        dfret ; average result of multiple runs
            dataframe['model' , 'prediction_length', 'halfmode','datamode','trainid',
            'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse',
            'top1acc_std','top1acc_farmost_std','top5acc_std','top5acc_farmost_std','tau_std','rmse_std']
        alldata_ret ; for debug
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
            pred_ret[model] -> [tss, forecasts]
            test_ds[model] -> test_ds
            rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
            forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)

    NOTE(review): models=[] is a mutable default argument; harmless here
    because it is only passed through, never mutated.
    """
    if plens == [] or half == [] or trainids == []:
        print("error with empty settings")
        return
    #testfunc or (datamode & models)
    if isinstance(testfunc,str) and (datamode == '' or models == []):
        print("error with testfunc")
        return
    allret = []
    alldata_ret = []
    for runid in range(runs):
        exp_data = []
        exp_result = []
        # full grid over window mode x horizon x training id
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('='*10)
                    if not isinstance(testfunc,str):
                        pred_ret, test_ds, rank_ret, metric_ret = testfunc(plen, halfmode,
                                    train_ratio=train_ratio,
                                    trainid=trainid)
                    else:
                        pred_ret, test_ds, rank_ret, metric_ret = run_exp(plen, halfmode,
                                    train_ratio=train_ratio,
                                    trainid=trainid,
                                    datamode=datamode,
                                    models=models)
                    #save
                    exp_data.append((pred_ret, test_ds, rank_ret))
                    exp_result.extend(metric_ret)
        #save result
        result = pd.DataFrame(exp_result, columns = ['model' , 'prediction_length', 'halfmode',
                                                     'datamode','trainid',
                                                     'top1acc','top1acc_farmost','top5acc',
                                                     'top5acc_farmost','tau','rmse'])
        #result['runid'] = [runid for x in range(len(result))]
        allret.append(result)
        alldata_ret.append(exp_data)
    #final: average the metric columns over the runs
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :,:] = ret[['top1acc','top1acc_farmost','top5acc',
                                   'top5acc_farmost','tau','rmse']].values
    #average
    averagemat = np.mean(metrics[:,:,:], axis=0)
    stdmat = np.std(metrics[:,:,:], axis=0)
    dfhead = allret[0][['model' , 'prediction_length', 'halfmode', 'datamode','trainid']]
    dfaverage = pd.DataFrame(averagemat, columns = ['top1acc','top1acc_farmost','top5acc',
                                                    'top5acc_farmost','tau','rmse'])
    dfstd = pd.DataFrame(stdmat, columns = ['top1acc_std','top1acc_farmost_std','top5acc_std',
                                            'top5acc_farmost_std','tau_std','rmse_std'])
    dfret = pd.concat([dfhead, dfaverage, dfstd], axis=1)
    #if exp_id != '':
    #    dfret.to_csv(f'laptime2rank-evaluate-indy500-{exp_id}-result.csv', float_format='%.3f')
    return dfret, alldata_ret
def checkret_status(dataret, runid = 0, idx = 0,model='oracle'):
    """
    Count track-status (yellow-flag) and lap-status (pit) occurrences over the
    prediction windows of one test dataset and print the totals.

    input:
        dataret ; debug structure from run_test:
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
            pred_ret[model] -> [tss, forecasts]
            test_ds[model] -> test_ds
    """
    entry = dataret[runid][idx]
    # prediction length is the second axis of a forecast's sample matrix
    _, plen = entry[0][model][1][0].samples.shape
    test_ds = entry[1][model]
    it = iter(test_ds)
    yfcnt = 0
    pitcnt = 0
    for _ in range(len(test_ds)):
        rec = next(it)
        # decode kept for parity (raises if carid is unknown)
        carno = decode_carids[rec['feat_static_cat'][0]]
        track_rec, lap_rec = rec['feat_dynamic_real']
        # only the trailing prediction window counts
        yfcnt += np.sum(track_rec[-plen:])
        pitcnt += np.sum(lap_rec[-plen:])
    print('yfcnt:', yfcnt, 'pitcnt:',pitcnt)
def get_ref_oracle_testds(plens, halfs, train_ratio=0.8, test_cars = []):
    """
    Build oracle-mode reference test datasets, one for each
    (prediction_length, half_moving_win) combination.

    return:
        dict keyed by '%d-%d' % (prediction_length, half_moving_win) -> test_ds
    """
    ref = {}
    for plen in plens:
        for hwin in halfs:
            _, ds, _, _ = make_dataset_byevent(events_id[_test_event], plen, freq,
                                               oracle_mode=MODE_ORACLE,
                                               run_ts=_run_ts,
                                               test_cars=test_cars,
                                               half_moving_win=hwin,
                                               train_ratio=train_ratio)
            # %d-%d formatting kept so boolean half modes map to 0/1
            ref['%d-%d' % (plen, hwin)] = ds
    return ref
def checkret_confusionmat(dataret, ref_testset, runid= 0, testid = '', model='oracle'):
    """
    output the 4x4 confusion matrix split by track and lap status
    (types '00','10','01','11' = <has yellow flag, has pit> in the window),
    plus an 'aa' row over all records.

    input:
        dataret ; debug structure from run_test (see its docstring)
        ref_testset ; oracle test ds dict from get_ref_oracle_testds,
                      keyed '%d-%d' % (plen, half_moving_win)
    return:
        dfacc ; dataframe with one accuracy row per (plen, type)
    """
    plen_length = len(dataret[runid])
    dflist = []
    for idx in range(plen_length):
        # prediction length from the sample matrix of the first forecast
        _, plen = dataret[runid][idx][0][model][1][0].samples.shape
        test_ds = dataret[runid][idx][1][model]
        rank_ret = dataret[runid][idx][2][model][0]
        # reference datasets are keyed with half_moving_win fixed to 0
        key = '%d-%d'%(plen,0)
        if key not in ref_testset:
            print(f'error, {key} not found in ref_testset')
            continue
        ref_oracle_testds = ref_testset[key]
        if len(ref_oracle_testds) != len(test_ds):
            print('error, size of testds mismatch', len(ref_oracle_testds), len(test_ds))
            continue
        # confusion matrix for <trackstatus, lapstatus> type: 00,01,10,11
        # lap(start lap of prediction) -> type
        lapmap = {}
        ds_iter = iter(ref_oracle_testds)
        for recid in range(len(ref_oracle_testds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            track_rec,lap_rec = test_rec['feat_dynamic_real']
            # counts inside the trailing prediction window only
            yfcnt = np.sum(track_rec[-plen:])
            pitcnt = np.sum(lap_rec[-plen:])
            #laptype = ('0' if yfcnt==0 else '1') + ('0' if pitcnt==0 else '1')
            lap = len(track_rec) - plen + 1
            if lap not in lapmap:
                #lapmap[lap] = laptype
                lapmap[lap] = (yfcnt, pitcnt)
            else:
                # accumulate over all cars starting their window at this lap
                oldtype = lapmap[lap]
                lapmap[lap] = (yfcnt + oldtype[0], pitcnt + oldtype[1])
        #split the rank_ret by laptype
        types=['00','10','01','11']
        acc_ret = []
        for laptype in types:
            check_ret = []
            for item in rank_ret:
                typecnt = lapmap[item[0]]
                thetype = ('0' if typecnt[0]==0 else '1') + ('0' if typecnt[1]==0 else '1')
                if thetype == laptype:
                    check_ret.append(item)
            # get acc
            metrics = get_acc(check_ret,plen)
            recret = [testid, plen, laptype, len(check_ret)]
            recret.extend(metrics[0])
            acc_ret.append(recret)
        #add all test
        metrics = get_acc(rank_ret,plen)
        recret = [testid, plen, 'aa', len(rank_ret)]
        recret.extend(metrics[0])
        acc_ret.append(recret)
        _dfacc = pd.DataFrame(acc_ret, columns = ['testid','plen',
                'type','reccnt','top1acc','top1acc_farmost','top5acc',
                'top5acc_farmost','tau','rmse'])
        dflist.append(_dfacc)
    dfacc = pd.concat(dflist, axis=0)
    return dfacc
# In[9]:
def check_testds(datamode, test_cars=[]):
    """
    report mae, etc

    Builds a dataset for every (prediction_length, half_moving_win) pair from
    the module-level `plens`/`half` settings; return values are discarded —
    make_dataset_byevent reports its own statistics.
    """
    for plen in plens:
        for hwin in half:
            train_ds, test_ds, _, _ = make_dataset_byevent(events_id[_test_event], plen, freq,
                                                           oracle_mode=datamode,
                                                           run_ts=_run_ts,
                                                           test_cars=test_cars,
                                                           half_moving_win=hwin,
                                                           train_ratio=train_ratio)
def dotest(config):
    """
    Run every (model, testfunc) pair described by *config* and aggregate.

    input:
        config ; {model -> {testfunc_label -> datamode}}
    return:
        (dfret, dfacc) ; concatenated run_test averages and per-type
        confusion-matrix accuracies over all pairs
    """
    df_parts = []
    acc_parts = []
    for model, conf in config.items():
        for teststr, datamode in conf.items():
            df, dataret = run_test(runs, plens, half, trainids,
                                   train_ratio, teststr,
                                   datamode=datamode, models=[model])
            # split accuracy by <track,lap> status type for this pair
            acc = checkret_confusionmat(dataret, ref_testset,
                                        testid=teststr, model=model)
            df_parts.append(df)
            acc_parts.append(acc)
    return pd.concat(df_parts, axis=0), pd.concat(acc_parts, axis=0)
#
# simulator
#
def get_pitlaps(verbose = True, prediction_length=2):
    """
    collect pitlaps info from COL_LAPSTATUS
    input:
        laptime_data ;
        _test_event ;
        events
        _train_len ; minimum laps for a ts(otherwise, discard)
        global_car_ids ; carno-> carid mapping
        _inlap_status ; 0 none, 1 inlap precedes pit, 2 inlap follows pit
        _include_endpit ; when true, append the final lap as a pseudo pit
    return:
        ret_pitlaps ; sorted unique laps which are pitstop for some car
        all_pitlaps ; carno -> list of pit laps
        max_lap ; longest ts length in the event
    """
    run_ts = _run_ts
    #all_pitlaps = [] # carno -> pitstops
    all_pitlaps = {}  # carno -> pitstops
    max_lap = 0
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < _train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            #print(f'carno:{carno}, totallen={totallen}')
            #first, get target a copy
            # target can be COL_XXSTATUS
            lap_status = rec[COL_LAPSTATUS, :]
            pitstops = np.where(lap_status == 1)[0]
            # filter out inlaps (when _inlap_status > 0)
            # NOTE(review): this assumes pit flags come in (inlap, pitlap)
            # pairs so every other index is the real stop — confirm upstream.
            if _inlap_status !=0:
                if _inlap_status == 1:
                    #remove inlaps in previous of pit stops
                    pitstops_tmp = [pitstops[x] for x in range(1, len(pitstops), 2)]
                    pitstops = pitstops_tmp
                elif _inlap_status == 2:
                    #remove inlaps in next lap of pit stops
                    pitstops_tmp = [pitstops[x] for x in range(0, len(pitstops), 2)]
                    pitstops = pitstops_tmp
            #all_pitlaps.append(list(pitstops))
            all_pitlaps[carno] = list(pitstops)
            # append the end lap
            if _include_endpit:
                all_pitlaps[carno].append(totallen-1)
    #retrurn
    # flatten per-car pit laps into one sorted, de-duplicated list
    allset = []
    for l in all_pitlaps.keys():
        allset.extend(all_pitlaps[l])
    ret_pitlaps = sorted(list(set(allset)))
    return ret_pitlaps, all_pitlaps, max_lap
def get_nextpit(pitlaps, startlap):
    """
    Find each car's next pit stop strictly after *startlap*.

    input:
        pitlaps ; dict carno -> list of pit-stop laps for that car
        startlap ; reference lap
    return:
        nextpit_map ; carno -> next pit lap; cars with no later pit are omitted
        maxnext ; largest next pit lap over all cars, or -1 when no car has a
                  pit after startlap (including when pitlaps is empty — the
                  original raised ValueError from max() on an empty sequence)
    """
    nextpit = []
    nextpit_map = {}
    for carno, rec in pitlaps.items():
        # first lap strictly greater than startlap, if any
        for lap in rec:
            if lap > startlap:
                nextpit.append(lap)
                nextpit_map[carno] = lap
                break
        else:
            # no later pit for this car; -1 keeps max() meaningful below
            nextpit.append(-1)
    # guard: max([]) raises ValueError when pitlaps has no cars at all
    return nextpit_map, (max(nextpit) if nextpit else -1)
def sim_init():
    """
    save the lapstatus in laptime_data

    Copies COL_LAPSTATUS / COL_CAUTION_LAPS_INSTINT / COL_LAPS_INSTINT into
    their *_SAVE rows so update_onets() can restore ground truth later.
    """
    run_ts = _run_ts
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        # length stats kept for parity with the original scan
        max_lap = int(np.max([_entry.shape[1] for _entry in _data[2]]))
        for rec in _data[2]:
            # rec rows are mutated in place (views into laptime_data)
            rec[COL_LAPSTATUS_SAVE, :] = rec[COL_LAPSTATUS, :]
            rec[COL_CAUTION_LAPS_INSTINT_SAVE, :] = rec[COL_CAUTION_LAPS_INSTINT, :]
            rec[COL_LAPS_INSTINT_SAVE, :] = rec[COL_LAPS_INSTINT, :]
def update_lapstatus(startlap):
    """
    update the whole lapstatus data

    Runs update_onets() on every car's time series of the test event,
    re-simulating pit status from *startlap* onward.
    """
    run_ts = _run_ts
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        # length stats kept for parity with the original scan
        max_lap = int(np.max([_entry.shape[1] for _entry in _data[2]]))
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber]; carno from the event's car list
            update_onets(_data[2][rowid], startlap, _data[1][rowid])
# pit-stop prediction model used by update_onets(); must be assigned
# (an object with .predict(caution_laps_instint, laps_instint)) before simulation runs
_pitmodel = None
def update_onets(rec, startlap, carno):
    """
    update lapstatus after startlap based on rec by the pit prediction model
    input:
        rec ; a ts with multiple features COL_XXX (mutated in place)
        startlap ; laps up to and including startlap keep their saved
                   ground-truth status; later laps are re-simulated
        carno ; car number, used only for debug reporting
    return:
        None ; rec is updated for COL_LAPSTATUS, COL_CAUTION_LAPS_INSTINT, COL_LAPS_INSTINT
    """
    # loop from startlap
    nans, x= nan_helper(rec[_run_ts,:])
    nan_count = np.sum(nans)
    recx = rec[:, ~np.isnan(rec[_run_ts,:])]
    # remove short ts
    totallen = recx.shape[1]
    if startlap >= totallen:
        return
    #totallen = tsrec.shape[1]
    #ipdb.set_trace()
    #reset status :startlap + 1
    # restore ground truth up to startlap, clear everything after
    endpos = startlap + 1
    rec[COL_LAPSTATUS,:] = 0
    rec[COL_LAPSTATUS,:endpos] = rec[COL_LAPSTATUS_SAVE, :endpos]
    rec[COL_CAUTION_LAPS_INSTINT,:endpos] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :endpos]
    rec[COL_LAPS_INSTINT, :endpos] = rec[COL_LAPS_INSTINT_SAVE, :endpos]
    #rec[COL_LAPSTATUS,:] = rec[COL_LAPSTATUS_SAVE, :]
    #rec[COL_CAUTION_LAPS_INSTINT,:] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :]
    #rec[COL_LAPS_INSTINT, :] = rec[COL_LAPS_INSTINT_SAVE, :]
    debug_report('start update_onets', rec[COL_LAPSTATUS], startlap, carno)
    #loop on predict nextpit pos
    curpos = startlap
    while True:
        caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, curpos])
        laps_instint = int(rec[COL_LAPS_INSTINT, curpos])
        # _pitmodel predicts the stint length; convert to an absolute lap
        pred_pit_laps = _pitmodel.predict(caution_laps_instint, laps_instint)
        nextpos = curpos + pred_pit_laps - laps_instint
        if nextpos >= totallen:
            # predicted pit falls beyond the race: fill the tail and stop
            nextpos = totallen - 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos+1] = caution_laps_instint
            for _pos in range(curpos+1, nextpos+1):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            break
        else:
            #if (pred_pit_laps > laps_instint) and (nextpos < totallen):
            # a valid pit
            rec[COL_LAPSTATUS, nextpos] = 1
            if _inlap_status != 0:
                #inlap is 'P'
                if _inlap_status == 1 :
                    #rec[COL_LAPSTATUS, nextpos-1] = _inlap_status
                    rec[COL_LAPSTATUS, nextpos-1] = 1
                else:
                    #todo: no boudary check
                    # NOTE(review): nextpos+1 can overflow when nextpos == totallen-1
                    #rec[COL_LAPSTATUS, nextpos+1] = _inlap_status
                    rec[COL_LAPSTATUS, nextpos+1] = 1
            # carry caution count through the stint; reset both counters at the pit
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos] = caution_laps_instint
            rec[COL_CAUTION_LAPS_INSTINT, nextpos] = 0
            for _pos in range(curpos+1, nextpos):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            rec[COL_LAPS_INSTINT, nextpos] = 0
            #go forward
            curpos = nextpos
    debug_report('after update_onets', rec[COL_LAPSTATUS], startlap, carno)
    return
#debug tracking status
#status matrix : laps x ( endCol x 5 features)
#features: target, lapstatus, lap_instint, caution_instint, trackstatus
_status_mat = {} # stepid -> status matrix; never populated in this chunk
def debug_report_mat(startlap, maxnext):
    """
    output the status of the simulation

    NOTE(review): this function appears unfinished — it scans the event's
    time series and binds `rec` but never formats or prints anything, and
    `startlap`/`maxnext`/`fixedWidth`/`endCol` are unused.
    """
    fixedWidth = 5
    endCol = 4
    run_ts = _run_ts
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        #header carno | lap#...
        #fixed width
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
# car numbers to trace in the debug_* helpers below; empty disables all debug output
_debug_carlist = []
#_debug_carlist = [12]
def debug_report_ts(msg, rec, startlap, carno, col= COL_LAPSTATUS):
    """Print one feature row of *rec*, split at startlap, when *carno* is traced."""
    if carno in _debug_carlist:
        cut = startlap + 1
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[col, :cut])
        print('='*10)
        print(rec[col, cut:])
def debug_report(msg, rec, startlap, carno):
    """Print a 1-D array *rec*, split at startlap, when *carno* is traced."""
    if carno in _debug_carlist:
        cut = startlap + 1
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[:cut])
        print('='*10)
        print(rec[cut:])
def debug_print(msg):
    """Print *msg* only when car-level debugging is enabled (non-empty _debug_carlist)."""
    if _debug_carlist:
        print(msg)
# works on predicted lap status
def sim_onestep_pred(predictor, prediction_length, freq,
                 startlap, endlap,
                 oracle_mode = MODE_ORACLE,
                 sample_cnt = 100,
                 verbose = False
                ):
    """
    Autoregressive forecasting over [startlap, endlap] in steps of
    prediction_length, batched over all cars, using the (possibly predicted)
    lap status currently stored in laptime_data.

    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus (ground truth from COL_LAPSTATUS_SAVE)
            1,: -> true target
            2,: -> pred target (overwritten step by step)
            3,  -> placeholder
            4,  -> placeholder
        forecasts_samples ; carno -> sample_cnt raw samples of the farthest
            predicted lap (for p-risk)
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    forecasts_samples = {}
    _laptime_data = laptime_data.copy()
    # endpos is the exclusive end of the next prediction window
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + prediction_length + 1):
    while(endpos <= endlap + prediction_length):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            #ipdb.set_trace()
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # cars whose race already ended drop out of later windows
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # <3, totallen>
                # first window for this car: initialize its output matrix
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS_SAVE, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                    # for p-risk
                    forecasts_samples[carno] = np.zeros((sample_cnt))
                # forecasts_et will be updated by forecasts
                # row 2 is a view: earlier-step predictions feed the next context
                target_val = forecasts_et[carno][2,:]
                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test mode
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #                laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec]
                             }
                          )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                             }
                          )
                test_rec_cnt += 1
                #debug
                #debug_report('simu_onestep', rec, startlap, carno, col= _run_ts)
                #debug_report(f'simu_onestep: {startlap}-{endlap}, endpos={endpos}', target_val[:endpos], startlap, carno)
            # end of for each ts
        # RUN Prediction here
        # all cars are predicted in one batch per window
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=sample_cnt,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            #update the forecasts , ready to use in the next prediction(regresive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
            #save the samples, the farest samples
            forecasts_samples[carno][:] = forecasts[idx].samples[:,-1].reshape(-1)
        #go forward
        endpos += prediction_length
    return forecasts_et, forecasts_samples
# works on lapstatus ground truth
def sim_onestep_ex(predictor, prediction_length, freq,
                 startlap, endlap,
                 oracle_mode = MODE_ORACLE,
                 verbose = False
                ):
    """
    Autoregressive forecasting over [startlap, endlap], batched over all cars,
    using the lap-status GROUND TRUTH in laptime_data (contrast with
    sim_onestep_pred, which reads COL_LAPSTATUS_SAVE and keeps samples).

    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target (NaN beyond endlap)
            3,  -> placeholder
            4,  -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    # endpos is the exclusive end of the next prediction window
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + 1):
    while(endpos <= endlap + prediction_length + 1):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # cars whose race already ended drop out of later windows
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # <3, totallen>
                # first window for this car: initialize its output matrix
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                    #forecasts_et[carno][2,:endpos] = rec[run_ts,:endpos].copy().astype(np.float32)
                # forecasts_et will be updated by forecasts
                # row 2 is a view: earlier-step predictions feed the next context
                target_val = forecasts_et[carno][2,:]
                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test mode
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #                laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec]
                             }
                          )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                             }
                          )
                test_rec_cnt += 1
            # end of for each ts
        # RUN Prediction here
        # all cars are predicted in one batch per window
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=100,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            #update the forecasts , ready to use in the next prediction(regresive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
        #go forward
        endpos += prediction_length
    #clear the unpred part
    for carno in forecasts_et.keys():
        forecasts_et[carno][2, endlap+1:] = np.nan
    return forecasts_et
def sim_onestep(predictor, prediction_length, freq,
                 startlap, endlap,
                 oracle_mode = MODE_ORACLE,
                 verbose = False
                ):
    """
    Autoregressive forecasting over [startlap, endlap], one car at a time
    (one single-record prediction per window, unlike the batched
    sim_onestep_pred / sim_onestep_ex variants).

    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3,  -> placeholder
            4,  -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _test = []
        if events[_data[0]] != test_event:
            #jump out
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            # target_val is a local copy here (not a view into forecasts_et)
            target_val = rec[run_ts,:].copy().astype(np.float32)
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()
            # <3, totallen>
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if True:
                #bug fix, fixed the split point for all cars/ts
                #for endpos in range(max_len, context_len+prediction_length,step):
                #step = prediction_length
                #for endpos in range(startlap + prediction_length, endlap, step):
                endpos = startlap + prediction_length
                while(endpos < endlap and endpos < totallen):
                    # RUN Prediction for single record
                    _test = []
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #todo
                        #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                        #                laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                              )
                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)
                    #get prediction result
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
                    #update target_val
                    # feed predictions (and the window's features) back for the next step
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
                    #save forecast
                    #save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    #print('car no:', carno, 'completed_laps:', completed_laps)
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
                    test_rec_cnt += 1
                    #go forward
                    endpos += prediction_length
            #one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
    return forecasts_et
# pred pit differs to true pit
def get_acc_onestint_pred(forecasts, startlap, nextpit, nextpit_pred, trim=2, currank = False):
    """
    Evaluate rank-change prediction for the stint starting at *startlap*,
    where the predicted next-pit lap may differ from the true one.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; eval for the stint start from startlap only
        nextpit ; carno -> true next pitstop lap
        nextpit_pred ; carno -> predicted next pitstop lap
        currank ; when True, score the "no rank change" baseline instead
    output:
        list of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]
        #lap status condition
        # NOTE(review): lapstatus_cont is unbound if _inlap_status is not in
        # {0,1,2}, and startlap+1 can overflow when _inlap_status == 2 —
        # confirm both are impossible upstream.
        if _inlap_status == 0:
            lapstatus_cont = (forecasts[carno][0, startlap] == 1)
        elif _inlap_status == 1:
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap-1] == 1))
        elif _inlap_status == 2:
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap+1] == 1))
        if carno in _debug_carlist:
            _debug_msg = 'startlap=%d, total=%d, pitstop status = %s, nextpit=%s, nextpit_pred=%s'%(startlap, lapnum, lapstatus_cont,
                    'none' if (carno not in nextpit) else nextpit[carno],
                    'none' if (carno not in nextpit_pred) else nextpit_pred[carno],
                    )
            debug_print(_debug_msg)
        # check the lap status
        #if ((startlap < lapnum) and (forecasts[carno][0, startlap] == 1)):
        if ((startlap < lapnum) and (lapstatus_cont == True)):
            startrank = true_rank[startlap-trim]
            if not carno in nextpit:
                continue
            pitpos = nextpit[carno]
            if np.isnan(pitpos):
                continue
            #todo, use the true prediction that longer than maxlap
            # when forcing alignment, fall back to the true pit lap if the
            # prediction is missing/NaN instead of dropping the record
            if _force_endpit_align:
                if not carno in nextpit_pred:
                    #continue
                    pitpos_pred = pitpos
                else:
                    pitpos_pred = nextpit_pred[carno]
                    if np.isnan(pitpos_pred):
                        pitpos_pred = pitpos
            else:
                if not carno in nextpit_pred:
                    continue
                pitpos_pred = nextpit_pred[carno]
                if np.isnan(pitpos_pred):
                    #set prediction to the end
                    continue
            endrank = true_rank[pitpos-trim]
            #endrank_pred = true_rank[pitpos_pred-trim]
            diff = endrank - startrank
            sign = get_sign(diff)
            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                pred_endrank = pred_rank[pitpos_pred-trim]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            rankret.append([carno, startlap, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                           ])
    return rankret
# pred pit differs to true pit
def get_acc_onestep_shortterm(forecasts, startlap, endlap, trim=0, currank = False):
    """
    short-term (fixed-horizon) rank evaluation: compare the rank at
    <startlap> with the rank at <endlap> for every car.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; first lap of the evaluation window
        endlap ; last lap of the evaluation window (startlap + horizon)
        currank ; True -> naive model that predicts "rank stays the same"
    output:
        list of rows [carno, startlap, startrank, endrank, diff, sign,
                      pred_endrank, pred_diff, pred_sign]
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]

        # check the window against this car's series length
        if startlap < lapnum:
            startrank = true_rank[startlap-trim]

            if np.isnan(endlap):
                continue
            # bugfix: the original did not bound-check endlap and raised
            # IndexError for cars whose series ends inside the window
            if endlap - trim >= lapnum:
                continue

            endrank = true_rank[endlap-trim]
            diff = endrank - startrank
            sign = get_sign(diff)

            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                pred_endrank = pred_rank[endlap-trim]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)

            rankret.append([carno, startlap, startrank,
                           endrank, diff, sign,
                           pred_endrank, pred_diff, pred_sign
                           ])

    return rankret
# works when pred pitstop == true pitstop
def get_acc_onestint(forecasts, startlap, nextpit, trim=2, currank = False):
    """
    evaluate rank change over one stint when the predicted pitstop equals
    the true pitstop (oracle pit information).

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; eval for the stint start from startlap only
        nextpit ; carno -> next pitstop lap
        currank ; True -> naive model that predicts "rank stays the same"
    output:
        list of rows [carno, startlap, startrank, endrank, diff, sign,
                      pred_endrank, pred_diff, pred_sign]
    """
    rows = []
    for carno, mat in forecasts.items():
        total_laps = len(mat[1,:])
        rank_true = mat[3,:]
        rank_pred = mat[4,:]

        # only evaluate cars that actually pit at <startlap>
        if startlap >= total_laps or mat[0, startlap] != 1:
            continue
        if carno not in nextpit:
            continue
        pitpos = nextpit[carno]
        if np.isnan(pitpos):
            continue

        rank_at_start = rank_true[startlap-trim]
        rank_at_end = rank_true[pitpos-trim]
        change = rank_at_end - rank_at_start

        if currank:
            #force into currank model, zero doesn't work here
            rank_at_end_pred = rank_at_start
        else:
            rank_at_end_pred = rank_pred[pitpos-trim]
        change_pred = rank_at_end_pred - rank_at_start

        rows.append([carno, startlap, rank_at_start,
                     rank_at_end, change, get_sign(change),
                     rank_at_end_pred, change_pred, get_sign(change_pred)
                     ])

    return rows
# pred sim
def run_simulation_pred(predictor, prediction_length, freq,
                    datamode = MODE_ORACLE):
    """
    stint-level simulation driven by the *predicted* pit model.

    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation
            2. eval stint performance

    input:
        predictor ; loaded forecasting model
        prediction_length ; forecast horizon in laps
        freq ; pandas frequency string for the synthetic timestamps
        datamode ; oracle-mode flag bits passed to sim_onestep_pred
    return:
        dataframe of per-stint rank-change rows (see columns below)

    globals used: _pitmodel, _debug_carlist, _exp_id, _test_event,
    global_start_offset, _trim (defined elsewhere in the file).
    """
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()

    sim_init()

    for pitlap in allpits:
        #1. update lap status
        # skip the status update when the pit model is the oracle string
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')

        #2. get maxnext
        # recompute pit laps after the status update to get the *predicted* pits
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)

        #debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                #print('nextpit:', nextpit[12], nextpit_pred[12], 'maxnext:', maxnext, maxnext_pred)
                #debugstr = f'nextpit: {nextpit[]}, {nextpit_pred[12]}, maxnext: {maxnext}, {maxnext_pred}'
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)

        #run one step sim from pitlap to maxnext
        # simulate far enough to cover both the true and the predicted next pit
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                     pitlap, max(maxnext, maxnext_pred),
                     oracle_mode = datamode,
                     sample_cnt = 100
                     )

        debug_print(f'simulation done: {len(forecast)}')

        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break

        # evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        rankret.extend(ret)

    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                'endrank', 'diff', 'sign',
                               'pred_endrank', 'pred_diff', 'pred_sign',
                               ])
    return df
#prediction of shorterm + pred pit model
def run_simulation_shortterm(predictor, prediction_length, freq,
                    datamode = MODE_ORACLE,
                    sample_cnt = 100):
    """
    short-term simulation with the predicted pit model: slide a fixed
    <prediction_length> window over the race (from lap 10 on) instead of
    jumping from pit lap to pit lap.

    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation
            2. eval stint performance

    return:
        (df, full_samples, full_tss)
        df ; per-window rank-change rows
        full_samples ; carno -> (sample_cnt, maxlap) predicted-rank samples
        full_tss ; carno -> (maxlap,) true ranks
    """
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()

    sim_init()

    #init samples array
    full_samples = {}
    full_tss = {}

    # rolling fixed-size window; starts at lap 10 (warm-up context)
    for pitlap in range(10, maxlap-prediction_length):
        #1. update lap status
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')

        #run one step sim from pitlap to maxnext
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                     pitlap, pitlap + prediction_length,
                     oracle_mode = datamode,
                     sample_cnt = sample_cnt
                     )

        debug_print(f'simulation done: {len(forecast)}')

        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break

        # evaluate for this stint
        #ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred)
        ret = get_acc_onestep_shortterm(forecasts_et, pitlap, pitlap+prediction_length)
        rankret.extend(ret)

        # add to full_samples
        # accumulate the sampled rank distribution at the window end lap
        eval_full_samples(pitlap + prediction_length,
                forecast_samples, forecast,
                full_samples, full_tss)

    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                'endrank', 'diff', 'sign',
                               'pred_endrank', 'pred_diff', 'pred_sign',
                               ])
    return df, full_samples, full_tss
# oracle sim
def run_simulation(predictor, prediction_length, freq,
                    datamode = MODE_ORACLE):
    """
    oracle simulation: for every true pit lap, run a one-step simulation
    up to the farthest next pit and score the per-stint rank change.

    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation
            2. eval stint performance
    """
    rows = []

    # ground-truth pit structure of the race
    allpits, pitmat, maxlap = get_pitlaps()

    for pitlap in allpits:
        print(f'start pitlap: {pitlap}')
        nextpit, maxnext = get_nextpit(pitmat, pitlap)

        # one-step simulation from this pit lap up to the farthest next pit
        forecast = sim_onestep_ex(predictor, prediction_length, freq,
                     pitlap, maxnext,
                     oracle_mode = datamode
                     )
        print(f'simulation done: {len(forecast)}')

        # turn the simulated targets into per-lap ranks
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break

        # score this stint against the oracle next pit
        rows.extend(get_acc_onestint(forecasts_et, pitlap, nextpit))

    # collect everything into one dataframe
    df = pd.DataFrame(rows, columns =['carno', 'startlap', 'startrank',
                                'endrank', 'diff', 'sign',
                               'pred_endrank', 'pred_diff', 'pred_sign',
                               ])
    return df
#
# ------------
#
def longterm_predict(predictor, runs, prediction_length, freq,
                  useeid = False,
                  run_ts= COL_LAPTIME,
                  test_event = 'Indy500-2018',
                  test_cars = [],
                  use_global_dict = True,
                  oracle_mode = MODE_ORACLE,
                  half_moving_win = 0,
                  train_ratio=0.8,
                  log_transform = False,
                  verbose = False
                ):
    """
    rolling long-term prediction over the test event: repeatedly forecast
    <prediction_length> laps ahead and write the prediction back into the
    series before moving the window forward.

    split the ts to train and test part by the ratio

    input:
        oracle_mode: false to simulate prediction in real by
                 set the covariates of track and lap status as nan in the testset
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder

    NOTE(review): the run_ts/test_event parameters are immediately overridden
    by the module globals _run_ts/_test_event, and test_cars is never read --
    they appear to be kept for interface compatibility only. test_cars also
    uses a mutable default ([]); harmless here since it is unused.
    """
    # module-level configuration wins over the keyword arguments
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio

    init_track_model()
    init_adjust_track_model()

    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []

    # output accumulator: carno -> 5 x totallen matrix (see docstring)
    forecasts_et = {}

    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()

    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []

        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out -- only the test event is processed by this function
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)

        # context_len = warm-up history length before the first forecast window
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            # static categorical features: car id, optionally event id
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            # keep pristine copies of the covariates; the rolling loop below
            # overwrites rec in place and restores from these each iteration
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()

            # <3, totallen>
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)

            if log_transform:
                target_val = np.log(target_val + 1.0)

            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)

            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                    'start': start,
                    'feat_static_cat': static_cat,
                    'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                           rec[COL_LAPSTATUS,:]]
                     }
                   )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len

                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = int(prediction_length/2)
                elif half_moving_win == 2:
                    step = prediction_length
                else:
                    step = 1

                #bug fix, fixed the split point for all cars/ts
                #for endpos in range(max_len, context_len+prediction_length,step):
                for endpos in range(context_len+prediction_length, max_len, step):

                    #check if enough for this ts
                    if endpos > totallen:
                        break

                    # RUN Prediction for single record
                    _test = []

                    # check pitstop(stint) in the last prediction
                    # use ground truth of target before the last pitstop
                    if np.sum(lap_status[endpos-2*prediction_length:endpos-prediction_length]) > 0:
                        # pit found
                        # adjust endpos
                        # snap the window so it starts right after the pit lap
                        pitpos = np.where(lap_status[endpos-2*prediction_length:endpos-prediction_length] == 1)
                        endpos = endpos-2*prediction_length + pitpos[0][0] + prediction_length + 1
                        #print('endpos:',endpos,pitpos)

                        #check if enough for this ts
                        if endpos > totallen:
                            break

                    #reset target, status
                    # restore pristine covariates before re-applying this
                    # iteration's test-mode modifications
                    target_val = rec[run_ts,:].copy().astype(np.float32)
                    rec[COL_LAPSTATUS, :] = lap_status
                    rec[COL_TRACKSTATUS, :] = track_status
                    rec[COL_LAPS_INSTINT, :] = pitage_status

                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    # state at the last observed lap (just before the window)
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0

                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0

                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0

                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                laps_instint,prediction_length)

                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0

                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec

                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec

                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    # accumulate status-model error vs the untouched raw record
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))

                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                              )

                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)

                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )

                    forecasts = list(forecast_it)
                    tss = list(ts_it)

                    #get prediction result
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))

                    #update target_val
                    # feed the prediction back so the next window conditions on it
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]

                    #save forecast
                    #save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    #print('car no:', carno, 'completed_laps:', completed_laps)

                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()

                    test_rec_cnt += 1

            #one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

        #train_set.extend(_train)
        #test_set.extend(_test)

    #print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    #train_ds = ListDataset(train_set, freq=freq)
    #test_ds = ListDataset(test_set, freq=freq)

    return forecasts_et
# In[12]:
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset):
    """
    evaluate stint rank by laptime forecasting: convert per-lap laptimes
    (rows 1/2 of each car matrix) into elapsed time, rank the cars per lap,
    and write the ranks back into rows 3/4.

    input:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder (filled with true rank)
            4, -> placeholder (filled with pred rank)
        start_offset[]; elapsed time for lap0, for one specific event
                        (dataframe with car_number/elapsed_time columns;
                        anything else means offset 0)
        prediction_length ; unused, kept for interface compatibility
    return:
        forecasts_et, with rows 3/4 filled in place
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    #print('carlist:', carlist)

    caridmap={key:idx for idx, key in enumerate(carlist)}

    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan

    for carno in forecasts_et.keys():
        # bugfix: offset defaulted to an *unbound* name when start_offset was
        # not a dataframe (NameError); default to 0 instead
        offset = 0
        #start_offset is global var
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]

        lapnum = len(forecasts_et[carno][1,:])

        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed

        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed

        #maxlap = max(maxlap, len(forecasts_et[carno][1,:]))

    #calculate rank, support nan (np.argsort pushes nan to the end)
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)

    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)

    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]

    return forecasts_et
#
def eval_full_samples(lap, forecast_samples, forecast, full_samples, full_tss, maxlap=200):
    """
    fold one lap's forecast samples into the running rank accumulators.

    input:
        lap ; lap number
        forecast_samples; {} carno -> samples of pred target
        forecast ; {}, carno -> 5 x totallen matrix
            1,: -> true target
            2,: -> pred target
        full_samples ; carno -> (samplecnt, maxlap) pred-rank samples (mutated)
        full_tss ; carno -> (maxlap,) true ranks (mutated)
    return:
        None; results are written into full_samples / full_tss in place
    """
    carlist = list(forecast.keys())
    caridmap = {carno: idx for idx, carno in enumerate(carlist)}

    samplecnt = len(forecast_samples[carlist[0]])

    # true target per (car, lap) and sampled predictions per (car, sample),
    # nan-padded so that argsort pushes missing cars to the end
    diff_time = np.full((len(carlist), maxlap), np.nan)
    diff_time_hat = np.full((len(carlist), samplecnt), np.nan)

    for carno in carlist:
        row = caridmap[carno]
        maxlen = len(forecast[carno][1, :])
        diff_time[row, :maxlen] = forecast[carno][1, :]
        diff_time_hat[row, :] = forecast_samples[carno]

    # rank = argsort applied twice (position of each car after sorting)
    true_rank = np.argsort(np.argsort(diff_time, axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(diff_time_hat, axis=0), axis=0)

    for carno in carlist:
        row = caridmap[carno]
        if carno not in full_tss:
            # lazy init, nan-filled
            full_tss[carno] = np.full(maxlap, np.nan)
            full_samples[carno] = np.full((samplecnt, maxlap), np.nan)

        full_tss[carno][:lap] = true_rank[row][:lap]
        full_tss[carno][lap] = true_rank[row][lap]
        full_samples[carno][:, lap] = pred_rank[row, :]

    return
def eval_stint_direct(forecasts_et, prediction_length):
    """
    evaluate rank by timediff forecasting: rank the cars per lap directly
    by the true (row 1) and predicted (row 2) targets and write the ranks
    back into rows 3 / 4 of each car's matrix.

    input:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder (filled with true rank)
            4, -> placeholder (filled with pred rank)
        prediction_length ; unused, kept for interface compatibility
    return:
        forecasts_et, with rows 3/4 filled in place
    """
    carlist = list(forecasts_et.keys())
    caridmap = {carno: idx for idx, carno in enumerate(carlist)}

    # todo, Indy500 -> 200 max laps; nan padding keeps short series rankable
    maxlap = 200
    diff_time = np.full((2, len(carlist), maxlap), np.nan)

    for carno, mat in forecasts_et.items():
        lapnum = len(mat[1,:])
        diff_time[0, caridmap[carno], :lapnum] = mat[1,:]
        diff_time[1, caridmap[carno], :lapnum] = mat[2,:]

    # double argsort turns values into per-lap ranks; nan sorts last
    true_rank = np.argsort(np.argsort(diff_time[0], axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(diff_time[1], axis=0), axis=0)

    # write the ranks back into the car matrices
    for carno, mat in forecasts_et.items():
        lapnum = len(mat[1,:])
        mat[3,:] = true_rank[caridmap[carno], :lapnum]
        mat[4,:] = pred_rank[caridmap[carno], :lapnum]

    return forecasts_et
#calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting (near-duplicate of
    eval_stint_bylaptime, kept for interface compatibility).

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            1,: -> true laptime
            2,: -> pred laptime
            3, -> placeholder (filled with true rank)
            4, -> placeholder (filled with pred rank)
        start_offset[]; elapsed time for lap0, for one specific event
                        (dataframe with car_number/elapsed_time columns;
                        anything else means offset 0)
        prediction_length ; unused, kept for interface compatibility
    return:
        forecasts_et, with rows 3/4 filled in place
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    #print('carlist:', carlist)

    caridmap={key:idx for idx, key in enumerate(carlist)}

    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan

    for carno in forecasts_et.keys():
        # bugfix: offset was unbound (NameError) when start_offset was not a
        # dataframe; default to 0 instead (same fix as eval_stint_bylaptime)
        offset = 0
        #start_offset is global var
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]

        lapnum = len(forecasts_et[carno][1,:])

        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed

        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed

        #maxlap = max(maxlap, len(forecasts_et[carno][1,:]))

    #calculate rank, support nan (np.argsort pushes nan to the end)
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)

    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)

    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]

    return forecasts_et
# In[13]:
def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
                 test_event='Indy500-2018', test_cars = [],
                 datamode = MODE_ORACLE,model = 'oracle'):
    """
    load a trained model and run the rolling long-term prediction.

    dependency: test_event, test on one event only

    NOTE(review): the test_event parameter is not forwarded --
    longterm_predict reads the module global _test_event instead; test_cars
    also has a mutable default ([]) and is only passed through unused.
    return:
        forecasts dict from longterm_predict (carno -> 5 x totallen matrix)
    """
    # leftover accumulators; only `forecasts` is actually produced/returned
    retdf = []
    pred_ret = {}
    ds_ret = {}
    rank_result = {}
    predictor = {}

    #for model in models:
    print('exp:',inspect.stack()[0][3],'model:', model,
            'datamode:', get_modestr(datamode),'eval:', _exp_id )

    predictor[model] = load_model(prediction_length, model,
                            trainid=trainid)

    ### create test dataset
    forecasts = longterm_predict(predictor[model],
                events_id[_test_event], prediction_length,freq,
                oracle_mode=datamode,
                run_ts = _run_ts,
                test_cars=test_cars,
                half_moving_win= half_moving_win,
                train_ratio=train_ratio
                )

    #forecasts = eval_stint_rank(forecasts_et, prediction_length,
    #                global_start_offset[test_event])

    return forecasts
# In[14]:
def get_sign(diff):
    """Map a rank change to its sign: 1 (lost positions), -1 (gained), 0 (no change)."""
    if diff == 0:
        return 0
    return 1 if diff > 0 else -1
def get_stint_acc(forecasts, trim=2, currank = False):
    """
    score per-stint rank changes over the whole race for every car.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        currank ; True -> naive "rank stays the same" baseline
    output:
        dataframe with columns carno, stintid, startrank, endrank, diff,
        sign, pred_endrank, pred_diff, pred_sign (one row per stint)
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]

        # laps where this car pitted
        pitpos_list = np.where(forecasts[carno][0,:] == 1)[0]

        stintid = 0
        startrank = true_rank[0]
        for pitpos in pitpos_list:
            # one stint ends <trim> laps before the pit lap
            endrank = true_rank[pitpos-trim]
            diff = endrank - startrank
            sign = get_sign(diff)

            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                pred_endrank = pred_rank[pitpos-trim]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)

            rankret.append([carno, stintid, startrank,
                           endrank, diff, sign,
                           pred_endrank, pred_diff, pred_sign
                           ])

            stintid += 1
            startrank = true_rank[pitpos-trim]

        #end: tail stint from the last pit to the finish line.
        # bugfix: a car that never pitted used to crash on pitpos_list[-1]
        # (IndexError on the empty array); treat its whole race as one stint.
        if len(pitpos_list) == 0 or pitpos_list[-1] < lapnum - 1:
            endrank = true_rank[-1]
            diff = endrank - startrank
            sign = get_sign(diff)

            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                pred_endrank = pred_rank[-1]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)

            rankret.append([carno, stintid, startrank,
                           endrank, diff, sign,
                           pred_endrank, pred_diff, pred_sign
                           ])

    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'stintid', 'startrank',
                                'endrank', 'diff', 'sign',
                               'pred_endrank', 'pred_diff', 'pred_sign',
                               ])
    return df
#
#
# configuration
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'
# event whose season is held out for testing
_test_event = 'Indy500-2018'
#_test_event = 'Indy500-2019'
_train_len = 40
# covariate set fed to the model (FEATURE_PITAGE adds the laps-in-stint channel)
_feature_mode = FEATURE_STATUS
# 0. -> warm-up context defaults to 2 * prediction_length (see longterm_predict)
_context_ratio = 0.
# alternative task configurations, kept for quick switching:
#_task_id = 'timediff' # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
#
#_task_id = 'lapstatus' # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus' #rank, laptime, laptim2rank, timediff2rank...
_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
# how a "pit lap" is matched against neighbors (0/1/2; see get_acc_onestint_pred)
_inlap_status = 1
# fall back to the true pit lap when no prediction is available
_force_endpit_align = False
_include_endpit = False
# NOTE(review): _trim and _pitmodel are referenced by functions in this file
# but defined elsewhere -- confirm they are set before running.
# In[16]:
# mutable module state filled in by init()
global_start_offset = {}
global_carids = {}
laptime_data = None
freq = "1min"
decode_carids = {}
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
# pickle-dataset identifier; rebuilt by init() in case _inlap_status changed
dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
def init(pitmodel = ''):
    """
    load the race dataset pickle and set up the module-level state
    (global_carids, laptime_data, global_start_offset, decode_carids)
    and the pit model (_pitmodel).

    input:
        pitmodel ; '' or a model file path -> PitModelMLP(modelfile=pitmodel)
                   'oracle' -> use the ground-truth pit laps
                   non-string (e.g. 0/1) -> PitModelSimple(top8=...)
    NOTE(review): the default '' falls through to the PitModelMLP branch
    with an empty modelfile -- confirm that is the intended default.
    """
    global global_carids, laptime_data, global_start_offset, decode_carids,_pitmodel
    global dbid, _inlap_status
    dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'

    stagedata = {}
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)

        alldata, rankdata, acldata = stagedata[event]

        #offset: elapsed time at lap 0 for every car of this event
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]

    # start from here
    import pickle
    #with open('laptime_rank_timediff_fulltest-oracle-%s.pickle'%year, 'rb') as f:
    laptimefile = f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'

    with open(laptimefile, 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')

    decode_carids={carid:carno for carno, carid in global_carids.items()}

    print(f'init: load dataset {laptimefile} with {len(laptime_data)} races, {len(global_carids)} cars')

    # pick the pit model implementation from the pitmodel argument
    if not isinstance(pitmodel, str):
        _pitmodel = PitModelSimple(top8=(True if pitmodel==0 else False))
        print(f'init pitmodel as PitModelSimple')
    elif pitmodel=='oracle':
        _pitmodel = pitmodel
    else:
        _pitmodel = PitModelMLP(modelfile = pitmodel)
        print(f'init pitmodel as PitModelMLP(pitmodel)')
def runtest(modelname, model, datamode, naivemode, trainid= "2018"):
    """
    run one long-term experiment end to end and print/return its metrics.

    input:
        modelname ; unused (kept for interface compatibility)
        model ; model id passed to run_exp/load_model
        datamode ; oracle-mode flag bits
        naivemode ; True -> score the naive "rank stays the same" baseline
    return:
        (acc, mae, rmse, r2), or (0, 0, 0, 0) when _exp_id is unsupported

    NOTE(review): `rmse` here is mean_squared_error WITHOUT the sqrt (unlike
    get_evalret), and r2_score/mean_*_error are called as (pred, true) while
    sklearn documents (y_true, y_pred) -- r2 is not symmetric; confirm the
    intended argument order before comparing numbers across functions.
    """
    forecast = run_exp(2,2, train_ratio =0.1 , trainid = trainid,
                   datamode=datamode, model=model)

    # convert the forecast targets into per-lap ranks
    if _exp_id=='rank' or _exp_id=='timediff2rank':
        forecasts_et = eval_stint_direct(forecast, 2)
    elif _exp_id=='laptime2rank':
        forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
    else:
        print(f'Error, {_exp_id} evaluation not support yet')
        return 0,0, 0,0

    df = get_stint_acc(forecasts_et, currank = naivemode, trim= _trim)

    # sign accuracy plus error metrics of the predicted rank change
    correct = df[df['sign']==df['pred_sign']]
    acc = len(correct)/len(df)

    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values))/len(df)

    rmse = mean_squared_error(df['pred_diff'].values , df['diff'].values)
    mae = mean_absolute_error(df['pred_diff'].values , df['diff'].values)
    r2 = r2_score(df['pred_diff'].values , df['diff'].values)
    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}')

    return acc, mae, rmse, r2
def get_evalret(df):
    """
    print and return accuracy/error metrics for stint predictions,
    side by side with the naive "no rank change" baseline.

    input:
        df ; dataframe with columns startrank, endrank, diff, sign,
             pred_diff, pred_sign (one row per evaluated stint)
    return:
        2x4 array [[acc, mae, rmse, r2],
                   [acc_naive, mae_naive, rmse_naive, r2_naive]]
    """
    pred_change = df['pred_diff'].values
    true_change = df['diff'].values

    # model metrics: how well the predicted rank change matches the truth
    hits = df[df['sign']==df['pred_sign']]
    acc = len(hits)/len(df)
    mae1 = np.sum(np.abs(pred_change - true_change))/len(df)
    rmse = math.sqrt(mean_squared_error(pred_change , true_change))
    mae = mean_absolute_error(pred_change , true_change)
    r2 = r2_score(pred_change , true_change)

    # naive baseline: predict endrank == startrank
    start_vals = df['startrank'].values
    end_vals = df['endrank'].values
    naive_hits = df[df['startrank']==df['endrank']]
    acc_naive = len(naive_hits)/len(df)
    mae_naive = np.mean(np.abs(true_change))
    rmse_naive = math.sqrt(mean_squared_error(start_vals , end_vals))
    r2_naive = r2_score(start_vals , end_vals)

    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, \n \
           naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2,
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
        )

    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
def get_evalret_shortterm(df):
    """
    Score short-term (per-lap) rank forecasts, focusing on the race leader.

    Accuracy is computed on the rows where the model predicts rank 0 (the
    leader); the regression metrics cover all rows. Uses df columns
    'pred_endrank', 'endrank', 'startrank' and 'diff'.

    Returns
    -------
    np.ndarray of shape (2, 4):
        row 0 = model   [acc, mae, rmse, r2]
        row 1 = naive   [acc, mae, rmse, r2]  (rank-unchanged baseline)
    """
    # leader accuracy: of the laps where a leader is predicted, how often
    # was it the true leader?  Guard against an empty selection, which
    # previously raised ZeroDivisionError.
    top1_pred = df[df['pred_endrank']==0]
    correct = top1_pred[top1_pred['pred_endrank']==top1_pred['endrank']]
    acc = len(correct)/len(top1_pred) if len(top1_pred) else 0.0

    rmse = math.sqrt(mean_squared_error(df['pred_endrank'].values , df['endrank'].values))
    mae = mean_absolute_error(df['pred_endrank'].values , df['endrank'].values)
    r2 = r2_score(df['pred_endrank'].values , df['endrank'].values)

    # naive baseline: the car leading at the start lap stays the leader
    top1_naive = df[df['startrank']==0]
    n_correct = top1_naive[top1_naive['startrank']==top1_naive['endrank']]
    acc_naive = len(n_correct)/len(top1_naive) if len(top1_naive) else 0.0
    mae_naive = np.mean(np.abs(df['diff'].values))
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)

    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, \n \
            naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2,
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
        )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
#
# In[20]:
def mytest():
    """
    Evaluate every configured model on the current test event and save the
    scores to a CSV; returns the result DataFrame.

    The result is cached on disk: when the CSV already exists it is loaded
    and returned instead of re-running the experiments.
    """
    savefile = f'stint-evluate-{_exp_id}-d{_dataset_id}-t{_test_event}-c{_context_ratio}_trim{_trim}.csv'
    if os.path.exists(savefile):
        print(f'{savefile} already exists, bye')
        # BUG FIX: the cached result was read but never returned
        return pd.read_csv(savefile)

    # model name -> [predictor, oracle/test mode bitmask, naive currank flag]
    config = {'fulloracle':['oracle',MODE_ORACLE,False],
            'laponly':['oracle',MODE_ORACLE_LAPONLY,False],
            'notracklap':['oracle',MODE_NOTRACK + MODE_NOLAP,False],
            'fullpred':['oracle',MODE_PREDTRACK + MODE_PREDPIT,False],
            'curtrack':['oracle',MODE_TESTCURTRACK,False],
            'zerotrack':['oracle',MODE_TESTZERO,False],
            'predtrack':['oracle',MODE_PREDTRACK + MODE_ORACLE_TRACKONLY,False],
            'predpit':['oracle',MODE_PREDPIT + MODE_ORACLE_LAPONLY,False],
            'deepAR':['deepAR',MODE_ORACLE,False],
            'naive':['zero',MODE_ORACLE, True],
            }

    cols = ['runid','acc','mae', 'rmse', 'r2']
    result = []
    for modelname, (model, datamode, naivemode) in config.items():
        acc, mae, rmse, r2 = runtest(modelname, model, datamode, naivemode)
        result.append([modelname, acc, mae, rmse, r2])

    retdf = pd.DataFrame(result, columns=cols)
    # BUG FIX: write to `savefile` itself — the original wrote a file name
    # without the trim suffix, so the existence check above never matched
    retdf.to_csv(savefile, float_format='%.3f')
    return retdf
# In[ ]:
if __name__ == '__main__':
    # Script entry point: parse command-line options, set the module-level
    # experiment globals, then run the full evaluation (init + mytest).
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # cmd argument parser
    usage = 'stint_predictor_fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    parser.add_option("--contextratio", dest="contextratio", default=0.)
    parser.add_option("--trim", dest="trim", type=int, default=0)
    opt, args = parser.parse_args()
    #set global parameters
    # NOTE(review): opt.contextratio is parsed above but `_context_ratio`
    # (used by mytest's file name) is never assigned here — verify it is
    # initialized elsewhere, otherwise --contextratio has no effect.
    _dataset_id = opt.datasetid
    _test_event = opt.testevent
    _trim = opt.trim
    # the task id selects which time series the trained model forecasts and
    # how the forecast is mapped to ranks
    if opt.taskid == 'laptime':
        _task_id = 'laptime' # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
        _exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff' # rank,laptime, the trained model's task
        _run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
        _exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'rank':
        _task_id = 'rank' # rank,laptime, the trained model's task
        _run_ts = COL_RANK #COL_LAPTIME,COL_RANK
        _exp_id='rank' #rank, laptime, laptim2rank, timediff2rank...
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)
    if _dataset_id=='' or _test_event=='':
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)
    # 'pitage' datasets carry the extra pit-age covariate
    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE
    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
            _task_id)
    init()
    mytest()
| 148,705 | 36.58999 | 310 | py |
rankpredictor | rankpredictor-master/src/indycar/model/stint_simulator_basic.py | #!/usr/bin/env python
# coding: utf-8
# ### stint simulator
# based on: Stint-Predictor-Fastrun
#
#
# long term predictor by continuously regressive forecasting at each pitstop
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# In[2]:
import os
# seed the RNG from system entropy (no fixed seed: runs are intentionally
# non-deterministic)
random.seed()
# notebook residue: the returned working directory is discarded
os.getcwd()
#GPUID = 1
# ### global constants
# In[3]:
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# Row indices of the per-car feature matrix (shape: [feature, lap]).
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8
# share the memory
#COL_LAPSTATUS_PRED = 8 # for dynamic lapstatus predictions
#LAPSTATUS SAVED in forecast_et
COL_LAPSTATUS_SAVE = 0 #laptime no use
COL_CAUTION_LAPS_INSTINT_SAVE=7
COL_LAPS_INSTINT_SAVE= 8
# Covariate-set selectors: track/lap status only, or with pit age added.
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
# oracle mode
# Bit flags combined into the `oracle_mode` bitmask (tested via test_flag).
MODE_ORACLE = 1024 # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2
# oracle mode for training
MODE_NOLAP = 1
MODE_NOTRACK = 2
# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32
# disturbe analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256
# flag value -> printable name, used by get_modestr for logging
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
             MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
             MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
             MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
             MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
             MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
# In[ ]:
# In[4]:
def load_data(event, year=0):
    """
    Load one event's lap-by-lap CSV and derive per-lap datasets.

    Parameters
    ----------
    event : str  event name, e.g. 'Indy500-2018'
    year : int   optional; when > 0 it is appended to the file name

    Returns
    -------
    (alldata, rankdata, acldata):
        alldata  - raw records for all cars
        rankdata - records deduplicated per (car, lap), ordered by elapsed time
        acldata  - completed-laps dataset over all cars (see make_cl_data)
    """
    if year > 0:
        # BUG FIX: `year` is an int — the original concatenated it into the
        # path directly, which raised TypeError
        inputfile = '../data/final/C_' + event + '-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'

    dataset = pd.read_csv(inputfile)

    final_lap = max(dataset.completed_laps)

    # cars that completed the final lap
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values

    # keep a full copy; restrict `dataset` to finishers
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]

    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # note: the finisher-only cl dataset was computed here before but never
    # used; only the all-cars version is returned
    acldata = make_cl_data(alldata)

    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """
    Build the completed-laps dataset from raw timing records.

    Keeps one record per (car_number, completed_laps) — the earliest by
    elapsed time — and adds per-car lap-over-lap differences.

    Returns a DataFrame with columns: car_number, completed_laps, rank,
    elapsed_time, rank_diff, time_diff, current_status, track_status,
    lap_status.
    """
    # pick up data with valid rank: earliest record per (car, lap)
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # resort by car_number, lap so diff() runs within each car's laps
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
                          "last_pitted_lap","start_position","laps_led"], axis=1)

    # a car's first row has no previous lap: zero the diffs at car boundaries.
    # BUG FIX: use .loc instead of chained assignment (SettingWithCopy; a
    # silent no-op under pandas copy-on-write). The boundary mask is also
    # computed once instead of twice.
    new_car = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[new_car, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[new_car, 'time_diff'] = 0

    df = uni_ds[['car_number','completed_laps','rank','elapsed_time',
                 'rank_diff','time_diff',"current_status", "track_status", "lap_status"]]
    return df
# In[5]:
def nan_helper(y):
    """
    Locate NaNs in a 1-d array.

    Returns
    -------
    (nan_mask, to_indices):
        nan_mask   - boolean array, True where y is NaN
        to_indices - helper turning a boolean mask into positional indices,
                     e.g. for interpolation:
                     nans, x = nan_helper(y)
                     y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    nan_mask = np.isnan(y)

    def to_indices(logical_mask):
        # positions of the True entries
        return logical_mask.nonzero()[0]

    return nan_mask, to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
def get_modestr(a):
    """Render the mode bitmask *a* as a comma-terminated list of mode names."""
    matched = [_mode_map[flag] for flag in _mode_map if test_flag(a, flag)]
    return ''.join('%s,' % name for name in matched)
# endpos -> vector of prediction_length
# endpos -> cached track-status forecast, and the matching ground truth
_track_pred = {}
_track_true = {}


def init_track_model():
    """Drop all cached track-status forecasts."""
    global _track_pred, _track_true
    _track_pred = {}
    _track_true = {}


def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """
    Forecast the track status (yellow-flag laps) for the prediction window.

    A yellow-flag run length is sampled from an empirical model; if a run is
    already in progress just before the window, its remaining laps are filled
    in. Results are memoized per *endpos* so all cars at the same split point
    share one forecast; the window's ground truth is stashed in _track_true.
    """
    global _track_pred, _track_true
    # empirical caution lengths (Indy500 2018)
    track_model = [6, 4, 4, 5, 6, 6, 4]

    if endpos not in _track_pred:
        # count consecutive yellow laps immediately before the forecast window
        yflaplen = 0
        for back in range(1, context_len):
            if track_rec[-prediction_length - back] != 1:
                break
            yflaplen += 1

        forecast = np.array([0 for _ in range(prediction_length)])
        yflap_pred = random.choice(track_model)
        if yflaplen > 0 and yflap_pred > yflaplen:
            # the ongoing caution continues for its remaining sampled laps
            forecast[:(yflap_pred - yflaplen)] = 1

        _track_pred[endpos] = forecast
        _track_true[endpos] = track_rec[-prediction_length:].copy()

    return _track_pred[endpos]
# endpos -> vector of prediction_length
_track_adjust = {}
def init_adjust_track_model():
global _track_adjust
_track_adjust = {}
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
"""
input:
tailpos ; <0 end pos of 1
return the predicted track status
"""
global _track_adjust
# this is the perfect track model for Indy500 2018
track_model = [-1,0,1]
if endpos in _track_adjust:
return _track_adjust[endpos]
else:
yflap_adjust = random.choice(track_model)
#laps remain, fill into the future
trackadjust = track_rec[-prediction_length:].copy()
if yflap_adjust == -1:
trackadjust[tailpos] = 0
elif yflap_adjust == 1:
trackadjust[tailpos] = 0
if (tailpos + 1) <= -1:
trackadjust[tailpos+1] = 1
_track_adjust[endpos] = trackadjust
return trackadjust
# carno -> lap_status
# carno -> adjusted lap_status vector (memoized per car)
_lap_adjust = {}
# pit-shift offset -> observation count (empirical stats of applied shifts)
_empirical_model = {}


def init_adjust_pitmodel():
    """Reset the per-car pit-adjustment cache and the shift statistics."""
    # BUG FIX: _empirical_model was missing from the global statement, so
    # the assignment below only created a dead local and the module-level
    # statistics were never cleared.
    global _lap_adjust, _empirical_model
    _lap_adjust = {}
    _empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """
    Return a randomly perturbed copy of a car's true pit-lap vector.

    Each pit lap (value 1) is shifted by an offset drawn from the global
    _adjust_model table; the result is cached per car in _lap_adjust, so
    repeated calls for the same car return the same adjusted vector.

    input:
        carno ; car number (cache key)
        lapstatus ; the ground-truth lap-status vector (1 == pit lap)
        force ; True -> redraw until the shifted position lands inside the
            vector; False -> try once and keep the lap in place on failure
    """
    if carno not in _lap_adjust:
        #adjust it
        lapadjust = lapstatus.copy()
        for pos in range(0, len(lapstatus)):
            if lapadjust[pos] == 1:
                success = False
                while(not success):
                    # adjust this pit lap position
                    # NOTE(review): a pit shifted forward can be visited and
                    # shifted again later in this scan — presumably
                    # acceptable; confirm this cascade is intended
                    pos_adjust = get_random_choice(_adjust_model)

                    new_pos = pos + pos_adjust

                    if new_pos >= 0 and new_pos < len(lapstatus):
                        #valid
                        lapadjust[pos] = 0
                        lapadjust[new_pos] = 1
                        success = True

                        #add statistics
                        # record the shift actually applied (only successes)
                        if pos_adjust not in _empirical_model:
                            _empirical_model[pos_adjust] = 1
                        else:
                            _empirical_model[pos_adjust] += 1

                    if force==False:
                        break

        _lap_adjust[carno] = lapadjust

    return _lap_adjust[carno]
def build_random_model(modeldict):
    """
    Turn a {value: weight} mapping into a cumulative sampling table.

    input:
        modeldict ; {value: probability/weight}
    return:
        array of shape (len(modeldict), 2): column 0 holds the values in
        ascending order, column 1 the cumulative probability (normalized so
        the last entry is 1).
    """
    table = np.zeros((len(modeldict), 2))
    running = 0.0
    for row, key in enumerate(sorted(modeldict)):
        running += modeldict[key]
        table[row, 0] = key
        table[row, 1] = running
    # normalize the CDF so it ends at exactly 1
    table[:, 1] /= running
    return table
def print_model(model, iscdf=True):
    """
    Pretty-print a sampling table as 'value:probability' pairs.

    input:
        model ; array [value, cdf-or-weight]
        iscdf ; True -> column 1 is a CDF and is differenced back into
            probabilities; False -> column 1 holds raw weights, which are
            normalized by their sum.
    """
    ordered = model[np.argsort(model[:, 0])]
    total = 1. if iscdf else np.sum(ordered[:, 1])
    pairs = []
    prev = 0
    for val, p in ordered:
        pairs.append((val, (p - prev) / total))
        if iscdf:
            prev = p
    print(['%d:%.3f' % (v, q) for v, q in pairs])
def get_random_choice(model):
    """
    Draw a value from a sampling table built by build_random_model.

    A uniform draw in [0, 1) is matched against the CDF column; the value
    (column 0) of the first row whose CDF reaches the draw is returned.
    """
    draw = np.random.rand()
    # number of CDF entries strictly below the draw == index of the hit row
    row = np.sum(model[:, 1] < draw)
    return int(model[row, 0])
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
# pit-lap shift distribution: offset (in laps) -> weight; the weights need
# not sum to 1 — build_random_model normalizes them into a CDF
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
# sampling table (value, cdf) consumed by get_random_choice
_adjust_model = build_random_model(_modeldict)
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """
    Randomly shift each pit lap inside the prediction window.

    input:
        lap_rec ; lap-status vector (1 == pit lap); only its last
            `prediction_length` entries are copied and perturbed
        force ; True -> redraw until the sampled shift lands inside the
            window; False -> try once and keep the lap in place on failure
    return:
        perturbed copy of the last `prediction_length` lap statuses
    """
    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            success = False
            while(not success):
                # adjust this pit lap position
                # shift offset drawn from the global _adjust_model table;
                # NOTE(review): a pit moved forward can be re-visited and
                # shifted again later in this scan — confirm intended
                pos_adjust = get_random_choice(_adjust_model)

                new_pos = pos + pos_adjust

                if new_pos >= 0 and new_pos < prediction_length:
                    #valid
                    lapadjust[pos] = 0
                    lapadjust[new_pos] = 1
                    success = True

                if force==False:
                    break

    return lapadjust
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
    """
    Shift each pit lap inside the prediction window by -1/0/+1, uniformly.

    Unlike adjust_pit_model, the shift is drawn from a fixed uniform
    {-1, 0, 1} choice and out-of-window shifts simply leave the lap where
    it is (no retry).

    input:
        lap_rec ; lap-status vector (1 == pit lap); only its last
            `prediction_length` entries are copied and perturbed
        endpos ; unused (kept for signature compatibility with callers)
    return:
        perturbed copy of the last `prediction_length` lap statuses
    """
    adjust_model = [-1, 0, 1]
    # BUG FIX: removed a dead `random.choice(adjust_model)` draw whose
    # result was never used (it only perturbed the random stream).

    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            # draw an independent shift for this pit lap
            pos_adjust = random.choice(adjust_model)
            if pos_adjust == -1:
                if (pos - 1 >= 0):
                    lapadjust[pos] = 0
                    lapadjust[pos - 1] = 1
            elif pos_adjust == 1:
                if (pos + 1 < prediction_length):
                    lapadjust[pos] = 0
                    lapadjust[pos + 1] = 1
    return lapadjust
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
"""
return the predicted pit status
"""
# this is the perfect empirical pit model for Indy500 2018
pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
[45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
[46, 45, 43, 48, 46, 45, 45, 43]]
pit_model = pit_model_all
if cuation_laps_instint>10:
#use low model
pred_pit_laps = random.choice(pit_model[0])
else:
pred_pit_laps = random.choice(pit_model[1])
#laps remain, fill into the future
pitpred = np.array([0 for x in range(prediction_length)])
if (pred_pit_laps > laps_instint) and (pred_pit_laps <= laps_instint + prediction_length):
pitpred[pred_pit_laps - laps_instint - 1] = 1
return pitpred
def make_dataset_byevent(runs, prediction_length, freq,
                       useeid = False,
                       run_ts= COL_LAPTIME,
                       test_event = 'Indy500-2018',
                       test_cars = [],
                       use_global_dict = True,
                       oracle_mode = MODE_ORACLE,
                       half_moving_win = 0,
                       train_ratio=0.8,
                       log_transform = False,
                       context_ratio = 0.,
                       verbose = False
                ):
    """
    Build GluonTS train/test ListDatasets from the global `laptime_data`.

    Only the event matching `_test_event` yields test windows; all other
    events are skipped entirely (see the `continue` below). For the test
    event, rolling windows are cut from the end of each car's series and the
    track/lap-status covariates in the forecast window are replaced
    according to `oracle_mode` (oracle truth, zeros, current value, model
    prediction, or a randomized disturbance).

    input:
        runs ; >=0 selects one event from laptime_data, <0 uses all
        oracle_mode ; MODE_* bitmask controlling the covariates of the
            forecast window in the test set
        half_moving_win ; rolling-window stride: 0 -> 1 lap, 1 -> plen/2,
            2 -> plen
        context_ratio ; when nonzero, fraction of max series length kept as
            minimal context before the first test window
    return:
        (train_ds, test_ds, train_set, test_set)

    NOTE(review): the `run_ts` and `test_event` parameters are immediately
    overwritten by the module globals `_run_ts` / `_test_event`, and
    `test_cars` is never used — callers' values have no effect.
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    # reset the memoized track forecasts/adjustments for this dataset build
    init_track_model()
    init_adjust_track_model()
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []
    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus
    # accumulated absolute deviation of the (possibly adjusted) covariates
    # from the raw truth over all test windows: [track, lap]
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out
            # non-test events are skipped entirely in this variant
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10
        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # adjust for disturbance analysis
            # replaces the whole lap-status row with a per-car randomized
            # version (cached in _lap_adjust)
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                                     rec[COL_LAPSTATUS,:]]
                              }
                             )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1
                #bug fix, fixed the split point for all cars/ts
                for endpos in range(max_len, context_len+prediction_length,
                        step):
                    #check if enough for this ts
                    if endpos > totallen:
                        continue
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    # stint state at the last observed lap before the window
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec
                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec
                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec]
                                     }
                                    )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                     }
                                    )
                    test_rec_cnt += 1
            #add one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
        train_set.extend(_train)
        test_set.extend(_test)
    print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)
    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile, freq, prediction_length, cardinality, train_ds, test_ds):
    """
    Pickle the dataset bundle to *datafile*.

    The file contains the list
    [freq, prediction_length, cardinality, train_ds, test_ds].
    """
    bundle = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as fout:
        # highest protocol: most compact and fastest representation
        pickle.dump(bundle, fout, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[6]:
def predict(test_ds, predictor):
    """
    Run *predictor* over *test_ds*.

    Returns (tss, forecasts): the ground-truth series and the corresponding
    forecast objects, both materialized as lists.
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,
        predictor=predictor,
        num_samples=100,  # sample paths drawn per forecast
    )
    # materialize in the original order: forecasts first, then the truth
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name, trainid):
    """
    Load the predictor named *model_name* and run it over *test_ds*.

    Serialized deepAR variants are deserialized from the models directory;
    'naive'/'zero'/'arima' are constructed on the fly.

    Returns [tss, forecasts] on success, [] for an unknown model name.
    """
    # serialized deepAR variants: model name -> model-directory name.
    # The six original branches differed only in this string, so they are
    # collapsed into one table-driven path.
    deepar_models = {
        'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
        'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
        'oracle': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
        'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
        'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
        'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
    }

    with mx.Context(mx.gpu(7)):
        pred_ret = []

        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'

        if model_name in deepar_models:
            modeldir = rootdir + deepar_models[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]

        # naive: repeat the last observed value
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq=freq, prediction_length=prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]

        # zero: zero forecast keeps the rank unchanged
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq=freq, prediction_length=prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]

        # arima via the R forecast package
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq=freq,
                                           prediction_length=prediction_length, trunc_length=60)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]

        else:
            print(f'error: model {model_name} not support yet!')

        return pred_ret
def load_model(prediction_length, model_name, trainid):
    """
    Load (or construct) the predictor named *model_name* without running it.

    Serialized deepAR variants are deserialized from the models directory;
    'naive'/'zero'/'arima' are constructed on the fly.

    Returns the predictor, or None for an unknown model name (the original
    fell through to `return predictor` with the variable unbound, raising
    UnboundLocalError).
    """
    # serialized deepAR variants: model name -> model-directory name
    # (same table-driven consolidation as run_prediction_ex)
    deepar_models = {
        'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
        'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
        'oracle': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
        'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
        'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
        'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
    }

    with mx.Context(mx.gpu(7)):
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'

        if model_name in deepar_models:
            modeldir = rootdir + deepar_models[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')

        # naive: repeat the last observed value
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq=freq, prediction_length=prediction_length)

        # zero: zero forecast keeps the rank unchanged
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq=freq, prediction_length=prediction_length)

        # arima via the R forecast package
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq=freq,
                                           prediction_length=prediction_length, trunc_length=60)

        else:
            print(f'error: model {model_name} not support yet!')
            predictor = None  # BUG FIX: was an UnboundLocalError on return

        return predictor
# In[7]:
#calc rank
def eval_rank_bytimediff(test_ds,tss,forecasts,prediction_length):
    """
    Evaluate rank prediction for time-difference models.

    Works for one event only; uses the module-level `decode_carids`
    mapping to recover the car number from the static category id.

    input:
        test_ds           ; test set for a single event
        tss, forecasts    ; forecast result (gluonts evaluation output)
        prediction_length ; expected forecast horizon
    return:
        rank_ret     ; list of [lap, time_diff, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [true tail, predicted tail]}}
    """
    carlist = []

    # completed_laps -> {carno: [true timediff tail, predicted timediff tail]}
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map static category id back to the real car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: this message was missing its f-string prefix
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bug fix: keep the two-value return shape so callers can unpack
            return [], {}

        # point forecast: median over the sample paths (timediff models use median)
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        timediff_array = tss[idx].values.copy()

        # lap number at which this forecast window starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                                               forecast_laptime_mean.copy()]

    # calc rank per start lap
    rank_ret = []
    for lap in forecasts_et.keys():
        # cars that have a forecast starting at this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}

        # [0] = true time diffs, [1] = predicted time diffs
        time_diff = np.zeros((2, len(carlist), prediction_length))
        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_length))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_length))

        # argsort twice converts raw values into 0-based ranks per lap column
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, time_diff, true_rank, pred_rank])

    return rank_ret, forecasts_et
#calc rank
def eval_laptime(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    evaluate lap-time forecasting
    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                  contain features to identify the eventid
        start_offset[]; elapsed time for lap0 (kept for interface parity; not used here)
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, lap_time, true_laptime, pred_laptime]
        forecasts_et ; {completed_laps: {carno: [true laptime tail, predicted tail]}}
    """
    carlist = []

    # completed_laps -> {carno: [true laptime tail, predicted laptime tail]}
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map static category id back to the real car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: this message was missing its f-string prefix
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bug fix: keep the two-value return shape so callers can unpack
            return [], {}

        # point forecast: mean over the sample paths
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))

        laptime_array = tss[idx].values.copy()
        # splice the forecast tail into a copy of the ground truth
        laptime_array_hat = tss[idx].values.copy()
        laptime_array_hat[-prediction_len:] = forecast_laptime_mean

        # lap number at which this forecast window starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
                                               laptime_array_hat[-prediction_len:].copy()]

    # collect per-lap matrices
    rank_ret = []
    for lap in forecasts_et.keys():
        # cars that have a forecast starting at this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}

        # [0] = true laptimes, [1] = predicted laptimes
        lap_time = np.zeros((2, len(carlist), prediction_length))
        for carno in carlist:
            carid = caridmap[carno]
            lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_length))
            lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_length))

        true_laptime = lap_time[0]
        pred_laptime = lap_time[1]
        rank_ret.append([lap, lap_time, true_laptime, pred_laptime])

    return rank_ret, forecasts_et
#calc rank
def eval_rank(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting
    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                  contain features to identify the eventid
        start_offset[]; elapsed time for lap0 (pandas DataFrame) for laptime mode;
                        any non-DataFrame value selects rank-direct mode
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: (elapsed_time, elapsed_time_pred)}}
    """
    carlist = []

    # completed_laps -> {carno: [true tail, predicted tail]}
    forecasts_et = dict()

    # laptime mode: start_offset carries lap-0 elapsed time per car
    laptime_mode = isinstance(start_offset, pd.core.frame.DataFrame)

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map static category id back to the real car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: this message was missing its f-string prefix
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bug fix: keep the two-value return shape so callers can unpack
            return [], {}

        # point forecast: mean over the sample paths
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))

        if laptime_mode:
            # laptime2rank: accumulate laptimes into elapsed time from lap-0 offset
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
            laptime_array = tss[idx].values.copy()
            elapsed_time = np.cumsum(laptime_array) + offset
            laptime_array = tss[idx].values.copy()
            laptime_array[-prediction_len:] = forecast_laptime_mean
            elapsed_time_hat = np.cumsum(laptime_array) + offset
        else:
            # rank-direct: the target series itself is the ranking signal
            elapsed_time = tss[idx].values.copy()
            elapsed_time_hat = tss[idx].values.copy()
            elapsed_time_hat[-prediction_len:] = forecast_laptime_mean

        # lap number at which this forecast window starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]

    # calc rank per start lap
    rank_ret = []
    for lap in forecasts_et.keys():
        # cars that have a forecast starting at this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}

        # [0] = true values, [1] = predicted values
        elapsed_time = np.zeros((2, len(carlist), prediction_length))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_length))
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_length))

        # argsort twice converts raw values into 0-based ranks per lap column
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])

    return rank_ret, forecasts_et
def get_acc(rank_ret,prediction_length, verbose = False):
    """
    Aggregate rank-accuracy metrics over a list of per-lap rank results.

    input:
        rank_ret: [lap, elapsed_time, true_rank, pred_rank], use [2][3] columns
    return:
        ((metrics...)
        (record count...))
        the result can be used to calculate micro/macro metrics

    Metrics returned (in order): top1acc, top1acc_farmost, top5acc,
    top5acc_farmost, tau, rmse. NOTE(review): mae is computed and printed
    but is not part of the returned tuple (except via the debug hack below).
    """
    # evaluate
    #top1 accuracy
    top1acc = 0
    top1acc_farmost = 0
    top5acc = 0
    top5acc_farmost = 0
    tau = 0
    rmse = 0.
    mae = 0.
    for rec in rank_ret:
        trueRank = rec[2]
        predRank = rec[3]
        #top1 , rank = 0, first col is not prediction
        # count positions where both true and predicted rank are the leader (0)
        top1acc += np.sum((trueRank==0) & (predRank==0))
        # "farmost": only the last predicted lap of the window
        top1acc_farmost += np.sum((trueRank[:,-1]==0) & (predRank[:,-1]==0))
        #top5
        top5acc += np.sum((trueRank<5) & (predRank<5))
        top5acc_farmost += np.sum((trueRank[:,-1]<5) & (predRank[:,-1]<5))
        # tau: Kendall rank correlation between true and predicted ranks
        tao, _ = stats.kendalltau(trueRank, predRank)
        tau += tao
        #rmse
        rmse += mean_squared_error(predRank,trueRank)
        #mae
        mae += np.sum(np.abs(predRank - trueRank))
    recnt = len(rank_ret)
    if recnt > 0:
        # normalize the accumulated counts into rates / averages
        top1acc = top1acc *1.0/ (recnt*prediction_length)
        top1acc_farmost = top1acc_farmost *1.0/ recnt
        top5acc = top5acc *1.0/ (5*recnt*prediction_length)
        top5acc_farmost = top5acc_farmost *1.0/ (5*recnt)
        tau = tau/recnt
        rmse = rmse/recnt
        mae = mae/recnt
    #debug only
    # HACK: when forecasting the lap-status channel, smuggle mae out through
    # the tau slot of the return value (relies on module-level _run_ts)
    if _run_ts == COL_LAPSTATUS:
        tau = mae
    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', top1acc,
                'top1acc_farmost=', top1acc_farmost,
                'top5acc=', top5acc,
                'top5acc_farmost=', top5acc_farmost,
             )
        print('tau = ', tau,
                'rmse = ', rmse,
                'mae = ', mae)
    # second tuple holds the sample counts matching each metric, so callers
    # can compute micro-averaged metrics across calls
    return ((top1acc,top1acc_farmost,top5acc,top5acc_farmost,tau,rmse),
            (recnt*prediction_length,recnt,5*recnt*prediction_length,5*recnt,recnt,recnt))
# In[ ]:
# In[8]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc, datamode='', models=[]):
    """
    Run a grid of experiments (halfmode x prediction_length x trainid) for
    `runs` repetitions and average the resulting metrics.

    input:
        plens=[2,5,10]
        half=[False]
        trainids = ["r0.5"]
        runs = 5
        train_ratio=0.5
        testfunc ; either a callable (run_exp_predpit, run_exp_predtrack, ...)
                   or a string, in which case run_exp is called with
                   datamode and models
    return:
        dfret ; average result of multiple runs
            dataframe['model' , 'prediction_length', 'halfmode','datamode','trainid',
                'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse',
                'top1acc_std','top1acc_farmost_std','top5acc_std','top5acc_farmost_std','tau_std','rmse_std']
        alldata_ret ; for debug
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
            pred_ret[model] -> [tss, forecasts]
            test_ds[model] -> test_ds
            rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
            forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)

    NOTE(review): the early-error paths return None (bare `return`) rather
    than the (dfret, alldata_ret) pair — callers that unpack will raise.
    """
    if plens == [] or half == [] or trainids == []:
        print("error with empty settings")
        return
    #testfunc or (datamode & models)
    # a string testfunc requires datamode and models to route through run_exp
    if isinstance(testfunc,str) and (datamode == '' or models == []):
        print("error with testfunc")
        return
    allret = []
    alldata_ret = []
    for runid in range(runs):
        exp_data = []
        exp_result = []
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('='*10)
                    # callable testfunc is invoked directly; a string routes
                    # the call through run_exp with datamode/models
                    if not isinstance(testfunc,str):
                        pred_ret, test_ds, rank_ret, metric_ret = testfunc(plen, halfmode,
                                              train_ratio=train_ratio,
                                              trainid=trainid)
                    else:
                        pred_ret, test_ds, rank_ret, metric_ret = run_exp(plen, halfmode,
                                              train_ratio=train_ratio,
                                              trainid=trainid,
                                               datamode=datamode,
                                               models=models)
                    #save
                    exp_data.append((pred_ret, test_ds, rank_ret))
                    exp_result.extend(metric_ret)
        #save result
        result = pd.DataFrame(exp_result, columns = ['model' , 'prediction_length', 'halfmode',
                                                     'datamode','trainid',
                                                     'top1acc','top1acc_farmost','top5acc',
                                                     'top5acc_farmost','tau','rmse'])
        #result['runid'] = [runid for x in range(len(result))]
        allret.append(result)
        alldata_ret.append(exp_data)
    #final
    # stack per-run metric matrices: (runs, grid rows, 6 metrics)
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :,:] = ret[['top1acc','top1acc_farmost','top5acc',
                                   'top5acc_farmost','tau','rmse']].values
    #average
    averagemat = np.mean(metrics[:,:,:], axis=0)
    stdmat = np.std(metrics[:,:,:], axis=0)
    # identifying columns are identical across runs; take them from run 0
    dfhead = allret[0][['model' , 'prediction_length', 'halfmode', 'datamode','trainid']]
    dfaverage = pd.DataFrame(averagemat, columns = ['top1acc','top1acc_farmost','top5acc',
                                                    'top5acc_farmost','tau','rmse'])
    dfstd = pd.DataFrame(stdmat, columns = ['top1acc_std','top1acc_farmost_std','top5acc_std',
                                            'top5acc_farmost_std','tau_std','rmse_std'])
    dfret = pd.concat([dfhead, dfaverage, dfstd], axis=1)
    #if exp_id != '':
    #    dfret.to_csv(f'laptime2rank-evaluate-indy500-{exp_id}-result.csv', float_format='%.3f')
    return dfret, alldata_ret
def checkret_status(dataret, runid = 0, idx = 0,model='oracle'):
    """
    Print the total yellow-flag (track) and pit (lap) status counts inside
    the prediction window of every record in one stored test set.

    Layout of `dataret` (see run_test):
        [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
        pred_ret[model] -> [tss, forecasts]
        test_ds[model] -> test_ds
    """
    # window length comes from the first forecast's sample matrix
    _, window = dataret[runid][idx][0][model][1][0].samples.shape
    dataset = dataret[runid][idx][1][model]

    yellow_total = 0
    pit_total = 0
    for record in dataset:
        # look up the car number (kept for parity: raises on unknown id)
        carno = decode_carids[record['feat_static_cat'][0]]
        track_series, lap_series = record['feat_dynamic_real']
        yellow_total += np.sum(track_series[-window:])
        pit_total += np.sum(lap_series[-window:])

    print('yfcnt:', yellow_total, 'pitcnt:', pit_total)
def get_ref_oracle_testds(plens, halfs, train_ratio=0.8, test_cars = []):
    """
    Build oracle-mode test datasets for every (prediction_length,
    half_moving_win) combination of the test event.

    Returns a dict keyed by '<prediction_length>-<half_moving_win>'.
    Relies on module-level globals: events_id, _test_event, freq, _run_ts.
    """
    reference = {}
    for plen in plens:
        for halfwin in halfs:
            # only the test split is needed; discard the rest
            _, ds, _, _ = make_dataset_byevent(events_id[_test_event], plen, freq,
                                               oracle_mode=MODE_ORACLE,
                                               run_ts = _run_ts,
                                               test_cars=test_cars,
                                               half_moving_win= halfwin,
                                               train_ratio=train_ratio)
            reference['%d-%d'%(plen, halfwin)] = ds
    return reference
def checkret_confusionmat(dataret, ref_testset, runid= 0, testid = '', model='oracle'):
    """
    Output accuracy metrics split by <trackstatus, lapstatus> lap type
    ('00','10','01','11', plus 'aa' for all records combined).

    input:
        dataret     ; result of run_test (see its docstring for the layout)
        ref_testset ; oracle test datasets keyed '<plen>-<halfwin>'
                      (see get_ref_oracle_testds); used as ground truth for
                      the track/lap status inside each prediction window
    return:
        dfacc ; DataFrame['testid','plen','type','reccnt', metrics...]
    """
    plen_length = len(dataret[runid])
    dflist = []
    for idx in range(plen_length):
        # window length comes from the first forecast's sample matrix
        _, plen = dataret[runid][idx][0][model][1][0].samples.shape
        test_ds = dataret[runid][idx][1][model]
        rank_ret = dataret[runid][idx][2][model][0]
        # reference sets are keyed with half_moving_win fixed to 0
        key = '%d-%d'%(plen,0)
        if key not in ref_testset:
            print(f'error, {key} not found in ref_testset')
            continue
        ref_oracle_testds = ref_testset[key]
        if len(ref_oracle_testds) != len(test_ds):
            print('error, size of testds mismatch', len(ref_oracle_testds), len(test_ds))
            continue
        # confusion matrix for <trackstatus, lapstatus> type: 00,01,10,11
        # lap(start lap of prediction) -> type
        lapmap = {}
        ds_iter = iter(ref_oracle_testds)
        for recid in range(len(ref_oracle_testds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            track_rec,lap_rec = test_rec['feat_dynamic_real']
            # count yellow-flag and pit laps inside the prediction window
            yfcnt = np.sum(track_rec[-plen:])
            pitcnt = np.sum(lap_rec[-plen:])
            #laptype = ('0' if yfcnt==0 else '1') + ('0' if pitcnt==0 else '1')
            lap = len(track_rec) - plen + 1
            # accumulate counts over all cars starting at the same lap
            if lap not in lapmap:
                #lapmap[lap] = laptype
                lapmap[lap] = (yfcnt, pitcnt)
            else:
                oldtype = lapmap[lap]
                lapmap[lap] = (yfcnt + oldtype[0], pitcnt + oldtype[1])
        #split the rank_ret by laptype
        types=['00','10','01','11']
        acc_ret = []
        for laptype in types:
            check_ret = []
            for item in rank_ret:
                typecnt = lapmap[item[0]]
                # collapse the counts into a two-character type string
                thetype = ('0' if typecnt[0]==0 else '1') + ('0' if typecnt[1]==0 else '1')
                if thetype == laptype:
                    check_ret.append(item)
            # get acc
            metrics = get_acc(check_ret,plen)
            recret = [testid, plen, laptype, len(check_ret)]
            recret.extend(metrics[0])
            acc_ret.append(recret)
        #add all test
        metrics = get_acc(rank_ret,plen)
        recret = [testid, plen, 'aa', len(rank_ret)]
        recret.extend(metrics[0])
        acc_ret.append(recret)
        _dfacc = pd.DataFrame(acc_ret, columns = ['testid','plen',
                'type','reccnt','top1acc','top1acc_farmost','top5acc',
                'top5acc_farmost','tau','rmse'])
        dflist.append(_dfacc)
    dfacc = pd.concat(dflist, axis=0)
    return dfacc
# In[9]:
def check_testds(datamode, test_cars=[]):
    """
    Build datasets for every (prediction_length, half_moving_win)
    combination under `datamode`, purely for the side effects of dataset
    construction (make_dataset_byevent reports mae, etc.).

    Relies on module-level globals: plens, half, train_ratio, freq,
    events_id, _test_event, _run_ts.
    """
    for plen in plens:
        for halfwin in half:
            # return values are deliberately discarded
            make_dataset_byevent(events_id[_test_event], plen, freq,
                                 oracle_mode=datamode,
                                 run_ts = _run_ts,
                                 test_cars=test_cars,
                                 half_moving_win= halfwin,
                                 train_ratio=train_ratio)
def dotest(config):
    """
    Run every configured (model, testfunc-name -> datamode) combination
    through run_test and collect both the averaged metrics and the
    per-lap-type accuracy tables.

    input:
        config ; {model: {testfunc_name: datamode}}
    return:
        (dfret, dfacc) ; concatenated metric and confusion DataFrames
    Relies on module-level globals: runs, plens, half, trainids,
    train_ratio, ref_testset.
    """
    accum_df = []
    accum_acc = []
    for model, conf in config.items():
        for teststr, datamode in conf.items():
            # teststr is passed as the (string) testfunc, routed via run_exp
            df, dataret = run_test(runs, plens, half, trainids,
                                   train_ratio, teststr,
                                   datamode=datamode, models=[model])
            acc = checkret_confusionmat(dataret, ref_testset,
                                        testid = teststr, model=model)
            accum_df.append(df)
            accum_acc.append(acc)

    dfret = pd.concat(accum_df, axis=0)
    dfacc = pd.concat(accum_acc, axis=0)
    return dfret, dfacc
#
# simulator
#
def get_pitlaps(verbose = True, prediction_length=2):
    """
    collect pitlaps info from COL_LAPSTATUS
    input:
        laptime_data ;
        _test_event ;
        events
        _train_len ; minimum laps for a ts(otherwise, discard)
    return:
        ret_pitlaps ; sorted unique laps at which any car pits
        all_pitlaps ; {carno: [pit-stop laps]}
        max_lap     ; longest ts length in the event
    """
    run_ts = _run_ts
    all_pitlaps = {}  # carno -> list of pit-stop lap indices
    max_lap = 0

    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue

        # longest time series in this event
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()

            # remove nan (tails only) of the target row
            # cleanup: redundant nan_helper call and unused carid/static_cat
            # locals removed (behavior unchanged)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < _train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            carno = _data[1][rowid]

            # laps flagged as pit stops in the lap-status row
            lap_status = rec[COL_LAPSTATUS, :]
            pitstops = np.where(lap_status == 1)[0]
            all_pitlaps[carno] = list(pitstops)

    # union of all cars' pit laps, sorted
    allset = []
    for laps in all_pitlaps.values():
        allset.extend(laps)
    ret_pitlaps = sorted(set(allset))

    return ret_pitlaps, all_pitlaps, max_lap
def get_nextpit(pitlaps, startlap):
    """
    Find, for each car, the first pit stop strictly after `startlap`.

    input:
        pitlaps  ; {carno: [pit-stop laps]} for all the cars
        startlap ;
    return:
        nextpit_map ; {carno: next pit lap}; cars with no later pit omitted
        maxpit      ; latest next-pit lap over all cars, ignoring the nan
                      placeholders of cars that never pit again
                      (np.nan when pitlaps is empty or no car pits again)
    """
    nextpit = []
    nextpit_map = {}
    for carno, laps in pitlaps.items():
        # first pit strictly after startlap, else a nan placeholder
        upcoming = next((lap for lap in laps if lap > startlap), None)
        if upcoming is None:
            nextpit.append(np.nan)
        else:
            nextpit.append(upcoming)
            nextpit_map[carno] = upcoming

    if not nextpit:
        # bug fix: max() of an empty list used to raise ValueError
        return nextpit_map, np.nan
    # bug fix: plain max() is order-dependent when nan placeholders are
    # present (nan comparisons are always False); nanmax ignores them
    return nextpit_map, np.nanmax(nextpit)
def sim_init():
    """
    Snapshot the pit-related feature rows (lap status, caution laps in
    stint, laps in stint) into their *_SAVE rows, so a simulation can
    later reset to ground truth (see update_onets).

    Operates in place on the module-level laptime_data for _test_event.
    """
    run_ts = _run_ts
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue

        # ts-length statistics (kept for parity with the original)
        ts_len = [entry.shape[1] for entry in _data[2]]
        max_lap = int(np.max(ts_len))

        for rowid in range(_data[2].shape[0]):
            # rec is a view into laptime_data; copies happen row-to-row in place
            rec = _data[2][rowid]
            rec[COL_LAPSTATUS_SAVE, :] = rec[COL_LAPSTATUS, :]
            rec[COL_CAUTION_LAPS_INSTINT_SAVE, :] = rec[COL_CAUTION_LAPS_INSTINT, :]
            rec[COL_LAPS_INSTINT_SAVE, :] = rec[COL_LAPS_INSTINT, :]
def update_lapstatus(startlap):
    """
    Re-run the pit prediction model for every car in the test event,
    rewriting lap status from `startlap` onward (delegates to
    update_onets per time series).
    """
    run_ts = _run_ts
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue

        # ts-length statistics (kept for parity with the original)
        ts_len = [entry.shape[1] for entry in _data[2]]
        max_lap = int(np.max(ts_len))

        for rowid in range(_data[2].shape[0]):
            # _data[1][rowid] is the car number for this row
            update_onets(_data[2][rowid], startlap, _data[1][rowid])
def update_onets(rec, startlap, carno):
    """
    update lapstatus after startlap based on rec by the pit prediction model
    input:
        rec   ; a ts with multiple features COL_XXX (modified in place)
        startlap ; simulation restart point; status before startlap+1 is
                   restored from the *_SAVE rows (see sim_init)
        carno ; car number (used for debug reporting only)
    return:
        None  ; rec is updated for COL_LAPSTATUS, COL_CAUTION_LAPS_INSTINT,
                COL_LAPS_INSTINT
    """
    # this is the perfect empirical pit model for Indy500 2018:
    # [0] = stint lengths under green-dominated stints, [1] = caution-heavy stints
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
            [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
            [46, 45, 43, 48, 46, 45, 45, 43]]
    #pit_model = pit_model_all
    pit_model = pit_model_top8
    # loop from startlap
    nans, x= nan_helper(rec[_run_ts,:])
    nan_count = np.sum(nans)
    recx = rec[:, ~np.isnan(rec[_run_ts,:])]
    # remove short ts
    totallen = recx.shape[1]
    if startlap >= totallen:
        return
    #totallen = tsrec.shape[1]
    #ipdb.set_trace()
    #reset status :startlap + 1
    # restore ground truth up to (and including) startlap from the save rows;
    # everything after is regenerated below
    endpos = startlap + 1
    rec[COL_LAPSTATUS,:] = 0
    rec[COL_LAPSTATUS,:endpos] = rec[COL_LAPSTATUS_SAVE, :endpos]
    rec[COL_CAUTION_LAPS_INSTINT,:endpos] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :endpos]
    rec[COL_LAPS_INSTINT, :endpos] = rec[COL_LAPS_INSTINT_SAVE, :endpos]
    #rec[COL_LAPSTATUS,:] = rec[COL_LAPSTATUS_SAVE, :]
    #rec[COL_CAUTION_LAPS_INSTINT,:] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :]
    #rec[COL_LAPS_INSTINT, :] = rec[COL_LAPS_INSTINT_SAVE, :]
    debug_report('start update_onets', rec[COL_LAPSTATUS], startlap, carno)
    #loop on predict nextpit pos
    curpos = startlap
    while True:
        caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, curpos])
        laps_instint = int(rec[COL_LAPS_INSTINT, curpos])
        # sample a stint length from the empirical model; retry up to 10 times
        # until the sampled stint extends beyond the laps already run
        retry = 0
        while retry < 10:
            if caution_laps_instint <= 10:
                #use low model
                pred_pit_laps = random.choice(pit_model[0])
            else:
                pred_pit_laps = random.choice(pit_model[1])
            if pred_pit_laps <= laps_instint:
                retry += 1
                if retry == 10:
                    # give up: force the pit onto the next lap
                    pred_pit_laps = laps_instint + 1
                continue
            else:
                break
        nextpos = curpos + pred_pit_laps - laps_instint
        if nextpos >= totallen:
            # predicted pit falls past the end of the race: fill the
            # remaining stint counters without inserting a pit, then stop
            nextpos = totallen - 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos+1] = caution_laps_instint
            for _pos in range(curpos+1, nextpos+1):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            break
        else:
            #if (pred_pit_laps > laps_instint) and (nextpos < totallen):
            # a valid pit: mark it and reset the in-stint counters at nextpos
            rec[COL_LAPSTATUS, nextpos] = 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos] = caution_laps_instint
            rec[COL_CAUTION_LAPS_INSTINT, nextpos] = 0
            for _pos in range(curpos+1, nextpos):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            rec[COL_LAPS_INSTINT, nextpos] = 0
            #go forward
            curpos = nextpos
    debug_report('after update_onets', rec[COL_LAPSTATUS], startlap, carno)
    return
# Debug tracking of the simulation status.
# Status matrix shape: laps x (endCol x 5 features);
# features: target, lapstatus, lap_instint, caution_instint, trackstatus.
_status_mat = {}  # stepid -> status matrix
def debug_report_mat(startlap, maxnext):
    """
    output the status of the simulation

    NOTE(review): this function appears unfinished — it iterates the test
    event's records and binds `rec`, `fixedWidth`, `endCol`, but produces
    no output and returns nothing. Confirm before relying on it.
    """
    fixedWidth = 5
    endCol = 4
    run_ts = _run_ts
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        #header carno | lap#...
        #fixed width
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
# Car numbers to trace in the debug_* helpers below; empty disables tracing.
_debug_carlist = []
#_debug_carlist = [12]
def debug_report_ts(msg, rec, startlap, carno, col= COL_LAPSTATUS):
    """
    Print one feature row of a car's record, split at startlap.

    No-op unless carno is listed in the module-level _debug_carlist.
    """
    if carno in _debug_carlist:
        head = rec[col, : startlap + 1]
        tail = rec[col, startlap + 1:]
        print(f'--------- {msg}: {startlap} ----------')
        print(head)
        print('=' * 10)
        print(tail)
def debug_report(msg, rec, startlap, carno):
    """
    Print a 1-D array split at startlap (before / after markers).

    No-op unless carno is listed in the module-level _debug_carlist.
    """
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[: startlap + 1])
        print('=' * 10)
        print(rec[startlap + 1:])
def debug_print(msg):
    """Print msg only when at least one car is being debug-traced."""
    if _debug_carlist:
        print(msg)
# works on predicted lap status
def sim_onestep_pred(predictor, prediction_length, freq,
                 startlap, endlap,
                 oracle_mode = MODE_ORACLE,
                 verbose = False
                ):
    """
    Autoregressive simulation step that works on PREDICTED lap status
    (the *_SAVE rows hold the ground truth; see sim_init/update_onets).
    Forecasts are fed back into the target so each window builds on the
    previous predictions.

    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus (ground truth from COL_LAPSTATUS_SAVE)
            1,: -> true target
            2,: -> pred target (true values overwritten window by window)
            3, -> placeholder
            4, -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    # endpos marks the exclusive end of the current forecast window
    endpos = startlap + prediction_length + 1
    while(endpos <= endlap + prediction_length + 1):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            #ipdb.set_trace()
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # skip cars whose race ends before this window
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # <3, totallen>
                # initialize this car's result matrix on first sight
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS_SAVE, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                # forecasts_et will be updated by forecasts
                # (row 2 is a view: earlier windows' predictions feed this window)
                target_val = forecasts_et[carno][2,:]
                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                # stint state at the last observed lap before the window
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test mode
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #                            laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec]
                             }
                          )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                           'start': start,
                           'feat_static_cat': static_cat,
                           'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                             }
                          )
                test_rec_cnt += 1
                #debug
                #debug_report('simu_onestep', rec, startlap, carno, col= _run_ts)
                debug_report(f'simu_onestep: {startlap}-{endpos}', target_val[:endpos], startlap, carno)
        # end of for each ts
        # RUN Prediction here
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=100,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            #update the forecasts , ready to use in the next prediction(regresive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
        #go forward
        endpos += prediction_length
    return forecasts_et
# works on lapstatus ground truth
def sim_onestep_ex(predictor, prediction_length, freq,
        startlap, endlap,
        oracle_mode = MODE_ORACLE,
        verbose = False
        ):
    """
    One-step autoregressive simulation between two laps, batched over all cars.

    Starting from `startlap`, repeatedly predicts `prediction_length` laps
    ahead for every car of the test event and writes each prediction back
    into the output matrix, so the next step conditions on it, until the
    window passes `endlap`.

    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap   ; first lap of the simulated stint
        endlap     ; last lap kept in the output (later laps are cleared)
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
    """
    # module-level experiment globals override local configuration
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len

    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()

    endpos = startlap + prediction_length + 1
    while(endpos <= endlap + prediction_length + 1):
        # make the testset for the current window
        # _data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                # skip everything except the test event
                continue

            # statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))

            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status, timediff]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()

                # remove nan (only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]

                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # this car's race ended before the current window
                if endpos > totallen:
                    continue

                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]

                # keep a copy of the raw status rows before any masking below
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()

                # lazily init the per-car output matrix <5, totallen>
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)

                # row 2 is updated in place by each step, so `target_val`
                # mixes ground truth (before startlap) with earlier predictions
                # (autoregressive forecasting)
                target_val = forecasts_et[carno][2,:]

                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0

                test_rec_cnt = 0

                # covariates for the current window
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                # test mode: fill the unknown future part of the covariates
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    # for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    # set the unknown future covariates to zero
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    # for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                if test_flag(oracle_mode, MODE_PREDPIT):
                    # NOTE(review): here lap status itself stays ground truth;
                    # only pit age is rebuilt from it lap by lap
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            # new pit resets the pit age
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage

                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec]
                             }
                          )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                             }
                          )
                test_rec_cnt += 1
            # end of for each ts

        # RUN Prediction here for the whole batch of cars
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=100,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)

        # save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            # map the global carid back to the car number
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))

            # update the forecasts, ready to use in the next prediction (regressive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()

        # go forward one window
        endpos += prediction_length

    # clear the unpredicted part beyond endlap
    for carno in forecasts_et.keys():
        forecasts_et[carno][2, endlap+1:] = np.nan

    return forecasts_et
def sim_onestep(predictor, prediction_length, freq,
        startlap, endlap,
        oracle_mode = MODE_ORACLE,
        verbose = False
        ):
    """
    One-step autoregressive simulation between two laps, one car at a time.

    Unlike sim_onestep_ex (which batches all cars per step), this version
    rolls each car's series forward independently, issuing one single-record
    prediction per window and feeding the result back into the series.

    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
    """
    # module-level experiment globals override local configuration
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len

    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()

    # add statistics for adjust test: [trackstatus, lapstatus]
    mae = [0,0]

    # _data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _test = []
        if events[_data[0]] != test_event:
            # skip everything except the test event
            continue

        # statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status, timediff]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()

            # remove nan (only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]

            # first, get target a copy; target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()

            # per-car output matrix <5, totallen>
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)

            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if True:
                # bug fix: fixed the split point for all cars/ts
                endpos = startlap + prediction_length
                while(endpos < endlap and endpos < totallen):
                    # RUN Prediction for single record
                    _test = []

                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                    # test mode: fill the unknown future part of the covariates
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        # for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        # set the unknown future covariates to zero
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        # for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    if test_flag(oracle_mode, MODE_PREDPIT):
                        # NOTE(review): lap status stays ground truth here;
                        # only pit age is rebuilt from it lap by lap
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                # new pit resets the pit age
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                              )

                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)

                    # get prediction result
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))

                    # update target_val so the next window conditions on this prediction
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]

                    # save the prediction into the output matrix
                    completed_laps = len(tss[0]) - prediction_length + 1
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()

                    test_rec_cnt += 1
                    # go forward one window
                    endpos += prediction_length

            # one ts done
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

    return forecasts_et
# pred pit differs to true pit
def get_acc_onestint_pred(forecasts, startlap, nextpit, nextpit_pred, trim=2, currank = False):
    """
    Score one stint per car when the predicted next pit may differ from the true one.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; eval for the stint start from startlap only
        nextpit ; {carno -> lap}, true next pitstop for all cars
        nextpit_pred ; {carno -> lap}, predicted next pitstop
        currank ; True -> score the "rank stays unchanged" baseline
    output:
        list of [carno, startlap, startrank,
                 endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    records = []
    for carno, mat in forecasts.items():
        race_len = mat.shape[1]
        # keep only cars that actually pit on `startlap`
        if startlap >= race_len or mat[0, startlap] != 1:
            continue
        # the next pit must be known on both the true and the predicted side
        if carno not in nextpit or carno not in nextpit_pred:
            continue
        pitpos = nextpit[carno]
        if np.isnan(pitpos):
            continue
        pitpos_pred = nextpit_pred[carno]
        if np.isnan(pitpos_pred):
            continue

        true_rank = mat[3, :]
        pred_rank = mat[4, :]
        startrank = true_rank[startlap - trim]
        endrank = true_rank[pitpos - trim]
        diff = endrank - startrank

        if currank:
            # force into currank model, zero doesn't work here
            pred_endrank = startrank
        else:
            pred_endrank = pred_rank[pitpos_pred - trim]
        pred_diff = pred_endrank - startrank

        records.append([carno, startlap, startrank,
                        endrank, diff, get_sign(diff),
                        pred_endrank, pred_diff, get_sign(pred_diff)
                        ])
    return records
# works when pred pitstop == true pitstop
def get_acc_onestint(forecasts, startlap, nextpit, trim=2, currank = False):
    """
    Score one stint per car, assuming the predicted pitstop equals the true one.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; eval for the stint start from startlap only
        nextpit ; {carno -> lap}, next pitstop for all cars
        currank ; True -> score the "rank stays unchanged" baseline
    output:
        list of [carno, startlap, startrank,
                 endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    records = []
    for carno, mat in forecasts.items():
        race_len = mat.shape[1]
        # keep only cars that actually pit on `startlap`
        if startlap >= race_len or mat[0, startlap] != 1:
            continue
        if carno not in nextpit:
            continue
        pitpos = nextpit[carno]
        if np.isnan(pitpos):
            continue

        true_rank = mat[3, :]
        pred_rank = mat[4, :]
        startrank = true_rank[startlap - trim]
        endrank = true_rank[pitpos - trim]
        diff = endrank - startrank

        # currank: force into the "no change" baseline, zero doesn't work here
        pred_endrank = startrank if currank else pred_rank[pitpos - trim]
        pred_diff = pred_endrank - startrank

        records.append([carno, startlap, startrank,
                        endrank, diff, get_sign(diff),
                        pred_endrank, pred_diff, get_sign(pred_diff)
                        ])
    return records
# pred sim
def run_simulation_pred(predictor, prediction_length, freq,
            datamode = MODE_ORACLE):
    """
    Full-race simulation driven by the *predicted* pit model.

    step:
        1. init the lap status model
        2. loop on each (ground-truth) pit lap
            1. update the predicted lap status up to that lap
            2. one-step simulation up to the farthest predicted next pit
            3. eval stint performance

    input:
        predictor         ; trained gluonts predictor
        prediction_length ; laps per prediction step
        freq              ; pandas frequency string
        datamode          ; oracle_mode bit flags forwarded to the simulator
    return:
        df ; one row per (car, stint) with true/pred end rank and sign
    """
    rankret = []
    # the ground truth pit laps
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    for pitlap in allpits:
        # 1. update lap status prediction up to this lap
        debug_print(f'start pitlap: {pitlap}')
        update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        # 2. get the next pit on both the true and predicted side
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)

        # debug trace for car 12 (see _debug_carlist)
        if 12 in nextpit and 12 in nextpit_pred:
            debugstr = f'nextpit: {nextpit[12]}, {nextpit_pred[12]}, maxnext: {maxnext}, {maxnext_pred}'
            debug_print(debugstr)

        # run one step sim from pitlap to the farthest predicted next pit
        forecast = sim_onestep_pred(predictor, prediction_length, freq,
                   pitlap, maxnext_pred,
                   oracle_mode = datamode
                   )
        debug_print(f'simulation done: {len(forecast)}')

        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break

        # evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred)
        rankret.extend(ret)

    # collect all stint records into a dataframe
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                    'endrank', 'diff', 'sign',
                                    'pred_endrank', 'pred_diff', 'pred_sign',
                                   ])
    return df
# oracle sim
def run_simulation(predictor, prediction_length, freq,
            datamode = MODE_ORACLE):
    """
    Full-race simulation driven by the ground-truth (oracle) pit laps.

    step:
        1. get all true pit laps
        2. loop on each pit lap
            1. one-step simulation up to the farthest true next pit
            2. eval stint performance

    input:
        predictor         ; trained gluonts predictor
        prediction_length ; laps per prediction step
        freq              ; pandas frequency string
        datamode          ; oracle_mode bit flags forwarded to the simulator
    return:
        df ; one row per (car, stint) with true/pred end rank and sign
    """
    rankret = []
    allpits, pitmat, maxlap = get_pitlaps()
    for pitlap in allpits:
        print(f'start pitlap: {pitlap}')
        nextpit, maxnext = get_nextpit(pitmat, pitlap)

        # run one step sim from pitlap to the farthest next pit
        forecast = sim_onestep_ex(predictor, prediction_length, freq,
                   pitlap, maxnext,
                   oracle_mode = datamode
                   )
        print(f'simulation done: {len(forecast)}')

        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break

        # evaluate for this stint
        ret = get_acc_onestint(forecasts_et, pitlap, nextpit)
        rankret.extend(ret)

    # collect all stint records into a dataframe
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                    'endrank', 'diff', 'sign',
                                    'pred_endrank', 'pred_diff', 'pred_sign',
                                   ])
    return df
#
# ------------
#
def longterm_predict(predictor, runs, prediction_length, freq,
        useeid = False,
        run_ts= COL_LAPTIME,
        test_event = 'Indy500-2018',
        test_cars = [],
        use_global_dict = True,
        oracle_mode = MODE_ORACLE,
        half_moving_win = 0,
        train_ratio=0.8,
        log_transform = False,
        verbose = False
        ):
    """
    split the ts to train and test part by the ratio

    Rolling-window, autoregressive long-term prediction over the test event.
    For every car it slides a window over the race; at each split point it
    builds the covariates according to `oracle_mode`, predicts
    `prediction_length` laps, writes the prediction back into the series
    (so later windows condition on it), and stores it in the output matrix.

    input:
        oracle_mode: false to simulate prediction in real by
            set the covariates of track and lap status as nan in the testset
        half_moving_win ; extend to 0:-1, 1:-1/2plen, 2:-plen (rolling step)
        NOTE(review): the run_ts/test_event arguments are immediately
        shadowed by the module globals _run_ts/_test_event below, and
        test_cars is unused here — confirm whether that is intended.
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
    """
    # module-level experiment globals override the keyword arguments
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio

    init_track_model()
    init_adjust_track_model()

    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    train_set = []
    test_set = []
    forecasts_et = {}

    # select run (-1 -> all events)
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()

    # add statistics for adjust test: [trackstatus, lapstatus]
    mae = [0,0]

    # _data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []

        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            # only the test event is processed; skip the others
            continue

        # statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)

        if context_ratio != 0.:
            # context length derived from the ratio
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        if verbose:
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status, timediff]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()

            # remove nan (only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                # simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            # first, get target a copy; target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()

            # per-car output matrix <5, totallen>
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)

            if log_transform:
                target_val = np.log(target_val + 1.0)

            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)

            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                        'start': start,
                        'feat_static_cat': static_cat,
                        'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                               rec[COL_LAPSTATUS,:]]
                         }
                      )
            else:
                # multiple test ts; rolling window step size
                if half_moving_win == 1:
                    step = int(prediction_length/2)
                elif half_moving_win == 2:
                    step = prediction_length
                else:
                    step = 1

                # bug fix: fixed the split point for all cars/ts
                for endpos in range(context_len+prediction_length, max_len, step):
                    # check if enough for this ts
                    if endpos > totallen:
                        break

                    # RUN Prediction for single record
                    _test = []

                    # check pitstop(stint) in the last prediction window:
                    # use ground truth of target before the last pitstop
                    if np.sum(lap_status[endpos-2*prediction_length:endpos-prediction_length]) > 0:
                        # pit found -> re-align endpos to just after the pit
                        pitpos = np.where(lap_status[endpos-2*prediction_length:endpos-prediction_length] == 1)
                        endpos = endpos-2*prediction_length + pitpos[0][0] + prediction_length + 1

                        # check if enough for this ts
                        if endpos > totallen:
                            break

                        # reset target and status rows to ground truth up to the pit
                        target_val = rec[run_ts,:].copy().astype(np.float32)
                        rec[COL_LAPSTATUS, :] = lap_status
                        rec[COL_TRACKSTATUS, :] = track_status
                        rec[COL_LAPS_INSTINT, :] = pitage_status

                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                    # test mode: fill the unknown future part of the covariates
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        # for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        # set the unknown future covariates to zero
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        # for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec

                    if test_flag(oracle_mode, MODE_PREDPIT):
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                laps_instint,prediction_length)
                        # for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                # new pit resets the pit age
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    # disturbance analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status:
                        # future 1s in trackstatus, pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0

                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, i.e. the caution lap length:
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                # found the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec

                    # end of adjustments, accumulate the difference
                    # rec_raw .vs. track_rec, lap_rec
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))

                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                              )

                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)

                    # get prediction result
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))

                    # update target_val so later windows condition on this prediction
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]

                    # save the prediction into the output matrix
                    completed_laps = len(tss[0]) - prediction_length + 1
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
                    test_rec_cnt += 1

            # one ts done
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

    return forecasts_et
# In[12]:
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset):
    """
    evaluate stint rank by laptime forecasting

    Converts true/predicted lap times to cumulative elapsed time, then ranks
    all cars against each other lap by lap (double argsort; NaN entries sort
    last). Ranks are written back into rows 3 (true) and 4 (pred) of each
    car's matrix, in place.

    input:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
        start_offset[]; elapsed time for lap0, for one specific event
            (DataFrame with 'car_number' and 'elapsed_time' columns)
        prediction_length ; (unused here)
    return:
        forecasts
    """
    # fixed car ordering for the rank matrices
    carlist = list(forecasts_et.keys())
    caridmap={key:idx for idx, key in enumerate(carlist)}

    # convert to elapsed time
    # todo, Indy500 -> 200 max laps; unused tail stays NaN
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan

    for carno in forecasts_et.keys():
        # NOTE(review): `offset` is only assigned when start_offset is a
        # DataFrame; any other type would raise NameError below — confirm
        # callers always pass a DataFrame.
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]

        lapnum = len(forecasts_et[carno][1,:])
        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed

        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed

    # calculate rank per lap; double argsort, supports nan (sorted last)
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)

    # save the rank back into rows 3/4
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]

    return forecasts_et
def eval_stint_direct(forecasts_et, prediction_length):
    """
    evaluate rank by timediff forecasting

    Ranks all cars against each other lap by lap directly on the target
    values (double argsort; NaN entries sort last), and writes the ranks
    back into rows 3 (true) and 4 (pred) of each car's matrix, in place.

    input:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
        prediction_length ; (unused here)
    return:
        forecasts
    """
    # fixed car ordering for the rank matrices
    cars = list(forecasts_et.keys())
    row_of = {car: row for row, car in enumerate(cars)}

    # todo, Indy500 -> 200 max laps; unused tail stays NaN
    maxlap = 200
    diff_time = np.full((2, len(cars), maxlap), np.nan)

    for car, mat in forecasts_et.items():
        laps = mat.shape[1]
        diff_time[0, row_of[car], :laps] = mat[1, :]
        diff_time[1, row_of[car], :laps] = mat[2, :]

    # double argsort turns sorted positions into per-lap ranks (nan sorts last)
    true_rank = np.argsort(np.argsort(diff_time[0], axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(diff_time[1], axis=0), axis=0)

    # save the ranks back into rows 3/4
    for car, mat in forecasts_et.items():
        laps = mat.shape[1]
        mat[3, :] = true_rank[row_of[car], :laps]
        mat[4, :] = pred_rank[row_of[car], :laps]

    return forecasts_et
# calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    Same computation as eval_stint_bylaptime: cumulative elapsed time per
    car, then per-lap ranking via double argsort (NaN sorts last); ranks
    are written back into rows 3/4 of each car's matrix, in place.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix (rows as documented
            on eval_stint_bylaptime)
        start_offset[]; elapsed time for lap0, for one specific event
        prediction_length ; (unused here)
    return:
        forecasts_et ; same dict, rows 3/4 filled with true/pred ranks
    """
    # fixed car ordering for the rank matrices
    carlist = list(forecasts_et.keys())
    caridmap={key:idx for idx, key in enumerate(carlist)}

    # convert to elapsed time
    # todo, Indy500 -> 200 max laps; unused tail stays NaN
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan

    for carno in forecasts_et.keys():
        # NOTE(review): `offset` is only assigned when start_offset is a
        # DataFrame; any other type would raise NameError below — confirm
        # callers always pass a DataFrame.
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]

        lapnum = len(forecasts_et[carno][1,:])
        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed

        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed

    # calculate rank per lap; double argsort, supports nan (sorted last)
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)

    # save the rank back into rows 3/4
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]

    return forecasts_et
# In[13]:
def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
             test_event='Indy500-2018', test_cars = [],
             datamode = MODE_ORACLE,model = 'oracle'):
    """
    Load a trained model and run long-term prediction on the test event.

    dependency: test_event, test on one event only

    input:
        prediction_length ; laps per prediction window
        half_moving_win   ; 0/1/2, rolling-window step selector
        train_ratio       ; split ratio the model was trained with
        trainid           ; checkpoint id, part of the model path
        test_event        ; NOTE(review): longterm_predict actually uses the
                            module global _test_event, not this argument
        test_cars         ; subset of cars to evaluate ([] -> all)
        datamode          ; oracle_mode flags forwarded to longterm_predict
        model             ; model name to load
    return:
        forecasts ; carno -> 5 x totallen matrix (see longterm_predict)
    """
    retdf = []
    pred_ret = {}
    ds_ret = {}
    rank_result = {}
    predictor = {}

    print('exp:',inspect.stack()[0][3],'model:', model,
         'datamode:', get_modestr(datamode),'eval:', _exp_id )

    predictor[model] = load_model(prediction_length, model,
                trainid=trainid)

    # run the rolling-window prediction on the test event
    forecasts = longterm_predict(predictor[model],
                 events_id[_test_event], prediction_length,freq,
                 oracle_mode=datamode,
                 run_ts = _run_ts,
                 test_cars=test_cars,
                 half_moving_win= half_moving_win,
                 train_ratio=train_ratio
                 )

    return forecasts
# In[14]:
def get_sign(diff):
    """Return the sign of *diff* as 1 (positive), -1 (negative), or 0."""
    if diff == 0:
        return 0
    return 1 if diff > 0 else -1
def get_stint_acc(forecasts, trim=2, currank = False):
    """
    Score every stint of every car: true end-of-stint rank vs predicted.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        currank ; True -> score the "rank stays unchanged" baseline
    output:
        DataFrame with columns
        carno, stintid, startrank, endrank, diff, sign,
        pred_endrank, pred_diff, pred_sign
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1, :])
        true_rank = forecasts[carno][3, :]
        pred_rank = forecasts[carno][4, :]
        # laps where this car pitted
        pitpos_list = np.where(forecasts[carno][0, :] == 1)[0]

        def add_stint(stintid, startrank, end_idx):
            # append one stint record comparing true vs predicted end rank
            endrank = true_rank[end_idx]
            diff = endrank - startrank
            if currank:
                # force into currank model, zero doesn't work here
                pred_endrank = startrank
            else:
                pred_endrank = pred_rank[end_idx]
            pred_diff = pred_endrank - startrank
            rankret.append([carno, stintid, startrank,
                            endrank, diff, get_sign(diff),
                            pred_endrank, pred_diff, get_sign(pred_diff)
                            ])

        stintid = 0
        startrank = true_rank[0]
        for pitpos in pitpos_list:
            add_stint(stintid, startrank, pitpos - trim)
            stintid += 1
            startrank = true_rank[pitpos - trim]

        # final stint after the last pit.
        # bugfix: the original indexed pitpos_list[-1] unconditionally and
        # raised IndexError for a car with no pit stop at all; such a car is
        # now scored as a single whole-race stint instead.
        if len(pitpos_list) == 0 or pitpos_list[-1] < lapnum - 1:
            add_stint(stintid, startrank, -1)

    # collect all stint records into a dataframe
    df = pd.DataFrame(rankret, columns =['carno', 'stintid', 'startrank',
                                    'endrank', 'diff', 'sign',
                                    'pred_endrank', 'pred_diff', 'pred_sign',
                                   ])
    return df
#
# configuration
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
# default run configuration; most of these module globals are overridden by
# the command-line handling in the __main__ block below
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'
_test_event = 'Indy500-2018'
#_test_event = 'Indy500-2019'
_train_len = 40
_feature_mode = FEATURE_STATUS
_context_ratio = 0.
#_task_id = 'timediff' # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
#
#_task_id = 'lapstatus' # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus' #rank, laptime, laptim2rank, timediff2rank...
_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
# In[16]:
# dataset globals, populated by init()
global_start_offset = {}  # event -> DataFrame[car_number, elapsed_time] at lap 0
global_carids = {}        # carno -> encoded carid
laptime_data = None       # unpickled per-event time-series data
freq = "1min"
decode_carids = {}        # encoded carid -> carno
# the race events covered by the prebuilt dataset
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}_v9'
def init():
    """Populate the module-level dataset globals.

    Loads each event's CSV to extract the per-car start offsets, then
    unpickles the prebuilt laptime dataset and builds the car-id decoder.
    """
    global global_carids, laptime_data, global_start_offset, decode_carids

    loaded = {}
    for event in events:
        loaded[event] = load_data(event)
        alldata, rankdata, acldata = loaded[event]
        # the elapsed time at lap 0 is each car's start offset for this event
        lap0 = rankdata[rankdata['completed_laps']==0]
        global_start_offset[event] = lap0[['car_number','elapsed_time']]

    import pickle
    picklefile = f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
    with open(picklefile, 'rb') as f:
        # pickle protocol is detected automatically on load
        global_carids, laptime_data = pickle.load(f, encoding='latin1')

    decode_carids = {carid: carno for carno, carid in global_carids.items()}
    print(f'init: load dataset with {len(laptime_data)} races, {len(global_carids)} cars')
def runtest(modelname, model, datamode, naivemode, trainid= "2018"):
    """Run one model configuration end-to-end and score its stint forecasts.

    Returns (acc, mae, rmse, r2); (0, 0, 0, 0) when the current _exp_id has
    no evaluation path.
    """
    forecast = run_exp(2, 2, train_ratio=0.1, trainid=trainid,
                       datamode=datamode, model=model)

    # convert the raw forecasts into per-lap rank estimates for the task
    if _exp_id in ('rank', 'timediff2rank'):
        forecasts_et = eval_stint_direct(forecast, 2)
    elif _exp_id == 'laptime2rank':
        forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
    else:
        print(f'Error, {_exp_id} evaluation not support yet')
        return 0,0, 0,0

    df = get_stint_acc(forecasts_et, currank=naivemode, trim=_trim)

    hits = df[df['sign'] == df['pred_sign']]
    acc = len(hits) / len(df)
    pred_vals = df['pred_diff'].values
    true_vals = df['diff'].values
    mae1 = np.sum(np.abs(pred_vals - true_vals)) / len(df)
    rmse = mean_squared_error(pred_vals, true_vals)
    mae = mean_absolute_error(pred_vals, true_vals)
    r2 = r2_score(pred_vals, true_vals)
    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}')
    return acc, mae, rmse, r2
def get_evalret(df):
    """Score a stint result frame (see get_stint_acc) and print a summary.

    Reports sign accuracy, MAE, RMSE and R2 of the predicted rank change,
    plus a naive baseline that predicts "rank unchanged" for every stint.
    Returns (acc, mae, rmse, r2).
    """
    hits = df[df['sign'] == df['pred_sign']]
    acc = len(hits) / len(df)

    pred_vals = df['pred_diff'].values
    true_vals = df['diff'].values
    mae1 = np.sum(np.abs(pred_vals - true_vals)) / len(df)
    rmse = mean_squared_error(pred_vals, true_vals)
    mae = mean_absolute_error(pred_vals, true_vals)
    r2 = r2_score(pred_vals, true_vals)

    # naive baseline: end rank equals start rank
    n_correct = df[df['startrank'] == df['endrank']]
    acc_naive = len(n_correct) / len(df)
    mae_naive = np.mean(np.abs(true_vals))

    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    return acc, mae, rmse, r2
# In[20]:
def mytest():
    """Evaluate every configured model variant on the test event.

    Results are cached to a CSV; when the file already exists it is read back
    and returned instead of re-running the evaluation.

    return:
        DataFrame with columns runid, acc, mae, rmse, r2
    """
    savefile = f'stint-evluate-{_exp_id}-d{_dataset_id}-t{_test_event}-c{_context_ratio}_trim{_trim}.csv'
    if os.path.exists(savefile):
        print(f'{savefile} already exists, bye')
        retdf = pd.read_csv(savefile)
        # return the cached result (previously dropped on the floor)
        return retdf

    # runid -> [model name, oracle-mode bitmask, naive-mode flag]
    config = {'fulloracle':['oracle',MODE_ORACLE,False],
              'laponly':['oracle',MODE_ORACLE_LAPONLY,False],
              'notracklap':['oracle',MODE_NOTRACK + MODE_NOLAP,False],
              'fullpred':['oracle',MODE_PREDTRACK + MODE_PREDPIT,False],
              'curtrack':['oracle',MODE_TESTCURTRACK,False],
              'zerotrack':['oracle',MODE_TESTZERO,False],
              'predtrack':['oracle',MODE_PREDTRACK + MODE_ORACLE_TRACKONLY,False],
              'predpit':['oracle',MODE_PREDPIT + MODE_ORACLE_LAPONLY,False],
              'deepAR':['deepAR',MODE_ORACLE,False],
              'naive':['zero',MODE_ORACLE, True],
             }
    cols = ['runid','acc','mae', 'rmse', 'r2']
    result = []
    for modelname in config.keys():
        acc, mae, rmse, r2 = runtest(modelname, config[modelname][0],
                config[modelname][1],config[modelname][2])
        result.append([modelname, acc, mae, rmse, r2])
    retd = pd.DataFrame(result,columns=cols)
    # bug fix: write to the same file the existence check above looks for;
    # previously the output name lacked the _trim suffix, so the cached-result
    # branch could never trigger
    retd.to_csv(savefile, float_format='%.3f')
    return retd
# In[ ]:
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # cmd argument parser
    usage = 'stint_predictor_fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    parser.add_option("--contextratio", dest="contextratio", default=0.)
    parser.add_option("--trim", dest="trim", type=int, default=2)
    opt, args = parser.parse_args()
    # set global parameters; these rebind the module-level configuration
    # globals that run_exp/runtest/mytest read
    _dataset_id = opt.datasetid
    _test_event = opt.testevent
    _trim = opt.trim
    # map the task id to the time-series column to model and the eval mode
    if opt.taskid == 'laptime':
        _task_id = 'laptime' # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
        _exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff' # rank,laptime, the trained model's task
        _run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
        _exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'rank':
        _task_id = 'rank' # rank,laptime, the trained model's task
        _run_ts = COL_RANK #COL_LAPTIME,COL_RANK
        _exp_id='rank' #rank, laptime, laptim2rank, timediff2rank...
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)
    if _dataset_id=='' or _test_event=='':
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)
    # dataset ids containing 'pitage' carry the extra pit-age covariate
    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE
    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
            _task_id)
    init()
    mytest()
| 137,426 | 36.599726 | 310 | py |
rankpredictor | rankpredictor-master/src/indycar/model/stint_simulator.py | #!/usr/bin/env python
# coding: utf-8
# ### stint simulator
# based on: Stint-Predictor-Fastrun
#
#
# long term predictor by continuously regressive forecasting at each pitstop
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# In[2]:
import os
# notebook-cell residue: `os` is already imported above and the os.getcwd()
# result is discarded; random.seed() re-seeds the PRNG from system entropy
random.seed()
os.getcwd()
#GPUID = 1
# ### global constants
# In[3]:
#
# feature-row indices into each car's data matrix in laptime_data
# (rec[feature, lapnumber]; see make_dataset_byevent)
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8
# share the memory
#COL_LAPSTATUS_PRED = 8 # for dynamic lapstatus predictions
#LAPSTATUS SAVED in forecast_et
COL_LAPSTATUS_SAVE = 0 #laptime no use
COL_CAUTION_LAPS_INSTINT_SAVE=7
COL_LAPS_INSTINT_SAVE= 8
# covariate feature sets: track+lap status only, or additionally pit age
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
# oracle mode (bitmask flags combined with + / & and tested via test_flag)
MODE_ORACLE = 1024 # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2
# oracle mode for training
# NOTE(review): MODE_NOLAP/MODE_NOTRACK reuse the bit values of
# MODE_ORACLE_TRACKONLY/MODE_ORACLE_LAPONLY, so the two pairs cannot be
# distinguished in a combined bitmask — confirm this aliasing is intended
MODE_NOLAP = 1
MODE_NOTRACK = 2
# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32
# disturbance analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256
# bitflag -> printable name (used by get_modestr)
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
             MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
             MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
             MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
             MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
             MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
# In[ ]:
# In[4]:
def load_data(event, year=0):
    """
    Load the final lap-by-lap CSV for one event.

    input:
        event ; event name, e.g. 'Indy500-2018'
        year ; optional year suffix for the CSV name (0 / falsy = none)
    return:
        alldata ; the raw records
        rankdata ; records deduplicated per (car, lap), ordered by elapsed time
        acldata ; completed-laps dataset built from all cars (see make_cl_data)
    """
    # bug fix: `year` was concatenated to a str directly, which raises
    # TypeError for any non-zero numeric year; convert explicitly and accept
    # either an int or a string year
    if year:
        inputfile = '../data/final/C_' + event + '-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'
    dataset = pd.read_csv(inputfile)

    final_lap = max(dataset.completed_laps)

    # cars with a record at the final lap finished the race
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values

    # make a copy of the full data before filtering down to the finishers
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]

    # order by elapsed time and keep the first record of every (car, lap)
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """
    Build the per-(car, lap) dataset with rank and elapsed-time deltas.

    input:
        dataset ; raw lap records (must contain the columns dropped below)
    return:
        DataFrame with car_number, completed_laps, rank, elapsed_time,
        rank_diff, time_diff, current_status, track_status, lap_status
    """
    # pick up data with valid rank: order by elapsed time and keep the first
    # record of every (car, lap) pair
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
                          "last_pitted_lap","start_position","laps_led"], axis=1)

    # per-car deltas; uni_ds is sorted by (car, lap), so a plain diff() is
    # valid except across car boundaries, which are zeroed below.
    # bug fix: use .loc instead of chained indexing (uni_ds['col'][mask] = 0),
    # which is deprecated and silently ineffective under pandas copy-on-write
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds.loc[mask, 'rank_diff'] = 0

    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds.loc[mask, 'time_diff'] = 0

    df = uni_ds[['car_number','completed_laps','rank','elapsed_time',
                 'rank_diff','time_diff',"current_status", "track_status", "lap_status"]]
    return df
# In[5]:
def nan_helper(y):
    """Locate NaNs in a 1-d array and provide a logical-to-positional index converter.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, logical indices of NaNs
        - index, a function, with signature indices= index(logical_indices),
          to convert logical indices of NaNs to 'equivalent' indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x= nan_helper(y)
        >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
    """
    nan_mask = np.isnan(y)
    to_positions = lambda mask: mask.nonzero()[0]
    return nan_mask, to_positions
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# helpers for decoding an oracle-mode bitmask into readable mode names
#
def get_modestr(a):
    """Render an oracle-mode bitmask as a comma-terminated list of mode names."""
    names = [_mode_map[flag] for flag in _mode_map if test_flag(a, flag)]
    return ''.join('%s,' % name for name in names)
# caches keyed by endpos -> track-status vector of length prediction_length
# (_track_pred holds predictions, _track_true the corresponding ground truth)
_track_pred = {}
_track_true = {}
def init_track_model():
    """Reset the track-status prediction caches (call once per simulation run)."""
    global _track_pred,_track_true
    _track_pred = {}
    _track_true = {}
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """
    return the predicted track status

    Samples a caution-period length from an empirical model and, when a
    caution is still running at the forecast start, extends it into the
    window. Results are cached per endpos, so repeated calls for the same
    split point return the same prediction.

    input:
        track_rec ; track status sequence up to endpos (1 = yellow flag)
        endpos ; split position, used as the cache key
        prediction_length ; length of the window to predict
        context_len ; how far back to look for a running caution period
    return:
        0/1 vector of length prediction_length
    """
    global _track_pred,_track_true

    # this is the perfect track model for Indy500 2018
    track_model = [6,4,4,5,6,6,4]

    if endpos in _track_pred:
        return _track_pred[endpos]
    else:
        #get yflag lap count from the start pred point
        yflaplen = 0
        for i in range(1, context_len):
            if track_rec[- prediction_length - i] == 1:
                yflaplen += 1
            else:
                break

        #laps remain, fill into the future
        trackpred = np.array([0 for x in range(prediction_length)])
        yflap_pred = random.choice(track_model)
        # only extend a caution that is already in progress and would be
        # longer than what has been observed so far
        if yflaplen > 0 and yflap_pred > yflaplen:
            trackpred[:(yflap_pred - yflaplen)] = 1

        _track_pred[endpos] = trackpred
        _track_true[endpos] = track_rec[- prediction_length:].copy()

        return trackpred
# cache keyed by endpos -> adjusted track-status vector of prediction_length
_track_adjust = {}
def init_adjust_track_model():
    """Reset the track-status adjustment cache (call once per simulation run)."""
    global _track_adjust
    _track_adjust = {}
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """
    Randomly perturb the end of a caution period by -1/0/+1 laps.

    input:
        track_rec ; track status sequence up to endpos
        endpos ; split position, used as the cache key
        prediction_length ; length of the prediction window
        tailpos ; <0 end pos of 1 (negative index of the last caution lap)
    return the predicted track status (cached per endpos)
    """
    global _track_adjust

    # equally likely: shorten by one lap, keep as-is, or shift the tail
    track_model = [-1,0,1]

    if endpos in _track_adjust:
        return _track_adjust[endpos]
    else:
        yflap_adjust = random.choice(track_model)

        #laps remain, fill into the future
        trackadjust = track_rec[-prediction_length:].copy()
        if yflap_adjust == -1:
            trackadjust[tailpos] = 0
        elif yflap_adjust == 1:
            # NOTE(review): this branch moves the last caution lap one slot
            # later rather than appending an extra caution lap — confirm
            # that this is the intended "+1" adjustment
            trackadjust[tailpos] = 0
            if (tailpos + 1) <= -1:
                trackadjust[tailpos+1] = 1

        _track_adjust[endpos] = trackadjust

        return trackadjust
# carno -> lap_status
_lap_adjust = {}
_empirical_model = {}
def init_adjust_pitmodel():
    """Reset the per-car adjusted-lap-status cache and its statistics.

    bug fix: `_empirical_model` was rebound without a `global` declaration,
    which created a useless local and left the module-level statistics dict
    un-reset between simulation runs.
    """
    global _lap_adjust, _empirical_model
    _lap_adjust = {}
    _empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """
    init the lapstatus for each car, save it for future reference

    Randomly moves every pit lap by an offset drawn from _adjust_model and
    caches the adjusted sequence per car, so later calls for the same car
    return the identical perturbation.

    input:
        carno ; car number, used as the cache key
        lapstatus ; the trueth (0/1 pit-lap sequence)
        force ; when True, keep re-drawing until the moved pit lands inside
            the sequence; when False, give up after a single failed draw
    return:
        the (cached) adjusted 0/1 lap-status sequence
    """
    if carno not in _lap_adjust:
        #adjust it
        lapadjust = lapstatus.copy()
        for pos in range(0, len(lapstatus)):
            if lapadjust[pos] == 1:
                success = False
                while(not success):
                    # adjust this pit lap position
                    pos_adjust = get_random_choice(_adjust_model)
                    new_pos = pos + pos_adjust

                    if new_pos >= 0 and new_pos < len(lapstatus):
                        #valid
                        lapadjust[pos] = 0
                        lapadjust[new_pos] = 1
                        success = True

                        #add statistics on the applied offsets
                        if pos_adjust not in _empirical_model:
                            _empirical_model[pos_adjust] = 1
                        else:
                            _empirical_model[pos_adjust] += 1

                    if force==False:
                        break

        _lap_adjust[carno] = lapadjust

    return _lap_adjust[carno]
def build_random_model(modeldict):
    """
    Build a sampling table from a discrete distribution.

    input:
        modeldict ; {val: probability} (weights need not sum to 1)
    return:
        model ; array of rows [val, cdf], sorted by val, with the cumulative
        column normalized so the last entry is exactly 1
    """
    values = sorted(modeldict.keys())
    table = np.zeros((len(values), 2))
    running = 0.0
    for row, val in enumerate(values):
        running += modeldict[val]
        table[row, 0] = val
        table[row, 1] = running
    # normalize the cumulative column
    table[:, 1] = table[:, 1] / running
    return table
def print_model(model, iscdf=True):
    """
    Pretty-print a sampling table as per-value probabilities.

    input:
        model ; rows of [val, cdf] (or [val, weight] when iscdf is False)
        iscdf ; True when column 1 is cumulative; False for raw weights
    """
    ordered = model[np.argsort(model[:, 0])]
    total = 1.
    if not iscdf:
        total = np.sum(ordered[:,1])
    prev_cdf = 0
    pairs = []
    for row in ordered:
        # per-value probability: cdf delta (or weight / total)
        pairs.append((row[0], (row[1] - prev_cdf) / total))
        if iscdf:
            prev_cdf = row[1]
    print(['%d:%.3f' % (x[0], x[1]) for x in pairs])
def get_random_choice(model):
    """
    Sample one value from a [val, cdf] table via inverse-CDF lookup.

    input:
        model ; [val, cdf]
    return:
        val drawn with probability equal to its cdf interval
    """
    u = np.random.rand()
    # number of cdf entries strictly below u == index of the chosen interval
    row = int(np.sum(model[:, 1] < u))
    return int(model[row, 0])
# pit-lap offset distribution (offset -> weight); build_random_model
# normalizes the weights, so they need not sum to 1
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
_adjust_model = build_random_model(_modeldict)
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """
    Randomly move each pit lap in the prediction window.

    input:
        lap_rec ; lap-status sequence; only the last prediction_length
            entries are copied and perturbed
        prediction_length ; window length
        force ; when True, re-draw until the moved pit stays inside the
            window; when False, give up after one failed draw
    return the predicted lap status (a perturbed copy of the window)
    """
    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            success = False
            while(not success):
                # adjust this pit lap position by an offset from _adjust_model
                pos_adjust = get_random_choice(_adjust_model)
                new_pos = pos + pos_adjust

                if new_pos >= 0 and new_pos < prediction_length:
                    #valid
                    lapadjust[pos] = 0
                    lapadjust[new_pos] = 1
                    success = True
                if force==False:
                    break

    return lapadjust
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
    """
    Randomly shift each predicted pit lap by -1/0/+1 within the window.

    input:
        lap_rec ; lap-status sequence; only the last prediction_length
            entries are copied and perturbed
        endpos ; unused, kept for signature compatibility with callers
        prediction_length ; length of the window to adjust
    return:
        a copy of the window with each pit (value 1) possibly moved one slot
        left or right; shifts that would leave the window are dropped
    """
    adjust_model = [-1, 0, 1]
    # bug fix: removed an unused initial random.choice draw that consumed
    # randomness without ever affecting the result
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            # adjust this pit lap position
            pos_adjust = random.choice(adjust_model)
            if pos_adjust == -1 and pos - 1 >= 0:
                lapadjust[pos] = 0
                lapadjust[pos - 1] = 1
            elif pos_adjust == 1 and pos + 1 < prediction_length:
                lapadjust[pos] = 0
                lapadjust[pos + 1] = 1
    return lapadjust
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
    """
    Sample a pit-stop prediction for the next prediction_length laps.

    Draws a stint length from an empirical model (built from Indy500 2018)
    and marks a single pit in the window if that stint would end inside it.
    NOTE(review): stints with >10 caution laps sample from the first
    (shorter-stint) list — confirm the intended mapping of the two lists.
    """
    # empirical stint-length samples from Indy500 2018
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
                [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
                [46, 45, 43, 48, 46, 45, 45, 43]]
    pit_model = pit_model_all

    # pick the sample list based on the caution-lap count in this stint
    source = pit_model[0] if cuation_laps_instint > 10 else pit_model[1]
    pred_pit_laps = random.choice(source)

    # mark the pit lap only when the sampled stint ends inside the window
    pitpred = np.array([0 for x in range(prediction_length)])
    if laps_instint < pred_pit_laps <= laps_instint + prediction_length:
        pitpred[pred_pit_laps - laps_instint - 1] = 1

    return pitpred
def make_dataset_byevent(runs, prediction_length, freq,
                       useeid = False,
                       run_ts= COL_LAPTIME,
                       test_event = 'Indy500-2018',
                       test_cars = [],
                       use_global_dict = True,
                       oracle_mode = MODE_ORACLE,
                       half_moving_win = 0,
                       train_ratio=0.8,
                       log_transform = False,
                       context_ratio = 0.,
                       verbose = False
                ):
    """
    split the ts to train and test part by the ratio

    Builds GluonTS ListDatasets from laptime_data. Non-test events go to the
    train set whole; the test event is expanded into rolling-window test
    records whose covariates are rewritten according to oracle_mode.

    input:
        runs ; index into laptime_data (-1 = all events)
        prediction_length ; forecast horizon per test record
        freq ; pandas frequency string for the synthetic timestamps
        useeid ; include the event id as a second static feature
        oracle_mode: false to simulate prediction in real by
                set the covariates of track and lap status as nan in the testset
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
    NOTE: run_ts, test_event and feature mode are overridden by the module
    globals _run_ts/_test_event/_feature_mode immediately below.
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode

    # reset the per-run prediction caches for the track-status models
    init_track_model()
    init_adjust_track_model()

    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series

    train_set = []
    test_set = []

    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()

    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []

        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)

        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)

            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)

            # selection of features: zero out the covariates excluded by mode
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                        'start': start,
                        'feat_static_cat': static_cat,
                        'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                               rec[COL_LAPSTATUS,:]]
                         }
                      )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len

                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1

                #bug fix, fixed the split point for all cars/ts
                for endpos in range(max_len, context_len+prediction_length,
                        step):

                    #check if enough for this ts
                    if endpos > totallen:
                        continue

                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    # stint state at the lap just before the forecast window
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0

                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0

                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0

                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                laps_instint,prediction_length)

                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    # disturbance analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0

                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec

                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec

                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))

                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec]
                             }
                          )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                             }
                          )
                    test_rec_cnt += 1

            #add one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

        train_set.extend(_train)
        test_set.extend(_test)

    print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)

    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile,freq, prediction_length, cardinality, train_ds, test_ds):
    """Pickle a prepared dataset bundle to *datafile*.

    The bundle layout is [freq, prediction_length, cardinality, train_ds, test_ds].
    """
    bundle = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as f:
        # highest pickle protocol available: compact and fast
        pickle.dump(bundle, f, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[6]:
def predict(test_ds,predictor):
    """Run a predictor over a test dataset and materialize all outputs.

    return:
        tss ; list of ground-truth series
        forecasts ; list of forecast objects (100 sample paths each)
    """
    forecast_iter, ts_iter = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )

    # consume the forecast iterator first, then the ground-truth iterator
    forecasts = list(forecast_iter)
    tss = list(ts_iter)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name,trainid):
    """Load the requested predictor by name and run it over test_ds.

    return:
        [tss, forecasts] on success, [] when model_name is unknown
    """
    with mx.Context(mx.gpu(7)):
        pred_ret = []

        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'

        # serialized deepAR-family models: name -> model directory under rootdir
        serialized = {
            'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
            'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
            'oracle': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
            'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
            'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
            'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
        }

        if model_name in serialized:
            modeldir = rootdir + serialized[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]

        # naive: repeat the last observed value
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]

        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]

        # arima (R backend)
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                    prediction_length = prediction_length,trunc_length=60)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]

        else:
            print(f'error: model {model_name} not support yet!')

        return pred_ret
def load_model(prediction_length, model_name,trainid):
    """
    Load (or construct) the predictor for *model_name*.

    Serialized deepAR variants are deserialized from
    ``../models/remote/{_dataset_id}/{_task_id}-{trainid}/``; 'naive', 'zero'
    and 'arima' predictors are built on the fly.

    Returns the predictor, or None when *model_name* is not supported
    (previously the function raised UnboundLocalError on that path).
    """
    with mx.Context(mx.gpu(7)):  # NOTE(review): GPU id 7 is hard-coded — confirm
        # fix: make sure `predictor` is defined on every path, including the
        # unsupported-model fallthrough
        predictor = None
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'
        # serialized deepAR variants: model name -> model directory under rootdir
        deepar_dirs = {
            'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
            'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
            'oracle': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
            'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
            'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
            'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
        }
        if model_name in deepar_dirs:
            modeldir = rootdir + deepar_dirs[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # naive
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
        # arima
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                                           prediction_length = prediction_length,trunc_length=60)
        else:
            print(f'error: model {model_name} not support yet!')
        return predictor
# In[7]:
#calc rank
def eval_rank_bytimediff(test_ds,tss,forecasts,prediction_length):
    """
    timediff models
    works for one event only

    Collect per-car forecast windows (median over sample paths), group them by
    the number of completed laps, then rank the cars per lap by time difference.

    return:
        rank_ret     ; list of [lap, time_diff, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [true window, predicted window]}}
    """
    carlist = []
    # carno-lap# -> elapsed_time[] array
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        # calc elapsed time
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # fix: was a plain string literal, the {...} braces printed verbatim
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []
        # median over the sample paths of this window
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        timediff_array = tss[idx].values.copy()
        #save the prediction
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                                               forecast_laptime_mean.copy()]
    # calc rank
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}
        #fill in data: [0]=true, [1]=predicted
        time_diff = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        #calculate rank: double argsort turns values into rank positions
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, time_diff, true_rank, pred_rank])
    return rank_ret,forecasts_et
#calc rank
def eval_laptime(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting
    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event (unused here)
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, lap_time, true_laptime, pred_laptime]
        forecasts_et ; {[completed_laps][carno]} ->(laptime, laptime_pred)
    """
    carlist = []
    # completed_laps -> carno -> [true laptime window, predicted laptime window]
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        # calc elapsed time
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # fix: was a plain string literal, the {...} braces printed verbatim
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []
        # mean over the sample paths of this window
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        laptime_array = tss[idx].values.copy()
        # predicted series = truth with the last window replaced by the forecast
        laptime_array_hat = tss[idx].values.copy()
        laptime_array_hat[-prediction_len:] = forecast_laptime_mean
        #save the prediction
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
                                               laptime_array_hat[-prediction_len:].copy()]
    # assemble per-lap matrices (no ranking here, laptimes are returned as-is)
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}
        #fill in data: [0]=true, [1]=predicted
        lap_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        true_laptime = lap_time[0]
        pred_laptime = lap_time[1]
        rank_ret.append([lap, lap_time, true_laptime, pred_laptime])
    return rank_ret,forecasts_et
#calc rank
def eval_rank(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting
    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event.  When it
                is a DataFrame, targets are laptimes and are cumsum-ed into
                elapsed time; otherwise targets are used for ranking directly.
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
    """
    carlist = []
    # completed_laps -> carno -> [true window, predicted window]
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        if isinstance(start_offset, pd.core.frame.DataFrame):
            # per-car elapsed time at lap 0
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        # calc elapsed time
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # fix: was a plain string literal, the {...} braces printed verbatim
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        if isinstance(start_offset, pd.core.frame.DataFrame):
            # laptime2rank: laptime -> cumulative elapsed time
            laptime_array = tss[idx].values.copy()
            elapsed_time = np.cumsum(laptime_array) + offset
            laptime_array = tss[idx].values.copy()
            laptime_array[-prediction_len:] = forecast_laptime_mean
            elapsed_time_hat = np.cumsum(laptime_array) + offset
        else:
            # rank-direct: the target itself is rankable
            elapsed_time = tss[idx].values.copy()
            elapsed_time_hat = tss[idx].values.copy()
            elapsed_time_hat[-prediction_len:] = forecast_laptime_mean
        #save the prediction
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]
    # calc rank
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}
        #fill in data: [0]=true, [1]=predicted
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        #calculate rank: double argsort turns values into rank positions
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])
    return rank_ret,forecasts_et
def get_acc(rank_ret,prediction_length, verbose = False):
    """
    Accumulate ranking metrics over all forecast windows.

    input:
        rank_ret: entries shaped [lap, elapsed_time, true_rank, pred_rank];
                  only columns [2] and [3] are used
    return:
        ((metrics...), (record count...)) — counts allow callers to build
        micro/macro averages later.
    """
    top1acc = 0
    top1acc_farmost = 0
    top5acc = 0
    top5acc_farmost = 0
    tau = 0
    rmse = 0.
    mae = 0.
    for entry in rank_ret:
        rank_true = entry[2]
        rank_pred = entry[3]
        # exact rank-0 hits: whole window, then only the farthest lap
        top1acc += np.sum((rank_true == 0) & (rank_pred == 0))
        top1acc_farmost += np.sum((rank_true[:, -1] == 0) & (rank_pred[:, -1] == 0))
        # hits inside the top-5 band
        top5acc += np.sum((rank_true < 5) & (rank_pred < 5))
        top5acc_farmost += np.sum((rank_true[:, -1] < 5) & (rank_pred[:, -1] < 5))
        # rank correlation
        kendall, _ = stats.kendalltau(rank_true, rank_pred)
        tau += kendall
        rmse += mean_squared_error(rank_pred, rank_true)
        mae += np.sum(np.abs(rank_pred - rank_true))
    recnt = len(rank_ret)
    if recnt > 0:
        top1acc = top1acc * 1.0 / (recnt * prediction_length)
        top1acc_farmost = top1acc_farmost * 1.0 / recnt
        top5acc = top5acc * 1.0 / (5 * recnt * prediction_length)
        top5acc_farmost = top5acc_farmost * 1.0 / (5 * recnt)
        tau = tau / recnt
        rmse = rmse / recnt
        mae = mae / recnt
    # debug only: when forecasting lap status, report mae in the tau slot
    if _run_ts == COL_LAPSTATUS:
        tau = mae
    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', top1acc,
              'top1acc_farmost=', top1acc_farmost,
              'top5acc=', top5acc,
              'top5acc_farmost=', top5acc_farmost,
              )
        print('tau = ', tau,
              'rmse = ', rmse,
              'mae = ', mae)
    return ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
            (recnt * prediction_length, recnt, 5 * recnt * prediction_length, 5 * recnt, recnt, recnt))
# In[ ]:
# In[8]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc, datamode='', models=None):
    """
    Run *testfunc* for every (halfmode, plen, trainid) combination, *runs*
    times, and average the metric columns over the runs.

    input:
        runs        ; number of repetitions to average over
        plens       ; e.g. [2,5,10]
        half        ; e.g. [False] or [True,False]
        trainids    ; e.g. ["r0.5"] or ["indy500-r0.2","indy500-r0.4","indy500"]
        train_ratio ; e.g. 0.5
        testfunc    ; a callable (run_exp_predpit, run_exp_predtrack, ...) or a
                      string; a string selects run_exp, which then requires
                      datamode and a non-empty models list
    return:
        dfret  ; average result of multiple runs
            dataframe['model' , 'prediction_length', 'halfmode','datamode','trainid',
                    'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse',
                    'top1acc_std','top1acc_farmost_std','top5acc_std','top5acc_farmost_std','tau_std','rmse_std']
        alldata_ret ; for debug
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
            pred_ret[model] -> [tss, forecasts]
            test_ds[model] -> test_ds
            rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
                forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    # fix: avoid a mutable default argument; [] is still the effective default
    if models is None:
        models = []
    if plens == [] or half == [] or trainids == []:
        print("error with empty settings")
        return
    #testfunc or (datamode & models)
    if isinstance(testfunc,str) and (datamode == '' or models == []):
        print("error with testfunc")
        return
    allret = []
    alldata_ret = []
    for runid in range(runs):
        exp_data = []
        exp_result = []
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('='*10)
                    if not isinstance(testfunc,str):
                        pred_ret, test_ds, rank_ret, metric_ret = testfunc(plen, halfmode,
                                                  train_ratio=train_ratio,
                                                  trainid=trainid)
                    else:
                        pred_ret, test_ds, rank_ret, metric_ret = run_exp(plen, halfmode,
                                                  train_ratio=train_ratio,
                                                  trainid=trainid,
                                                  datamode=datamode,
                                                  models=models)
                    #save raw data and metric rows of this configuration
                    exp_data.append((pred_ret, test_ds, rank_ret))
                    exp_result.extend(metric_ret)
        #save result of this run
        result = pd.DataFrame(exp_result, columns = ['model' , 'prediction_length', 'halfmode',
                                                     'datamode','trainid',
                                                     'top1acc','top1acc_farmost','top5acc',
                                                     'top5acc_farmost','tau','rmse'])
        allret.append(result)
        alldata_ret.append(exp_data)
    #final: stack the 6 metric columns of every run, then mean/std over runs
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :,:] = ret[['top1acc','top1acc_farmost','top5acc',
                                   'top5acc_farmost','tau','rmse']].values
    averagemat = np.mean(metrics[:,:,:], axis=0)
    stdmat = np.std(metrics[:,:,:], axis=0)
    dfhead = allret[0][['model' , 'prediction_length', 'halfmode', 'datamode','trainid']]
    dfaverage = pd.DataFrame(averagemat, columns = ['top1acc','top1acc_farmost','top5acc',
                                                    'top5acc_farmost','tau','rmse'])
    dfstd = pd.DataFrame(stdmat, columns = ['top1acc_std','top1acc_farmost_std','top5acc_std',
                                            'top5acc_farmost_std','tau_std','rmse_std'])
    dfret = pd.concat([dfhead, dfaverage, dfstd], axis=1)
    return dfret, alldata_ret
def checkret_status(dataret, runid = 0, idx = 0,model='oracle'):
    """
    Count yellow-flag laps and pit laps inside the prediction windows of one
    test dataset taken from the debug structure:
        dataret[runid][idx] -> (pred_ret, test_ds, rank_ret)
        pred_ret[model] -> [tss, forecasts]
        test_ds[model]  -> test_ds
    """
    _, plen = dataret[runid][idx][0][model][1][0].samples.shape
    test_ds = dataret[runid][idx][1][model]
    yfcnt = 0
    pitcnt = 0
    for test_rec in test_ds:
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        track_feat, lap_feat = test_rec['feat_dynamic_real']
        # the prediction window is the last plen points of each feature row
        yfcnt += np.sum(track_feat[-plen:])
        pitcnt += np.sum(lap_feat[-plen:])
    print('yfcnt:', yfcnt, 'pitcnt:', pitcnt)
def get_ref_oracle_testds(plens, halfs, train_ratio=0.8, test_cars=None):
    """
    Build oracle-mode test datasets for every (prediction_length,
    half_moving_win) combination of the test event.

    return:
        testset ; dict keyed by '<plen>-<halfmode>' -> test_ds
    """
    # fix: avoid a mutable default argument; [] is still the effective default
    if test_cars is None:
        test_cars = []
    testset = {}
    for prediction_length in plens:
        for half_moving_win in halfs:
            train_ds, test_ds,_,_ = make_dataset_byevent(events_id[_test_event], prediction_length,freq,
                                         oracle_mode=MODE_ORACLE,
                                         run_ts = _run_ts,
                                         test_cars=test_cars,
                                         half_moving_win= half_moving_win,
                                         train_ratio=train_ratio)
            # key encodes the configuration
            key = '%d-%d'%(prediction_length,half_moving_win)
            testset[key] = test_ds
    return testset
def checkret_confusionmat(dataret, ref_testset, runid= 0, testid = '', model='oracle'):
    """
    output the 4x4 confusion matrix split by track and lap status
    input:
        dataret     ; debug structure from run_test
        ref_testset ; oracle test datasets keyed '<plen>-<halfmode>' (from
                      get_ref_oracle_testds), used as the ground truth for
                      track/lap status
    return:
        dfacc ; DataFrame with one row per (plen, laptype) plus an 'aa'
                all-records row per plen
    """
    plen_length = len(dataret[runid])
    dflist = []
    for idx in range(plen_length):
        # prediction length of this configuration, read from the forecast samples
        _, plen = dataret[runid][idx][0][model][1][0].samples.shape
        test_ds = dataret[runid][idx][1][model]
        rank_ret = dataret[runid][idx][2][model][0]
        # only halfmode 0 reference sets are keyed here
        key = '%d-%d'%(plen,0)
        if key not in ref_testset:
            print(f'error, {key} not found in ref_testset')
            continue
        ref_oracle_testds = ref_testset[key]
        if len(ref_oracle_testds) != len(test_ds):
            print('error, size of testds mismatch', len(ref_oracle_testds), len(test_ds))
            continue
        # confusion matrix for <trackstatus, lapstatus> type: 00,01,10,11
        # lap(start lap of prediction) -> accumulated (yellow count, pit count)
        lapmap = {}
        ds_iter = iter(ref_oracle_testds)
        for recid in range(len(ref_oracle_testds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            track_rec,lap_rec = test_rec['feat_dynamic_real']
            # counts inside the prediction window only
            yfcnt = np.sum(track_rec[-plen:])
            pitcnt = np.sum(lap_rec[-plen:])
            lap = len(track_rec) - plen + 1
            if lap not in lapmap:
                lapmap[lap] = (yfcnt, pitcnt)
            else:
                oldtype = lapmap[lap]
                lapmap[lap] = (yfcnt + oldtype[0], pitcnt + oldtype[1])
        #split the rank_ret by laptype: '<yellow?><pit?>'
        types=['00','10','01','11']
        acc_ret = []
        for laptype in types:
            check_ret = []
            for item in rank_ret:
                typecnt = lapmap[item[0]]
                thetype = ('0' if typecnt[0]==0 else '1') + ('0' if typecnt[1]==0 else '1')
                if thetype == laptype:
                    check_ret.append(item)
            # metrics restricted to this laptype
            metrics = get_acc(check_ret,plen)
            recret = [testid, plen, laptype, len(check_ret)]
            recret.extend(metrics[0])
            acc_ret.append(recret)
        #add all test (type 'aa' = no split)
        metrics = get_acc(rank_ret,plen)
        recret = [testid, plen, 'aa', len(rank_ret)]
        recret.extend(metrics[0])
        acc_ret.append(recret)
        _dfacc = pd.DataFrame(acc_ret, columns = ['testid','plen',
                'type','reccnt','top1acc','top1acc_farmost','top5acc',
                'top5acc_farmost','tau','rmse'])
        dflist.append(_dfacc)
    dfacc = pd.concat(dflist, axis=0)
    return dfacc
# In[9]:
def check_testds(datamode, test_cars=None):
    """
    report mae, etc

    Builds train/test datasets for every (prediction_length, half_moving_win)
    combination; the returned datasets are discarded, so the value of this
    helper lies in whatever make_dataset_byevent reports while building.
    NOTE(review): relies on module-level `plens`, `half`, `train_ratio`.
    """
    # fix: avoid a mutable default argument; [] is still the effective default
    if test_cars is None:
        test_cars = []
    for prediction_length in plens:
        for half_moving_win in half:
            train_ds, test_ds,_,_ = make_dataset_byevent(events_id[_test_event], prediction_length,freq,
                                         oracle_mode=datamode,
                                         run_ts = _run_ts,
                                         test_cars=test_cars,
                                         half_moving_win= half_moving_win,
                                         train_ratio=train_ratio)
def dotest(config):
    """
    Run every (model, test-function) pair described by *config*
    (model -> {testfunc_name: datamode}) and stack the per-run metric frames
    and confusion-matrix frames.
    """
    metric_frames = []
    confusion_frames = []
    for model, conf in config.items():
        for teststr, datamode in conf.items():
            df, dataret = run_test(runs, plens, half, trainids,
                                   train_ratio, teststr,
                                   datamode=datamode, models=[model])
            acc = checkret_confusionmat(dataret, ref_testset,
                                        testid=teststr, model=model)
            metric_frames.append(df)
            confusion_frames.append(acc)
    dfret = pd.concat(metric_frames, axis=0)
    dfacc = pd.concat(confusion_frames, axis=0)
    return dfret, dfacc
#
# simulator
#
def get_pitlaps(verbose = True, prediction_length=2):
    """
    Collect pit-stop laps (COL_LAPSTATUS == 1) for every car of the test event.

    Series shorter than _train_len + prediction_length valid laps are skipped.
    return:
        ret_pitlaps ; sorted, de-duplicated laps at which any car pitted
        all_pitlaps ; carno -> list of that car's pit laps
        max_lap     ; longest series length over the event
    """
    run_ts = _run_ts
    all_pitlaps = {}
    max_lap = 0
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        # longest ts over all cars of this event
        max_lap = int(np.max([_entry.shape[1] for _entry in _data[2]]))
        for rowid in range(_data[2].shape[0]):
            rec = _data[2][rowid].copy()
            # strip NaNs (only tails) from the target row
            nans, x = nan_helper(rec[run_ts, :])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts, :])]
            totallen = rec.shape[1]
            if totallen < _train_len + prediction_length:
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            # laps flagged as pit stops
            all_pitlaps[carno] = list(np.where(rec[COL_LAPSTATUS, :] == 1)[0])
    # merge the per-car lists into one sorted, unique lap list
    combined = []
    for laps in all_pitlaps.values():
        combined.extend(laps)
    ret_pitlaps = sorted(set(combined))
    return ret_pitlaps, all_pitlaps, max_lap
def get_nextpit(pitlaps, startlap):
    """
    input:
        pitlaps  ; dict carno -> sorted list of pit laps for that car
        startlap ;
    return:
        nextpit_map ; carno -> first pit lap strictly after startlap (cars with
                      no further pit are omitted)
        maxpit      ; largest next-pit lap over all cars; np.nan when no car
                      has a pit after startlap
    """
    nextpit = []
    nextpit_map = {}
    for carno, laps in pitlaps.items():
        for lap in laps:
            if lap > startlap:
                nextpit.append(lap)
                nextpit_map[carno] = lap
                break
        else:
            # no pit after startlap for this car
            nextpit.append(np.nan)
    # fix: plain max() over a list containing NaN is order dependent
    # (max([nan, 7]) -> nan but max([7, nan]) -> 7); ignore NaNs explicitly.
    valid = [x for x in nextpit if not np.isnan(x)]
    return nextpit_map, (max(valid) if valid else np.nan)
def sim_init():
    """
    Snapshot the pit-related feature rows (lap status, caution laps in stint,
    laps in stint) of every car of the test event into their *_SAVE rows, so a
    simulation run can later restore the ground truth.
    """
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber]; mutated in place
            rec = _data[2][rowid]
            rec[COL_LAPSTATUS_SAVE, :] = rec[COL_LAPSTATUS, :]
            rec[COL_CAUTION_LAPS_INSTINT_SAVE, :] = rec[COL_CAUTION_LAPS_INSTINT, :]
            rec[COL_LAPS_INSTINT_SAVE, :] = rec[COL_LAPS_INSTINT, :]
def update_lapstatus(startlap):
    """
    Re-generate the lap status after *startlap* for every car of the test
    event by delegating to update_onets.
    """
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        for rowid in range(_data[2].shape[0]):
            update_onets(_data[2][rowid], startlap, _data[1][rowid])
# difference test on pit strategy
# _pitstrategy_testcar: car number that gets a deterministic pit strategy in
# update_onets, while every other car samples its stint length from the model.
_pitstrategy_testcar = 12
# True  -> the test car always uses the shortest stint the model allows (min);
# False -> always the longest (max).  See update_onets.
_pitstrategy_lowmode = True
def update_onets(rec, startlap, carno):
    """
    update lapstatus after startlap basedon tsrec by pit prediction model
    input:
        rec      ; a ts with multiple features COL_XXX (mutated in place)
        startlap ; laps up to and including startlap are restored from the
                   *_SAVE rows; laps after it are re-simulated
        carno    ; car number; _pitstrategy_testcar gets a deterministic
                   strategy, every other car samples from the pit model
    return:
        None; rec is updated for COL_LAPSTATUS, COL_CAUTION_LAPS_INSTINT,
        COL_LAPS_INSTINT
    """
    # this is the perfect empirical pit model for Indy500 2018:
    # [0] = stint lengths observed with <=10 caution laps in the stint,
    # [1] = stint lengths observed otherwise
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
            [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
            [46, 45, 43, 48, 46, 45, 45, 43]]
    #pit_model = pit_model_all
    pit_model = pit_model_top8
    # length of the valid (non-NaN) part of the target row
    nans, x= nan_helper(rec[_run_ts,:])
    nan_count = np.sum(nans)
    recx = rec[:, ~np.isnan(rec[_run_ts,:])]
    totallen = recx.shape[1]
    if startlap >= totallen:
        # nothing beyond this series to simulate
        return
    #reset status: restore the ground truth for laps [0, startlap]
    endpos = startlap + 1
    rec[COL_LAPSTATUS,:] = 0
    rec[COL_LAPSTATUS,:endpos] = rec[COL_LAPSTATUS_SAVE, :endpos]
    rec[COL_CAUTION_LAPS_INSTINT,:endpos] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :endpos]
    rec[COL_LAPS_INSTINT, :endpos] = rec[COL_LAPS_INSTINT_SAVE, :endpos]
    debug_report('start update_onets', rec[COL_LAPSTATUS], startlap, carno)
    #loop on predict nextpit pos
    curpos = startlap
    while True:
        caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, curpos])
        laps_instint = int(rec[COL_LAPS_INSTINT, curpos])
        if carno == _pitstrategy_testcar:
            # check strategy for test car: deterministic earliest (min) or
            # latest (max) stint length from the model
            if _pitstrategy_lowmode:
                if caution_laps_instint <= 10:
                    #use low model
                    pred_pit_laps = min(pit_model[0])
                else:
                    pred_pit_laps = min(pit_model[1])
            else:
                if caution_laps_instint <= 10:
                    #use low model
                    pred_pit_laps = max(pit_model[0])
                else:
                    pred_pit_laps = max(pit_model[1])
        else:
            # sample a stint length; retry up to 10 times until it exceeds the
            # laps already run in this stint, then fall back to pitting next lap
            retry = 0
            while retry < 10:
                if caution_laps_instint <= 10:
                    #use low model
                    pred_pit_laps = random.choice(pit_model[0])
                else:
                    pred_pit_laps = random.choice(pit_model[1])
                if pred_pit_laps <= laps_instint:
                    retry += 1
                    if retry == 10:
                        pred_pit_laps = laps_instint + 1
                    continue
                else:
                    break
        # lap index of the predicted pit stop
        nextpos = curpos + pred_pit_laps - laps_instint
        if nextpos >= totallen:
            # predicted pit falls beyond the series: extend the counters to the
            # end without marking a pit, then stop
            nextpos = totallen - 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos+1] = caution_laps_instint
            for _pos in range(curpos+1, nextpos+1):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            break
        else:
            # a valid pit: mark it and reset both in-stint counters at nextpos
            rec[COL_LAPSTATUS, nextpos] = 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos] = caution_laps_instint
            rec[COL_CAUTION_LAPS_INSTINT, nextpos] = 0
            for _pos in range(curpos+1, nextpos):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            rec[COL_LAPS_INSTINT, nextpos] = 0
            #go forward
            curpos = nextpos
    debug_report('after update_onets', rec[COL_LAPSTATUS], startlap, carno)
    return
#debug tracking status
#status matrix : laps x ( endCol x 5 features)
#features: target, lapstatus, lap_instint, caution_instint, trackstatus
_status_mat = {} # stepid -> status matrix (currently only read/written by debug helpers)
def debug_report_mat(startlap, maxnext):
    """
    output the status of the simulation

    NOTE(review): this looks like an unfinished stub — it walks every ts of
    the test event and binds `rec`, but never formats or prints anything;
    `fixedWidth`, `endCol`, `run_ts`, `max_lap` and both parameters are unused.
    """
    fixedWidth = 5
    endCol = 4
    run_ts = _run_ts
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        #header carno | lap#...
        #fixed width
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
# car numbers traced by the debug_* helpers; empty disables all debug output
_debug_carlist = []
#_debug_carlist = [12]
def debug_report_ts(msg, rec, startlap, carno, col= COL_LAPSTATUS):
    """
    For a traced car, print one feature row of *rec* split into the part up to
    *startlap* (inclusive) and the remainder.
    """
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[col, : startlap + 1])
        print('=' * 10)
        print(rec[col, startlap + 1:])
def debug_report(msg, rec, startlap, carno):
    """
    For a traced car, print a 1-D array split into the part up to *startlap*
    (inclusive) and the remainder.
    """
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[: startlap + 1])
        print('=' * 10)
        print(rec[startlap + 1:])
def debug_print(msg):
    """Print *msg* only while at least one car is being traced."""
    if _debug_carlist:
        print(msg)
# works on predicted lap status
def sim_onestep_pred(predictor, prediction_length, freq,
                     startlap, endlap,
                     oracle_mode = MODE_ORACLE,
                     verbose = False
                     ):
    """
    Auto-regressive simulation step: predict laps after *startlap* up to
    *endlap* in windows of prediction_length, writing each window's forecast
    back into the target before predicting the next window.

    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus (from COL_LAPSTATUS_SAVE, i.e. the saved truth)
            1,: -> true target
            2,: -> pred target (overwritten window by window)
            3, -> placeholder
            4, -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []  # NOTE(review): unused
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    # endpos is exclusive: first window predicts laps (startlap, startlap+plen]
    endpos = startlap + prediction_length + 1
    while(endpos <= endlap + prediction_length + 1):
        #make the testset for this window
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # this car's series ends before the current window
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                #first, get target a copy
                # target can be COL_XXSTATUS
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # lazily initialize the <5, totallen> output matrix for this car
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS_SAVE, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                # row 2 is updated by forecasts, so later windows see earlier
                # predictions instead of the truth (auto-regressive feedback)
                target_val = forecasts_et[carno][2,:]
                # selection of features: optionally blank out track/lap status
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # RUN Prediction for single record: slice features up to endpos
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                # stint state at the last observed lap of the window
                # NOTE(review): computed but only used by commented-out code below
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test mode
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    # zero out the unknown future of both status features
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    #for pitage, use the (already present) lap info to update pitage
                    # NOTE(review): start_pitage is not incremented per lap here
                    # — pitage stays flat inside the window; confirm intended
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec]
                                  }
                                 )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                  }
                                 )
                test_rec_cnt += 1
                #debug
                debug_report(f'simu_onestep: {startlap}-{endpos}', target_val[:endpos], startlap, carno)
        # end of for each ts
        # RUN Prediction here
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=100,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            #update the forecasts, ready to use in the next prediction (regressive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
        #go forward one window
        endpos += prediction_length
    return forecasts_et
# works on lapstatus ground truth
def sim_onestep_ex(predictor, prediction_length, freq,
                   startlap, endlap,
                   oracle_mode = MODE_ORACLE,
                   verbose = False
                   ):
    """
    Autoregressive simulation on the ground-truth lap status.

    Repeatedly forecasts `prediction_length` laps ahead for every car in the
    test event, writes each forecast back into the car's predicted-target row
    (so later windows condition on earlier forecasts), and advances the
    window until `endlap` is covered.

    input:
        predictor ; trained predictor consumed by make_evaluation_predictions
        prediction_length ; laps forecast per simulation step
        freq ; pandas frequency string for the synthetic series timestamps
        startlap ; first lap of the simulated stint
        endlap ; last lap to keep; predictions beyond it are cleared to nan
        oracle_mode ; MODE_* bit flags selecting covariate treatment
        verbose ; print per-ts diagnostics
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target (overwritten step by step with forecasts)
            3, -> placeholder
            4, -> placeholder

    NOTE(review): relies on module globals (_run_ts, _test_event,
    _feature_mode, _context_ratio, _train_len, laptime_data, events,
    global_carids, decode_carids) — confirm they are initialized first.
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    # first window covers laps up to startlap + prediction_length
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + 1):
    while(endpos <= endlap + prediction_length + 1):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # skip cars whose series ends before this window
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # <3, totallen>
                # lazily initialize the per-car result matrix on the first window
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                    #forecasts_et[carno][2,:endpos] = rec[run_ts,:endpos].copy().astype(np.float32)
                # forecasts_et will be updated by forecasts
                # target_val aliases row 2, so earlier forecasts feed the next step
                target_val = forecasts_et[carno][2,:]
                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test mode: overwrite the covariates inside the prediction window
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #        laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                                )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                                )
                test_rec_cnt += 1
        # end of for each ts
        # RUN Prediction here: one batch covering all cars for this window
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=100,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            # mean over the 100 sample paths -> point forecast
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            #update the forecasts , ready to use in the next prediction(regresive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
        #go forward
        endpos += prediction_length
    #clear the unpred part
    for carno in forecasts_et.keys():
        forecasts_et[carno][2, endlap+1:] = np.nan
    return forecasts_et
def sim_onestep(predictor, prediction_length, freq,
                startlap, endlap,
                oracle_mode = MODE_ORACLE,
                verbose = False
                ):
    """
    Per-car autoregressive simulation over [startlap, endlap].

    Unlike sim_onestep_ex (which batches all cars per window), this variant
    walks each car's series independently, issuing one single-record
    prediction per window and feeding the forecast back into that car's
    target and covariate rows before the next window.

    input:
        predictor ; trained predictor consumed by make_evaluation_predictions
        prediction_length ; laps forecast per simulation step
        freq ; pandas frequency string for the synthetic series timestamps
        startlap ; first lap of the simulated stint
        endlap ; stop predicting once the window end reaches this lap
        oracle_mode ; MODE_* bit flags selecting covariate treatment
        verbose ; print per-ts diagnostics
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder

    NOTE(review): mae, test_set, context_ratio and the outer `_test = []`
    are assigned but never used in this variant (the inner while loop
    rebuilds `_test` per window) — left as-is for fidelity.
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _test = []
        if events[_data[0]] != test_event:
            #jump out
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()
            # <3, totallen>  per-car result matrix (rows 3/4 stay nan here)
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if True:
                #bug fix, fixed the split point for all cars/ts
                #for endpos in range(max_len, context_len+prediction_length,step):
                #step = prediction_length
                #for endpos in range(startlap + prediction_length, endlap, step):
                endpos = startlap + prediction_length
                while(endpos < endlap and endpos < totallen):
                    # RUN Prediction for single record
                    _test = []
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode: overwrite covariates inside the prediction window
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #todo
                        #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                        #        laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec]
                                     }
                                    )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                     }
                                    )
                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)
                    #get prediction result: mean over sample paths
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
                    #update target_val and the covariates with the window just predicted
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
                    #save forecast
                    #save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    #print('car no:', carno, 'completed_laps:', completed_laps)
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
                    test_rec_cnt += 1
                    #go forward
                    endpos += prediction_length
            #one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
    return forecasts_et
# pred pit differs to true pit
def get_acc_onestint_pred(forecasts, startlap, nextpit, nextpit_pred, trim=2, currank = False):
    """
    Collect per-car rank changes for the stint starting at `startlap`,
    reading the predicted end rank at the *predicted* next pit lap
    (which may differ from the true one).

    input:
        forecasts ; carno -> [5, totallen] matrix
            0; lap_status, 3; true_rank, 4; pred_rank
        startlap ; evaluate only the stint that starts at this lap
        nextpit ; carno -> true next pit lap (may be nan)
        nextpit_pred ; carno -> predicted next pit lap (may be nan)
        trim ; steady-rank lap offset before a pit (skips in/out laps)
        currank ; if True, baseline that predicts no rank change
    output:
        rows of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    rows = []
    for carno, rec in forecasts.items():
        total_laps = len(rec[1, :])
        # only cars that actually pit on `startlap` open a stint here
        if startlap >= total_laps or rec[0, startlap] != 1:
            continue
        # both the true and the predicted next pit must be known
        if carno not in nextpit or carno not in nextpit_pred:
            continue
        pit_true = nextpit[carno]
        pit_pred = nextpit_pred[carno]
        if np.isnan(pit_true) or np.isnan(pit_pred):
            continue
        actual_rank = rec[3, :]
        forecast_rank = rec[4, :]
        startrank = actual_rank[startlap - trim]
        endrank = actual_rank[pit_true - trim]
        diff = endrank - startrank
        if currank:
            # current-rank baseline: predict the start rank unchanged
            pred_endrank = startrank
        else:
            # predicted rank sampled at the *predicted* pit lap
            pred_endrank = forecast_rank[pit_pred - trim]
        pred_diff = pred_endrank - startrank
        rows.append([carno, startlap, startrank,
                     endrank, diff, get_sign(diff),
                     pred_endrank, pred_diff, get_sign(pred_diff)])
    return rows
# works when pred pitstop == true pitstop
def get_acc_onestint(forecasts, startlap, nextpit, trim=2, currank = False):
    """
    Collect per-car rank changes for the stint starting at `startlap`,
    assuming the predicted pit stop coincides with the true one.

    input:
        forecasts ; carno -> [5, totallen] matrix
            0; lap_status, 3; true_rank, 4; pred_rank
        startlap ; evaluate only the stint that starts at this lap
        nextpit ; carno -> next pit lap (may be nan)
        trim ; steady-rank lap offset before a pit (skips in/out laps)
        currank ; if True, baseline that predicts no rank change
    output:
        rows of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    rows = []
    for carno, rec in forecasts.items():
        total_laps = len(rec[1, :])
        # only cars that actually pit on `startlap` open a stint here
        if startlap >= total_laps or rec[0, startlap] != 1:
            continue
        if carno not in nextpit:
            continue
        pitpos = nextpit[carno]
        if np.isnan(pitpos):
            continue
        actual_rank = rec[3, :]
        forecast_rank = rec[4, :]
        startrank = actual_rank[startlap - trim]
        endrank = actual_rank[pitpos - trim]
        diff = endrank - startrank
        if currank:
            # current-rank baseline: predict the start rank unchanged
            pred_endrank = startrank
        else:
            pred_endrank = forecast_rank[pitpos - trim]
        pred_diff = pred_endrank - startrank
        rows.append([carno, startlap, startrank,
                     endrank, diff, get_sign(diff),
                     pred_endrank, pred_diff, get_sign(pred_diff)])
    return rows
# pred sim
def run_simulation_pred(predictor, prediction_length, freq,
                        datamode = MODE_ORACLE):
    """
    Stint-by-stint simulation driven by the *predicted* pit model.

    step:
        1. init the lap status model
        2. for every ground-truth pit lap:
            1. refresh predicted lap status, run one-step simulation up to
               the predicted next pit
            2. evaluate the stint's rank performance
    return: DataFrame of per-car, per-stint rank-change records
    """
    records = []
    # the ground truth pit structure of the test event
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    for pitlap in allpits:
        debug_print(f'start pitlap: {pitlap}')
        # 1. update lap status
        update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        # 2. get maxnext from the *predicted* pit structure
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        # debug trace for car 12 (_debug_carlist)
        if 12 in nextpit and 12 in nextpit_pred:
            debugstr = f'nextpit: {nextpit[12]}, {nextpit_pred[12]}, maxnext: {maxnext}, {maxnext_pred}'
            debug_print(debugstr)
        # run one step sim from pitlap to the predicted max next pit
        forecast = sim_onestep_pred(predictor, prediction_length, freq,
                                    pitlap, maxnext_pred,
                                    oracle_mode = datamode
                                    )
        debug_print(f'simulation done: {len(forecast)}')
        # rank evaluation depends on the experiment target
        if _exp_id in ('rank', 'timediff2rank'):
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id == 'laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this stint
        records.extend(get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred))
    return pd.DataFrame(records, columns =['carno', 'startlap', 'startrank',
                                           'endrank', 'diff', 'sign',
                                           'pred_endrank', 'pred_diff', 'pred_sign',
                                           ])
# oracle sim
def run_simulation(predictor, prediction_length, freq,
                   datamode = MODE_ORACLE):
    """
    Stint-by-stint simulation with oracle (ground-truth) pit information.

    step:
        1. for every true pit lap:
            1. one-step simulation up to the latest true next pit
            2. evaluate the stint's rank performance
    return: DataFrame of per-car, per-stint rank-change records
    """
    records = []
    allpits, pitmat, maxlap = get_pitlaps()
    for pitlap in allpits:
        print(f'start pitlap: {pitlap}')
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        # run one step sim from pitlap to maxnext
        forecast = sim_onestep_ex(predictor, prediction_length, freq,
                                  pitlap, maxnext,
                                  oracle_mode = datamode
                                  )
        print(f'simulation done: {len(forecast)}')
        # rank evaluation depends on the experiment target
        if _exp_id in ('rank', 'timediff2rank'):
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id == 'laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this stint
        records.extend(get_acc_onestint(forecasts_et, pitlap, nextpit))
    return pd.DataFrame(records, columns =['carno', 'startlap', 'startrank',
                                           'endrank', 'diff', 'sign',
                                           'pred_endrank', 'pred_diff', 'pred_sign',
                                           ])
#
# ------------
#
def longterm_predict(predictor, runs, prediction_length, freq,
                     useeid = False,
                     run_ts= COL_LAPTIME,
                     test_event = 'Indy500-2018',
                     test_cars = [],
                     use_global_dict = True,
                     oracle_mode = MODE_ORACLE,
                     half_moving_win = 0,
                     train_ratio=0.8,
                     log_transform = False,
                     verbose = False
                     ):
    """
    split the ts to train and test part by the ratio

    Rolling-window long-term prediction over the test event: for every car
    and every window end position, forecast `prediction_length` laps and
    write the forecast back into the series (autoregressive), with many
    oracle/disturbance modes controlling how covariates are filled in.

    input:
        oracle_mode: false to simulate prediction in real by
            set the covariates of track and lap status as nan in the testset
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder

    NOTE(review): run_ts/test_event are immediately overwritten by the
    module globals below, so these parameters are effectively dead;
    test_cars is accepted but never read — confirm callers.
    """
    # parameters shadowed by module-level experiment configuration
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    init_track_model()
    init_adjust_track_model()
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    train_set = []
    test_set = []
    forecasts_et = {}
    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
            if context_len < 10:
                context_len = 10
        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()
            # <3, totallen>  per-car result matrix (rows 3/4 stay nan here)
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                                     rec[COL_LAPSTATUS,:]]
                              }
                             )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = int(prediction_length/2)
                elif half_moving_win == 2:
                    step = prediction_length
                else:
                    step = 1
                #bug fix, fixed the split point for all cars/ts
                #for endpos in range(max_len, context_len+prediction_length,step):
                for endpos in range(context_len+prediction_length, max_len, step):
                    #check if enough for this ts
                    if endpos > totallen:
                        break
                    # RUN Prediction for single record
                    _test = []
                    # check pitstop(stint) in the last prediction
                    # use ground truth of target before the last pitstop
                    if np.sum(lap_status[endpos-2*prediction_length:endpos-prediction_length]) > 0:
                        # pit found
                        # adjust endpos
                        # NOTE: rebinding the loop variable only affects this
                        # iteration; the for-range resumes its own sequence next
                        pitpos = np.where(lap_status[endpos-2*prediction_length:endpos-prediction_length] == 1)
                        endpos = endpos-2*prediction_length + pitpos[0][0] + prediction_length + 1
                        #print('endpos:',endpos,pitpos)
                        #check if enough for this ts
                        if endpos > totallen:
                            break
                    #reset target, status to the ground truth before this window
                    target_val = rec[run_ts,:].copy().astype(np.float32)
                    rec[COL_LAPSTATUS, :] = lap_status
                    rec[COL_TRACKSTATUS, :] = track_status
                    rec[COL_LAPS_INSTINT, :] = pitage_status
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode: overwrite covariates inside the prediction window
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                                                     laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec
                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec
                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec]
                                     }
                                    )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                     }
                                    )
                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)
                    #get prediction result: mean over sample paths
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
                    #update target_val and covariates with this window's values
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
                    #save forecast
                    #save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    #print('car no:', carno, 'completed_laps:', completed_laps)
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
                    test_rec_cnt += 1
            #one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
    #train_set.extend(_train)
    #test_set.extend(_test)
    #print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    #train_ds = ListDataset(train_set, freq=freq)
    #test_ds = ListDataset(test_set, freq=freq)
    return forecasts_et
# In[12]:
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset, maxlap=200):
    """
    evaluate stint rank by laptime forecasting

    Converts the true and predicted per-lap laptimes into cumulative elapsed
    time, ranks all cars lap-by-lap, and writes the ranks back into rows 3
    (true rank) and 4 (predicted rank) of each car's matrix.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target (laptime)
            2,: -> pred target (laptime)
            3,: -> filled with true rank (output)
            4,: -> filled with pred rank (output)
        prediction_length ; kept for interface compatibility (unused here)
        start_offset ; elapsed time at lap 0 for this event: either a
            DataFrame with 'car_number'/'elapsed_time' columns, or a scalar
            applied to all cars (previously a non-DataFrame raised NameError)
        maxlap ; rank-matrix width (default 200 for Indy500); grown
            automatically when a series is longer, so longer events no
            longer crash with a broadcast error
    return:
        forecasts_et ; the same dict, rank rows filled in
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    caridmap = {key: idx for idx, key in enumerate(carlist)}
    if carlist:
        # make sure the matrix is wide enough for the longest series
        maxlap = max(maxlap, max(len(v[1, :]) for v in forecasts_et.values()))
    # rows: 0 = true elapsed time, 1 = predicted elapsed time
    elapsed_time = np.full((2, len(carlist), maxlap), np.nan)
    for carno in forecasts_et.keys():
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number'] == carno)].elapsed_time.values[0]
        else:
            # scalar offset shared by all cars
            offset = start_offset
        lapnum = len(forecasts_et[carno][1, :])
        elapsed_time[0, caridmap[carno], :lapnum] = np.cumsum(forecasts_et[carno][1, :]) + offset
        elapsed_time[1, caridmap[carno], :lapnum] = np.cumsum(forecasts_et[carno][2, :]) + offset
    #calculate rank, support nan (nan sorts to the end of each lap column)
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1, :])
        forecasts_et[carno][3, :] = true_rank[caridmap[carno], :lapnum]
        forecasts_et[carno][4, :] = pred_rank[caridmap[carno], :lapnum]
    return forecasts_et
def eval_stint_direct(forecasts_et, prediction_length, maxlap=200):
    """
    evaluate rank by timediff forecasting

    Ranks cars lap-by-lap directly from the true and predicted time-diff
    targets and writes the ranks back into rows 3 (true rank) and
    4 (predicted rank) of each car's matrix.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target (timediff)
            2,: -> pred target (timediff)
            3,: -> filled with true rank (output)
            4,: -> filled with pred rank (output)
        prediction_length ; kept for interface compatibility (unused here)
        maxlap ; rank-matrix width (default 200 for Indy500); grown
            automatically when a series is longer, so longer events no
            longer crash with a broadcast error
    return:
        forecasts_et ; the same dict, rank rows filled in
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    caridmap = {key: idx for idx, key in enumerate(carlist)}
    if carlist:
        # make sure the matrix is wide enough for the longest series
        maxlap = max(maxlap, max(len(v[1, :]) for v in forecasts_et.values()))
    # rows: 0 = true timediff, 1 = predicted timediff
    diff_time = np.full((2, len(carlist), maxlap), np.nan)
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1, :])
        diff_time[0, caridmap[carno], :lapnum] = forecasts_et[carno][1, :]
        diff_time[1, caridmap[carno], :lapnum] = forecasts_et[carno][2, :]
    #calculate rank, support nan (nan sorts to the end of each lap column)
    idx = np.argsort(diff_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(diff_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1, :])
        forecasts_et[carno][3, :] = true_rank[caridmap[carno], :lapnum]
        forecasts_et[carno][4, :] = pred_rank[caridmap[carno], :lapnum]
    return forecasts_et
#calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset):
    """
    Evaluate rank by laptime forecasting: cumulative laptimes plus the
    lap-0 offset give elapsed time, which determines the per-lap rank.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            row 1: true laptime, row 2: predicted laptime,
            rows 3/4: placeholders filled here with true/pred rank
        prediction_length ; unused here, kept for a uniform eval interface
        start_offset ; elapsed time at lap 0 for one specific event; either a
            DataFrame with 'car_number'/'elapsed_time' columns or a plain
            scalar applied to every car
    return:
        forecasts_et with row 3 (true rank) and row 4 (pred rank) filled in
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    caridmap={key:idx for idx, key in enumerate(carlist)}

    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan

    for carno in forecasts_et.keys():
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        else:
            # fix: `offset` was previously undefined (NameError) whenever
            # start_offset was not a DataFrame; accept a scalar fallback
            offset = start_offset

        lapnum = len(forecasts_et[carno][1,:])

        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed

        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed

    #calculate rank, support nan (NaN sorts last in argsort)
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)

    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)

    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]

    return forecasts_et
# In[13]:
def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
                 test_event='Indy500-2018', test_cars = None,
                datamode = MODE_ORACLE,model = 'oracle'):
    """
    Run one long-term forecasting experiment on a single event.

    input:
        prediction_length ; laps to forecast per step
        half_moving_win   ; moving-window mode (0:no, 1:halfwin, 2:continuous)
        train_ratio       ; portion of the event used as context
        trainid           ; id string of the trained model to load
        test_event        ; event name; dependency: test on one event only
        test_cars         ; optional list of car numbers to restrict the test
        datamode          ; MODE_* bit flags (oracle/predicted features)
        model             ; model name understood by load_model
    return:
        forecasts dict produced by longterm_predict
    dependency: module globals _test_event, _run_ts, _exp_id, events_id, freq
    """
    # fix: avoid a shared mutable default argument ([])
    if test_cars is None:
        test_cars = []

    predictor = {}
    print('exp:',inspect.stack()[0][3],'model:', model,
            'datamode:', get_modestr(datamode),'eval:', _exp_id )

    predictor[model] = load_model(prediction_length, model,
            trainid=trainid)

    ### create test dataset
    forecasts = longterm_predict(predictor[model],
                            events_id[_test_event], prediction_length,freq,
                            oracle_mode=datamode,
                            run_ts = _run_ts,
                            test_cars=test_cars,
                            half_moving_win= half_moving_win,
                            train_ratio=train_ratio
                            )

    #forecasts = eval_stint_rank(forecasts_et, prediction_length,
    #                                              global_start_offset[test_event])
    return forecasts
# In[14]:
def get_sign(diff):
    """Return the sign of a rank change: 1 (dropped back), -1 (gained), 0 (unchanged)."""
    if diff == 0:
        return 0
    return 1 if diff > 0 else -1
def get_stint_acc(forecasts, trim=2, currank = False):
    """
    Turn filled rank forecasts into per-stint rank-change records.

    input:
        trim ; steady lap of the rank (laps backed off before pit_inlap/pit_outlap)
        currank ; True -> naive baseline that always predicts "no change"
        forecasts; carno -> [5,totallen]
            row 0: lap_status (1 marks a pit lap)
            row 3: true_rank
            row 4: pred_rank
    output:
        DataFrame: carno, stintid, startrank, endrank, diff, sign,
                   pred_endrank, pred_diff, pred_sign
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]

        pitpos_list = np.where(forecasts[carno][0,:] == 1)[0]

        stintid = 0
        startrank = true_rank[0]
        for pitpos in pitpos_list:
            # NOTE(review): pitpos - trim is negative for a pit within the
            # first `trim` laps and then wraps to the array tail -- confirm
            endrank = true_rank[pitpos-trim]
            diff = endrank - startrank
            sign = get_sign(diff)

            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                pred_endrank = pred_rank[pitpos-trim]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)

            rankret.append([carno, stintid, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                           ])

            stintid += 1
            startrank = true_rank[pitpos-trim]

        # final stint after the last pit, or the whole race for a car that
        # never pitted
        # fix: an empty pitpos_list previously raised IndexError on
        # pitpos_list[-1]
        if len(pitpos_list) == 0 or pitpos_list[-1] < lapnum - 1:
            endrank = true_rank[-1]
            diff = endrank - startrank
            sign = get_sign(diff)

            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                pred_endrank = pred_rank[-1]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)

            rankret.append([carno, stintid, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                           ])

    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'stintid', 'startrank',
                                         'endrank', 'diff', 'sign',
                                         'pred_endrank', 'pred_diff', 'pred_sign',
                                        ])
    return df
#
# configurataion
#
#   model path:  <_dataset_id>/<_task_id>-<trainid>/
#
# The assignments below are module-level defaults; __main__ overwrites
# _dataset_id, _test_event, _trim, _task_id, _run_ts and _exp_id from the
# command line, so these values only matter for interactive use.
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'
_test_event = 'Indy500-2018'
#_test_event = 'Indy500-2019'
_train_len = 40
_feature_mode = FEATURE_STATUS
_context_ratio = 0.
#_task_id = 'timediff'  # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF  #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank'  #rank, laptime, laptim2rank, timediff2rank...
#
#_task_id = 'lapstatus'  # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS  #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus'  #rank, laptime, laptim2rank, timediff2rank...
_task_id = 'laptime'  # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME  #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank'  #rank, laptime, laptim2rank, timediff2rank...
# In[16]:
# globals filled by init(): per-event lap-0 offsets, car-id maps and the
# pickled laptime dataset
global_start_offset = {}
global_carids = {}
laptime_data = None
freq = "1min"
decode_carids = {}
# events covered by the pickled dataset; events_id maps name -> index
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}_v9'
def init():
    """Load per-event CSVs and the pickled laptime dataset into module globals.

    Fills global_start_offset (per-event lap-0 elapsed time per car),
    global_carids / decode_carids (car number <-> id maps) and laptime_data.
    """
    global global_carids, laptime_data, global_start_offset, decode_carids

    stagedata = {}
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)

        alldata, rankdata, acldata = stagedata[event]
        # offset: elapsed time of each car at completed_laps == 0
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]

    # start from here
    import pickle
    #with open('laptime_rank_timediff_fulltest-oracle-%s.pickle'%year, 'rb') as f:
    with open(f'laptime_rank_timediff_pit-oracle-{dbid}.pickle', 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')

    decode_carids={carid:carno for carno, carid in global_carids.items()}
    print(f'init: load dataset with {len(laptime_data)} races, {len(global_carids)} cars')
def runtest(modelname, model, datamode, naivemode, trainid= "2018"):
    """Run one model/datamode combination and report stint-level metrics.

    input:
        modelname ; label of the configuration (logging only)
        model     ; model name passed to run_exp/load_model
        datamode  ; MODE_* bit flags
        naivemode ; True -> score the naive "rank unchanged" baseline
        trainid   ; id of the trained model to load
    return:
        (acc, mae, rmse, r2), or (0, 0, 0, 0) for an unsupported _exp_id
    """
    forecast = run_exp(2,2, train_ratio =0.1 , trainid = trainid,
                       datamode=datamode, model=model)

    if _exp_id=='rank' or _exp_id=='timediff2rank':
        forecasts_et = eval_stint_direct(forecast, 2)
    elif _exp_id=='laptime2rank':
        forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
    else:
        print(f'Error, {_exp_id} evaluation not support yet')
        return 0,0, 0,0

    df = get_stint_acc(forecasts_et, currank = naivemode, trim= _trim)

    # accuracy on the sign of the rank change
    correct = df[df['sign']==df['pred_sign']]
    acc = len(correct)/len(df)

    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values))/len(df)

    # NOTE(review): sklearn metrics take (y_true, y_pred); the predictions
    # are passed first here. mse/mae are symmetric, but r2_score is not --
    # confirm the intended argument order.
    rmse = mean_squared_error(df['pred_diff'].values , df['diff'].values)
    mae = mean_absolute_error(df['pred_diff'].values , df['diff'].values)
    r2 = r2_score(df['pred_diff'].values , df['diff'].values)
    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}')

    return acc, mae, rmse, r2
def get_evalret(df):
    """
    Print and return accuracy/MAE/RMSE/R2 of the stint rank-change
    predictions in df, together with a naive "rank unchanged" baseline.

    input:
        df ; DataFrame produced by get_stint_acc
    return:
        (acc, mae, rmse, r2)
    """
    pred_vals = df['pred_diff'].values
    true_vals = df['diff'].values
    total = len(df)

    # accuracy on the sign of the rank change
    hit = df[df['sign']==df['pred_sign']]
    acc = len(hit)/total

    mae1 = np.sum(np.abs(pred_vals - true_vals))/total
    rmse = mean_squared_error(pred_vals , true_vals)
    mae = mean_absolute_error(pred_vals , true_vals)
    r2 = r2_score(pred_vals , true_vals)

    #naive result
    naive_hit = df[df['startrank']==df['endrank']]
    acc_naive = len(naive_hit)/total
    mae_naive = np.mean(np.abs(true_vals))

    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    return acc, mae, rmse, r2
# In[20]:
def mytest():
    """Evaluate every configured model/datamode combination and save a CSV.

    NOTE(review): the existence check uses a file name that includes
    `_trim`, but the results are written to a name without it, so the check
    never guards the actual output file -- confirm which name is intended.
    ('evluate' is a long-standing typo kept so existing result files match.)
    """
    savefile = f'stint-evluate-{_exp_id}-d{_dataset_id}-t{_test_event}-c{_context_ratio}_trim{_trim}.csv'
    if os.path.exists(savefile):
        print(f'{savefile} already exists, bye')
        retdf = pd.read_csv(savefile)
        return

    # runid -> (model name, MODE_* flags, naivemode)
    config = {'fulloracle':['oracle',MODE_ORACLE,False],
              'laponly':['oracle',MODE_ORACLE_LAPONLY,False],
              'notracklap':['oracle',MODE_NOTRACK + MODE_NOLAP,False],
              'fullpred':['oracle',MODE_PREDTRACK + MODE_PREDPIT,False],
              'curtrack':['oracle',MODE_TESTCURTRACK,False],
              'zerotrack':['oracle',MODE_TESTZERO,False],
              'predtrack':['oracle',MODE_PREDTRACK + MODE_ORACLE_TRACKONLY,False],
              'predpit':['oracle',MODE_PREDPIT + MODE_ORACLE_LAPONLY,False],
              'deepAR':['deepAR',MODE_ORACLE,False],
              'naive':['zero',MODE_ORACLE, True],
             }

    cols = ['runid','acc','mae', 'rmse', 'r2']
    result = []
    for modelname in config.keys():
        acc, mae, rmse, r2 = runtest(modelname, config[modelname][0],
                                     config[modelname][1],config[modelname][2])
        result.append([modelname, acc, mae, rmse, r2])

    retd = pd.DataFrame(result,columns=cols)
    retd.to_csv(f'stint-evluate-{_exp_id}-d{_dataset_id}-t{_test_event}-c{_context_ratio}.csv', float_format='%.3f')
    return retd
# In[ ]:
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)

    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))

    # cmd argument parser
    usage = 'stint_predictor_fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    parser.add_option("--contextratio", dest="contextratio", default=0.)
    parser.add_option("--trim", dest="trim", type=int, default=2)

    opt, args = parser.parse_args()

    #set global parameters
    _dataset_id = opt.datasetid
    _test_event = opt.testevent
    _trim = opt.trim

    # map the task id onto the (task, time-series column, eval mode) globals
    if opt.taskid == 'laptime':
        _task_id = 'laptime'  # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME  #COL_LAPTIME,COL_RANK
        _exp_id='laptime2rank'  #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff'  # rank,laptime, the trained model's task
        _run_ts = COL_TIMEDIFF  #COL_LAPTIME,COL_RANK
        _exp_id='timediff2rank'  #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'rank':
        _task_id = 'rank'  # rank,laptime, the trained model's task
        _run_ts = COL_RANK  #COL_LAPTIME,COL_RANK
        _exp_id='rank'  #rank, laptime, laptim2rank, timediff2rank...
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)

    if _dataset_id=='' or _test_event=='':
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)

    # pitage feature variants are encoded in the dataset id
    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE

    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
                _task_id)

    init()
    mytest()
| 138,147 | 36.611762 | 310 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepfactor_simindy500.py | #!/usr/bin/env python
# coding: utf-8
# # DeepAR on simulation indy500 laptime dataset
#
# laptime dataset
# <eventid, carids, laptime (totalcars x totallaps)>
# Third-party imports
import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import logging
import os,sys
from optparse import OptionParser
import pickle
from pathlib import Path
from gluonts.dataset.common import ListDataset
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator
logger = logging.getLogger(__name__)
#global variables
prediction_length = 50  # laps held out for testing / forecast horizon
freq = "1H"  # artificial frequency: lap index mapped onto an hourly grid
def load_dataset(inputfile):
    """Load the pickled simulation laptime dump and build gluonts datasets.

    The pickle holds a list of (eventid, carids, laptime matrix) tuples;
    train series drop the last `prediction_length` laps, test series keep
    the full length.
    """
    with open(inputfile, 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        laptime_data = pickle.load(f, encoding='latin1')

    print(f"number of runs: {len(laptime_data)}")

    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series

    train_set = []
    test_set = []
    cardinality = []

    #_data: eventid, carids, laptime array
    for _data in laptime_data:
        #_train = [{'target': x.astype(np.float32), 'start': start}
        #        for x in _data[2][:, :-prediction_length]]
        #_test = [{'target': x.astype(np.float32), 'start': start}
        #        for x in _data[2]]

        carids = list(_data[1].values())

        # NOTE(review): gluonts typically expects 'feat_static_cat' as a
        # list/array of category ids; a bare int relies on version-specific
        # behavior -- confirm against the installed gluonts
        _train = [{'target': _data[2][rowid, :-prediction_length].astype(np.float32), 'start': start,
                   'feat_static_cat': rowid}
                  for rowid in range(_data[2].shape[0]) ]
        _test = [{'target': _data[2][rowid, :].astype(np.float32), 'start': start, 'feat_static_cat': rowid}
                 for rowid in range(_data[2].shape[0]) ]

        train_set.extend(_train)
        test_set.extend(_test)
        cardinality.append(len(carids))

    # train dataset: cut the last window of length "prediction_length", add "target" and "start" fields
    train_ds = ListDataset(train_set, freq=freq)
    # test dataset: use the whole dataset, add "target" and "start" fields
    test_ds = ListDataset(test_set, freq=freq)

    return train_ds, test_ds
def plot_prob_forecasts(ts_entry, forecast_entry, outputfile):
    """Plot the tail of one observed series with its probabilistic forecast
    (50%/90% intervals) and save the figure as <outputfile>.pdf."""
    plot_length = 150
    prediction_intervals = (50.0, 90.0)
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]

    fig, ax = plt.subplots(1, 1, figsize=(10, 7))
    ts_entry[-plot_length:].plot(ax=ax)  # plot the time series
    forecast_entry.plot(prediction_intervals=prediction_intervals, color='g')
    plt.grid(which="both")
    plt.legend(legend, loc="upper left")
    plt.savefig(outputfile + '.pdf')
def evaluate_model(estimator, train_ds, test_ds, outputfile):
    """Train the estimator, serialize it into directory `outputfile`, plot
    one sample forecast and log aggregate accuracy metrics."""
    predictor = estimator.train(train_ds)

    if not os.path.exists(outputfile):
        os.mkdir(outputfile)
    predictor.serialize(Path(outputfile))

    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )

    forecasts = list(forecast_it)
    tss = list(ts_it)

    # Indy500 Car 12 WillPower
    ts_entry = tss[7]
    forecast_entry = forecasts[7]
    plot_prob_forecasts(ts_entry, forecast_entry, outputfile)

    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
    logger.info(json.dumps(agg_metrics, indent=4))
def init_estimator(model, gpuid, epochs=100):
    """Build the gluonts estimator selected by `model` for gpu `gpuid`.

    Supported models: 'deepAR', 'simpleFF', 'deepFactor'; any other name
    logs an error and exits.
    NOTE(review): when set on the command line, `epochs` arrives as a raw
    string (optparse does no type conversion for it) -- confirm Trainer
    accepts that.
    """
    if model == 'deepAR':
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length=2*prediction_length,
            use_feat_static_cat=True,
            cardinality=[33],  # number of static car-id categories
            freq=freq,
            trainer=Trainer(ctx="gpu(%s)"%gpuid,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=64
                           )
        )
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length=2*prediction_length,
            freq=freq,
            trainer=Trainer(ctx="gpu(%s)"%gpuid,
                            epochs=epochs,
                            learning_rate=1e-3,
                            hybridize=False,
                            num_batches_per_epoch=64
                           )
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length=2*prediction_length,
            freq=freq,
            trainer=Trainer(ctx="gpu(%s)"%gpuid,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=64
                           )
        )
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)

    return estimator
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)

    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))

    # cmd argument parser
    usage = 'deepar_simindy500.py --epochs epochs --input inputpicklefile --output outputfile'
    parser = OptionParser(usage)
    parser.add_option("--input", dest="inputfile", default='sim-indy500-laptime-2018.pickle')
    parser.add_option("--output", dest="outputfile")
    # fix: parse epochs as int; optparse otherwise hands the raw string to
    # Trainer(epochs=...), which expects a number
    parser.add_option("--epochs", dest="epochs", type="int", default=100)
    parser.add_option("--model", dest="model", default="deepAR")
    parser.add_option("--gpuid", dest="gpuid", default=0)

    opt, args = parser.parse_args()

    train_ds, test_ds = load_dataset(opt.inputfile)
    estimator = init_estimator(opt.model, opt.gpuid, opt.epochs)
    evaluate_model(estimator, train_ds, test_ds, opt.outputfile)
| 6,554 | 31.939698 | 118 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepmodels_simindy500.py | #!/usr/bin/env python
# coding: utf-8
"""
Deep Models on the Indy500 simulation dataset
simulation dataset:
laptime&rank dataset <eventid, carids, laptime (totalcars x totallaps), rank (totalcars x totallaps)>; filled with NaN
deep models:
deepAR, deepstate, deepFactor
"""
# # DeepAR on simulation indy500 laptime dataset
#
# laptime dataset
# <eventid, carids, laptime (totalcars x totallaps)>
# Third-party imports
import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import logging
import os,sys
from optparse import OptionParser
import pickle
from pathlib import Path
from gluonts.dataset.common import ListDataset
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator
logger = logging.getLogger(__name__)

#global variables
# defaults; __main__ overwrites these from the command line
prediction_length = 50
context_length = 100
test_length = 50
freq = "1H"  # artificial frequency: lap index mapped onto an hourly grid
def load_dataset(inputfile):
    """
    Build gluonts train/test ListDatasets from a pickled laptime dump.

    input:
        inputfile ; pickle holding a list of
                    (eventid, carids, laptime matrix) tuples
    return:
        (train_ds, test_ds) ; train series are truncated by the module
        global test_length, test series keep the full length
    """
    with open(inputfile, 'rb') as f:
        # protocol version of the pickle is detected automatically
        laptime_data = pickle.load(f, encoding='latin1')

    print(f"number of runs: {len(laptime_data)}")

    # the same artificial start stamp is used for every series
    start = pd.Timestamp("01-01-2019", freq=freq)

    train_set = []
    test_set = []
    cardinality = []

    # run: (eventid, carids, laptime matrix of shape cars x laps)
    for run in laptime_data:
        car_count = run[2].shape[0]
        train_set.extend(
            {'target': run[2][row, :-test_length].astype(np.float32),
             'start': start,
             'feat_static_cat': row}
            for row in range(car_count))
        test_set.extend(
            {'target': run[2][row, :].astype(np.float32),
             'start': start,
             'feat_static_cat': row}
            for row in range(car_count))
        cardinality.append(len(list(run[1].values())))

    # train dataset: last "test_length" laps cut off each series
    train_ds = ListDataset(train_set, freq=freq)
    # test dataset: the full-length series
    test_ds = ListDataset(test_set, freq=freq)

    return train_ds, test_ds
def plot_prob_forecasts(ts_entry, forecast_entry, outputfile):
    """Plot each selected series with its probabilistic forecast
    (50%/90% intervals), one figure per entry, saved as <outputfile>-<i>.pdf."""
    plot_length = 150
    prediction_intervals = (50.0, 90.0)
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]

    figcnt = len(ts_entry)
    #fig, axs = plt.subplots(figcnt, 1, figsize=(10, 7))
    #for idx in range(figcnt):
    #    ts_entry[idx][-plot_length:].plot(ax=axs[idx])  # plot the time series
    #    forecast_entry[idx].plot(prediction_intervals=prediction_intervals, color='g')
    #    axs[idx].grid(which="both")
    #    axs[idx].legend(legend, loc="upper left")
    for idx in range(figcnt):
        fig, axs = plt.subplots(1, 1, figsize=(10, 7))
        ts_entry[idx][-plot_length:].plot(ax=axs)  # plot the time series
        forecast_entry[idx].plot(prediction_intervals=prediction_intervals, color='g')
        plt.grid(which="both")
        plt.legend(legend, loc="upper left")
        plt.savefig(outputfile + '-%d.pdf'%idx)
def evaluate_model(estimator, train_ds, test_ds, outputfile):
    """Train the estimator, plot three sample cars' forecasts and log
    aggregate accuracy metrics over the test set."""
    predictor = estimator.train(train_ds)

    #if not os.path.exists(outputfile):
    #    os.mkdir(outputfile)
    #predictor.serialize(Path(outputfile))

    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )

    forecasts = list(forecast_it)
    tss = list(ts_it)
    logger.info(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    # car12@rank1, car1@rank16, car7@rank33, the index is 7,0,4 accordingly
    # Indy500 Car 12 WillPower
    ts_entry = [tss[7],tss[0],tss[4]]
    forecast_entry = [forecasts[7],forecasts[0],forecasts[4]]
    plot_prob_forecasts(ts_entry, forecast_entry, outputfile)

    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
    logger.info(json.dumps(agg_metrics, indent=4))
def init_estimator(model, gpuid, epochs=100):
    """Build the gluonts estimator selected by `model` for gpu `gpuid`.

    Supported models: 'deepAR', 'simpleFF', 'deepFactor', 'deepState';
    any other name logs an error and exits. Uses the module globals
    prediction_length / context_length / freq.
    NOTE(review): when set on the command line, `epochs` arrives as a raw
    string (optparse does no type conversion for it) -- confirm Trainer
    accepts that.
    """
    if model == 'deepAR':
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=True,
            cardinality=[33],  # number of static car-id categories
            freq=freq,
            trainer=Trainer(ctx="gpu(%s)"%gpuid,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
        )
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            trainer=Trainer(ctx="gpu(%s)"%gpuid,
                            epochs=epochs,
                            learning_rate=1e-3,
                            hybridize=False,
                            num_batches_per_epoch=100
                           )
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            trainer=Trainer(ctx="gpu(%s)"%gpuid,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
        )
    elif model == 'deepState':
        # deepState takes no context_length here
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            use_feat_static_cat=True,
            cardinality=[33],
            freq=freq,
            trainer=Trainer(ctx="gpu(%s)"%gpuid,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
        )
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)

    return estimator
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)

    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))

    # cmd argument parser
    usage = 'deepar_simindy500.py --epochs epochs --input inputpicklefile --output outputfile'
    parser = OptionParser(usage)
    parser.add_option("--input", dest="inputfile", default='sim-indy500-laptime-2018.pickle')
    parser.add_option("--output", dest="outputfile")
    # fix: parse epochs as int, matching the int() conversions applied to
    # the length options below; the raw string otherwise reaches Trainer
    parser.add_option("--epochs", dest="epochs", type="int", default=100)
    parser.add_option("--model", dest="model", default="deepAR")
    parser.add_option("--gpuid", dest="gpuid", default=0)
    parser.add_option("--contextlen", dest="contextlen", default=100)
    parser.add_option("--predictionlen", dest="predictionlen", default=50)
    parser.add_option("--testlen", dest="testlen", default=50)

    opt, args = parser.parse_args()

    #set the global length
    prediction_length = int(opt.predictionlen)
    context_length = int(opt.contextlen)
    test_length = int(opt.testlen)

    runid = f'-i{opt.outputfile}-e{opt.epochs}-m{opt.model}-p{opt.predictionlen}-c{opt.contextlen}-t{opt.testlen}'
    logger.info("runid=%s", runid)

    train_ds, test_ds = load_dataset(opt.inputfile)
    estimator = init_estimator(opt.model, opt.gpuid, opt.epochs)
    evaluate_model(estimator, train_ds, test_ds, opt.outputfile)
| 8,467 | 32.470356 | 118 | py |
rankpredictor | rankpredictor-master/src/indycar/model/stint_predictor_fastrun.py | #!/usr/bin/env python
# coding: utf-8
# ## Stint-Predictor-Fastrun
#
# based on: LongTerm-Predictor
#
# long term predictor by continuously regressive forecasting
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# In[2]:
import os
random.seed()
os.getcwd()
#GPUID = 1
# ### global constants
# In[3]:
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# row indices into the per-car laptime_data matrices
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6

# feature-set selectors (see _feature_mode)
FEATURE_STATUS = 2
FEATURE_PITAGE = 4

# oracle mode
MODE_ORACLE = 1024  # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2

# oracle mode for training
# NOTE: these reuse the values 1/2 of the flags above; they belong to a
# separate flag namespace and must not be mixed with the oracle-only flags
MODE_NOLAP = 1
MODE_NOTRACK = 2

# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32

# disturbe analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256

# flag value -> human-readable name, used by get_modestr()
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
             MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
             MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
             MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
             MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
             MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
# In[ ]:
# In[4]:
def load_data(event, year=0):
    """
    Load one event's C_ lap-record CSV and derive rank tables.

    input:
        event ; event name, e.g. 'Indy500-2018' (reads C_<event>.csv)
        year  ; optional int; when > 0 reads C_<event>-<year>.csv instead
    return:
        alldata  ; raw records for all cars
        rankdata ; records ordered by crossing time, one row per (car, lap)
        acldata  ; completed-laps table built by make_cl_data over all cars
    """
    #inputfile = '../data/final/C_'+ event +'-' + year + '-final.csv'
    if year>0:
        # fix: the old "'-' + year" concatenation raised TypeError for the
        # int year this branch tests for; format instead
        inputfile = f'../data/final/C_{event}-{year}.csv'
    else:
        inputfile = f'../data/final/C_{event}.csv'

    dataset = pd.read_csv(inputfile)

    final_lap = max(dataset.completed_laps)

    # get records for the cars that finish the race
    completed_car_numbers= dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)

    #make a copy
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]

    # global rank order: first across the line on each lap comes first
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # the old `cldata = make_cl_data(dataset)` was removed: its result was
    # computed but never used
    acldata = make_cl_data(alldata)

    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """
    Build the per-(car, lap) table with rank/time deltas.

    input:
        dataset ; raw lap records; must contain the columns dropped and
                  selected below
    output:
        DataFrame: car_number, completed_laps, rank, elapsed_time,
                   rank_diff, time_diff, current_status, track_status,
                   lap_status -- sorted by car then lap
    """
    # pick up data with valid rank: order records by when the line was
    # crossed, keep the first record per (car, lap)
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
                          "last_pitted_lap","start_position","laps_led"], axis=1)

    # per-car first-order differences; each car's first lap is forced to 0
    # where the car number changes between consecutive rows.
    # fix: use .loc instead of chained indexing, which triggers
    # SettingWithCopyWarning and silently does nothing under pandas
    # copy-on-write
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds.loc[mask, 'rank_diff'] = 0

    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds.loc[mask, 'time_diff'] = 0

    df = uni_ds[['car_number','completed_laps','rank','elapsed_time',
                 'rank_diff','time_diff',"current_status", "track_status", "lap_status"]]
    return df
# In[5]:
def nan_helper(y):
    """Locate NaNs in a 1-d array and provide an index converter.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, boolean mask marking the NaN positions
        - index, converts a logical mask to integer indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)

    def to_indices(logical):
        return logical.nonzero()[0]

    return mask, to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
def get_modestr(a):
    """Render the MODE_* bit flags set in `a` as a comma-terminated string."""
    parts = []
    for flag in _mode_map:
        if test_flag(a, flag):
            parts.append('%s,' % (_mode_map[flag]))
    return ''.join(parts)
# endpos -> vector of prediction_length
_track_pred = {}
_track_true = {}
def init_track_model():
    """Reset the per-endpos caches of predicted and true track status."""
    global _track_pred,_track_true
    _track_pred = {}
    _track_true = {}
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """
    return the predicted track status

    Predict the 0/1 track-status (yellow-flag) vector for the last
    `prediction_length` laps ending at `endpos`; results are cached per
    endpos so repeated calls are consistent.

    input:
        track_rec ; full 0/1 track-status sequence (1 = yellow flag)
        endpos    ; cache key, end position of the prediction window
        prediction_length ; laps to predict
        context_len ; how far back to count an ongoing yellow-flag run
    """
    global _track_pred,_track_true
    # this is the perfect track model for Indy500 2018
    # (candidate yellow-flag durations, sampled uniformly below)
    track_model = [6,4,4,5,6,6,4]

    if endpos in _track_pred:
        return _track_pred[endpos]
    else:
        #get yflag lap count from the start pred point
        yflaplen = 0
        for i in range(1, context_len):
            if track_rec[- prediction_length - i] == 1:
                yflaplen += 1
            else:
                break

        #laps remain, fill into the future
        trackpred = np.array([0 for x in range(prediction_length)])
        yflap_pred = random.choice(track_model)
        # extend an ongoing yellow only for its sampled remaining laps
        if yflaplen > 0 and yflap_pred > yflaplen:
            trackpred[:(yflap_pred - yflaplen)] = 1
        _track_pred[endpos] = trackpred
        # keep the ground truth alongside for later comparison
        _track_true[endpos] = track_rec[- prediction_length:].copy()

        return trackpred
# endpos -> vector of prediction_length
_track_adjust = {}
def init_adjust_track_model():
    """Reset the per-endpos cache of disturbed track-status vectors."""
    global _track_adjust
    _track_adjust = {}
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """
    Randomly disturb the oracle track status in the prediction window,
    caching the result per endpos.

    input:
        tailpos ; <0 end pos of 1 (the trailing yellow-flag lap, indexed
                  from the window end)
    return the predicted track status (adjusted copy of the last
    prediction_length laps)
    """
    global _track_adjust
    # random shift applied to the flag tail: -1 / 0 / +1
    track_model = [-1,0,1]

    if endpos in _track_adjust:
        return _track_adjust[endpos]
    else:
        yflap_adjust = random.choice(track_model)

        #laps remain, fill into the future
        trackadjust = track_rec[-prediction_length:].copy()
        # NOTE(review): both the -1 and +1 branches clear trackadjust[tailpos];
        # for -1 the flag lap is removed without being re-added earlier --
        # confirm "shrink by one lap" is the intended -1 behavior
        if yflap_adjust == -1:
            trackadjust[tailpos] = 0
        elif yflap_adjust == 1:
            trackadjust[tailpos] = 0
            if (tailpos + 1) <= -1:
                trackadjust[tailpos+1] = 1

        _track_adjust[endpos] = trackadjust

        return trackadjust
# carno -> lap_status
_lap_adjust = {}
_empirical_model = {}
def init_adjust_pitmodel():
    """Reset the per-car adjusted lap-status cache and the shift statistics.

    BUG FIX: ``_empirical_model`` was assigned without a ``global``
    declaration, so the old code created a dead local and never actually
    cleared the module-level statistics dict.
    """
    global _lap_adjust, _empirical_model
    _lap_adjust = {}
    _empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """
    Return a randomly perturbed copy of the true lap status, cached per car.

    input:
        carno ; car number, used as the cache key
        lapstatus ; the ground-truth pit indicator vector
        force ; when True, keep redrawing until every pit lap lands on a
                valid position; when False, give up after one failed draw
    """
    if carno not in _lap_adjust:
        perturbed = lapstatus.copy()
        for pos in range(0, len(lapstatus)):
            if perturbed[pos] != 1:
                continue
            while True:
                # draw a random shift for this pit lap
                shift = get_random_choice(_adjust_model)
                target = pos + shift
                if 0 <= target < len(lapstatus):
                    # valid: move the pit lap and record the shift statistics
                    perturbed[pos] = 0
                    perturbed[target] = 1
                    if shift not in _empirical_model:
                        _empirical_model[shift] = 1
                    else:
                        _empirical_model[shift] += 1
                    break
                if force == False:
                    break
        _lap_adjust[carno] = perturbed
    return _lap_adjust[carno]
def build_random_model(modeldict):
    """
    Turn a {value: probability} mapping into a normalized CDF table.

    input:
        modeldict ; {value: probability} (need not be normalized)
    return:
        model ; array of rows [value, cdf], sorted by value, cdf ending at 1
    """
    model = np.zeros((len(modeldict), 2))
    running = 0
    for row, value in enumerate(sorted(modeldict.keys())):
        running = running + modeldict[value]
        model[row, 0] = value
        model[row, 1] = running
        running = model[row, 1]
    # normalize so the last cdf entry is 1
    model[:, 1] = model[:, 1] / running
    return model
def print_model(model, iscdf=True):
    """
    Pretty-print a [value, cdf] (or [value, weight]) table as value:prob pairs.

    input:
        model ; rows of [value, cdf] when iscdf, else [value, weight]
    """
    ordered = model[np.argsort(model[:, 0])]
    total = 1. if iscdf else np.sum(ordered[:, 1])
    prev_cdf = 0
    pairs = []
    for row in ordered:
        # for a cdf, the probability is the difference to the previous entry
        pairs.append((row[0], (row[1] - prev_cdf) / total))
        if iscdf:
            prev_cdf = row[1]
    print(['%d:%.3f' % (v, p) for v, p in pairs])
def get_random_choice(model):
    """
    Draw one value from a [value, cdf] table according to its probability.

    input:
        model ; rows of [value, cdf], cdf normalized to end at 1
    return:
        the sampled value as an int
    """
    u = np.random.rand()
    # the number of cdf entries strictly below u selects the sampled row
    row = int(np.sum(model[:, 1] < u))
    return int(model[row, 0])
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
# Pit-lap shift distribution: offset -> weight (normalized by
# build_random_model). The mass at 0 was lowered from the commented-out
# version above so that most pit laps get displaced.
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
# Cumulative [value, cdf] form consumed by get_random_choice().
_adjust_model = build_random_model(_modeldict)
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """
    Randomly displace each pit lap inside the forecast window.

    input:
        lap_rec ; lap-status series; only the last prediction_length entries
                  are adjusted
        force ; when True, redraw until the shifted position is valid;
                when False, give up after one failed draw
    return the adjusted lap status for the forecast window
    """
    window = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if window[pos] != 1:
            continue
        while True:
            # draw a shift for this pit lap from the empirical model
            shift = get_random_choice(_adjust_model)
            target = pos + shift
            if 0 <= target < prediction_length:
                # valid: move the pit lap
                window[pos] = 0
                window[target] = 1
                break
            if force == False:
                break
    return window
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
    """
    Shift each pit lap in the forecast window by one uniformly drawn offset
    from {-1, 0, +1}; out-of-range shifts are dropped.

    input:
        endpos ; unused, kept for signature compatibility
    return the adjusted lap status for the forecast window
    """
    adjust_model = [-1, 0, 1]
    # NOTE: this initial draw is unused but kept to preserve the RNG sequence
    lap_adjust = random.choice(adjust_model)
    window = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if window[pos] != 1:
            continue
        # draw a shift for this pit lap
        shift = random.choice(adjust_model)
        if shift == -1:
            if pos - 1 >= 0:
                window[pos] = 0
                window[pos - 1] = 1
        elif shift == 1:
            if pos + 1 < prediction_length:
                window[pos] = 0
                window[pos + 1] = 1
    return window
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
    """
    Predict the pit-stop indicator vector for the forecast window.

    Samples a stint length from the empirical Indy500-2018 pit model and
    marks a pit lap when that length falls inside the window.
    input:
        cuation_laps_instint ; caution laps so far in the current stint
        laps_instint ; laps completed in the current stint
    return an int vector of length prediction_length with at most one 1
    """
    # this is the perfect empirical pit model for Indy500 2018 (stint lengths)
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
                     [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    # top-8 variant kept for reference (unused)
    pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
                      [46, 45, 43, 48, 46, 45, 45, 43]]
    pit_model = pit_model_all
    if cuation_laps_instint > 10:
        # many caution laps in this stint -> use the short-stint model
        pred_pit_laps = random.choice(pit_model[0])
    else:
        pred_pit_laps = random.choice(pit_model[1])
    # laps remain, fill into the future
    pitpred = np.zeros(prediction_length, dtype=int)
    if laps_instint < pred_pit_laps <= laps_instint + prediction_length:
        pitpred[pred_pit_laps - laps_instint - 1] = 1
    return pitpred
def make_dataset_byevent(runs, prediction_length, freq,
                useeid = False,
                run_ts= COL_LAPTIME,
                test_event = 'Indy500-2018',
                test_cars = [],
                use_global_dict = True,
                oracle_mode = MODE_ORACLE,
                half_moving_win = 0,
                train_ratio=0.8,
                log_transform = False,
                context_ratio = 0.,
                verbose = False
                ):
    """
    split the ts to train and test part by the ratio

    input:
        runs ; index into laptime_data for a single event, or <0 for all events
        prediction_length ; forecast horizon in laps
        freq ; pandas frequency string for the synthetic timestamps
        useeid ; include the event id as a second static categorical feature
        oracle_mode: false to simulate prediction in real by
            set the covariates of track and lap status as nan in the testset
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
        train_ratio ; fraction of the longest ts that defines the train length
        context_ratio ; when nonzero, overrides the test context length
    return:
        train_ds, test_ds ; gluonts ListDataset objects
        train_set, test_set ; the raw record lists behind them
    """
    # NOTE(review): the module-level settings below override the run_ts and
    # test_event keyword arguments, so those parameters (and test_cars) are
    # effectively ignored — confirm this is intentional.
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    init_track_model()
    init_adjust_track_model()
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    train_set = []
    test_set = []
    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out: only the test event is processed by this function
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10
        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            # untouched copy used below to measure the adjustment error (mae)
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                                     rec[COL_LAPSTATUS,:]]
                              }
                              )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1
                #bug fix, fixed the split point for all cars/ts
                # roll the split point backwards from max_len so every car is
                # evaluated at the same lap numbers
                for endpos in range(max_len, context_len+prediction_length,
                                    step):
                    #check if enough for this ts
                    if endpos > totallen:
                        continue
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    # stint state at the last observed lap before the forecast
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                                                     laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec
                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec
                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec]
                                     }
                                     )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                     }
                                     )
                    test_rec_cnt += 1
            #add one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
        train_set.extend(_train)
        test_set.extend(_test)
    print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)
    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile, freq, prediction_length, cardinality, train_ds, test_ds):
    """Pickle the dataset bundle [freq, prediction_length, cardinality, train_ds, test_ds] to *datafile*."""
    payload = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as f:
        # Pickle the payload using the highest protocol available.
        pickle.dump(payload, f, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[6]:
def predict(test_ds, predictor):
    """
    Run the GluonTS evaluation loop and materialize its lazy iterators.

    input:
        test_ds ; test dataset
        predictor ; a GluonTS predictor
    return:
        tss, forecasts ; ground-truth series and forecast objects, aligned by index
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,
        predictor=predictor,
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name, trainid):
    """
    Load the predictor selected by *model_name* and run it on *test_ds*.

    input:
        test_ds ; GluonTS test dataset
        prediction_length ; forecast horizon (also encoded in the model path)
        model_name ; a deepAR checkpoint tag (see the table below) or one of
                     the baselines 'naive' / 'zero' / 'arima'
        trainid ; training-run identifier used in the checkpoint directory
    return:
        [tss, forecasts] on success, [] for an unknown model_name
    """
    # All deepAR checkpoints share one path pattern; this table replaces the
    # previous six copy-pasted branches.
    # model_name -> (directory prefix, feature tag, run tag)
    deepar_variants = {
        'curtrack':         ('deepAR-Oracle', 'curtrack',        'curtrack'),
        'zerotrack':        ('deepAR-Oracle', 'nolap-zerotrack', 'zerotrack'),
        'oracle':           ('deepAR-Oracle', 'all',             'oracle'),
        'oracle-laponly':   ('deepAR-Oracle', 'all',             'oracle-laponly'),
        'oracle-trackonly': ('deepAR-Oracle', 'all',             'oracle-trackonly'),
        'deepAR':           ('deepAR',        'all',             'deepar'),
    }
    with mx.Context(mx.gpu(7)):
        pred_ret = []
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'
        if model_name in deepar_variants:
            prefix, tag, run = deepar_variants[model_name]
            model = f'{prefix}-{_task_id}-{tag}-indy-f1min-t{prediction_length}-e1000-r1_{run}_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        # naive: repeat the last observed value
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        # zero: predict 0, which keeps the rank unchanged
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        # arima baseline via the R forecast package
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq= freq,
                                           prediction_length = prediction_length, trunc_length=60)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        else:
            print(f'error: model {model_name} not support yet!')
        return pred_ret
def load_model(prediction_length, model_name, trainid):
    """
    Deserialize (or construct) the predictor selected by *model_name*.

    input:
        prediction_length ; forecast horizon (also encoded in the model path)
        model_name ; a deepAR checkpoint tag (see the table below) or one of
                     the baselines 'naive' / 'zero' / 'arima'
        trainid ; training-run identifier used in the checkpoint directory
    return:
        the predictor, or None for an unknown model_name
        (BUG FIX: previously `predictor` was unbound on the error path and
        the final return raised NameError)
    """
    # Same checkpoint-path table as run_prediction_ex:
    # model_name -> (directory prefix, feature tag, run tag)
    deepar_variants = {
        'curtrack':         ('deepAR-Oracle', 'curtrack',        'curtrack'),
        'zerotrack':        ('deepAR-Oracle', 'nolap-zerotrack', 'zerotrack'),
        'oracle':           ('deepAR-Oracle', 'all',             'oracle'),
        'oracle-laponly':   ('deepAR-Oracle', 'all',             'oracle-laponly'),
        'oracle-trackonly': ('deepAR-Oracle', 'all',             'oracle-trackonly'),
        'deepAR':           ('deepAR',        'all',             'deepar'),
    }
    with mx.Context(mx.gpu(7)):
        predictor = None
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'
        if model_name in deepar_variants:
            prefix, tag, run = deepar_variants[model_name]
            model = f'{prefix}-{_task_id}-{tag}-indy-f1min-t{prediction_length}-e1000-r1_{run}_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # naive: repeat the last observed value
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
        # zero: predict 0, which keeps the rank unchanged
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
        # arima baseline via the R forecast package
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq= freq,
                                           prediction_length = prediction_length, trunc_length=60)
        else:
            print(f'error: model {model_name} not support yet!')
        return predictor
# In[7]:
#calc rank
def eval_rank_bytimediff(test_ds, tss, forecasts, prediction_length):
    """
    Derive rank predictions from time-difference forecasts.

    timediff models; works for one event only.
    input:
        test_ds ; test dataset for a single event, aligned with tss/forecasts
        tss, forecasts ; output of make_evaluation_predictions
        prediction_length ; expected forecast horizon
    return:
        rank_ret ; list of [lap, time_diff, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [true_timediff, pred_timediff]}}
    """
    carlist = []
    # completed_laps -> {carno: [true, predicted] timediff arrays}
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # BUG FIX: the f prefix was missing, so the placeholders were
            # printed literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []
        # point forecast: median over the sample paths
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))
        timediff_array = tss[idx].values.copy()
        # save the prediction, grouped by the split point (completed laps)
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                                               forecast_laptime_mean.copy()]
    # calc rank per split point
    rank_ret = []
    for lap in forecasts_et.keys():
        # cars present at this split point
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}
        time_diff = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        # double argsort converts time differences to 0-based ranks
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, time_diff, true_rank, pred_rank])
    return rank_ret, forecasts_et
#calc rank
def eval_laptime(test_ds, tss, forecasts, prediction_length, start_offset):
    """
    evaluate lap time forecasting
    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event
                (not used in this laptime-only variant, kept for signature
                compatibility with eval_rank)
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, lap_time, true_laptime, pred_laptime]
        forecasts_et ; {[completed_laps][carno]} ->(laptime, laptime_pred)
    """
    carlist = []
    # completed_laps -> {carno: [true, predicted] laptime arrays}
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # BUG FIX: the f prefix was missing, so the placeholders were
            # printed literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []
        # point forecast: mean over the sample paths
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))
        laptime_array = tss[idx].values.copy()
        laptime_array_hat = tss[idx].values.copy()
        laptime_array_hat[-prediction_len:] = forecast_laptime_mean
        # save the prediction, grouped by the split point (completed laps)
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
                                               laptime_array_hat[-prediction_len:].copy()]
    # collect true/predicted laptime matrices per split point
    rank_ret = []
    for lap in forecasts_et.keys():
        # cars present at this split point
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}
        lap_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        true_laptime = lap_time[0]
        pred_laptime = lap_time[1]
        rank_ret.append([lap, lap_time, true_laptime, pred_laptime])
    return rank_ret, forecasts_et
#calc rank
def eval_rank(test_ds, tss, forecasts, prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting
    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event; when a
                DataFrame is passed, laptime forecasts are converted to
                elapsed time via cumsum, otherwise the target is ranked
                directly
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
    """
    carlist = []
    # completed_laps -> {carno: [true, predicted] elapsed-time arrays}
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        # a DataFrame start_offset selects the laptime->rank conversion mode
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number'] == carno)].elapsed_time.values[0]
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # BUG FIX: the f prefix was missing, so the placeholders were
            # printed literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []
        # point forecast: mean over the sample paths
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))
        if isinstance(start_offset, pd.core.frame.DataFrame):
            # laptime2rank: convert laptimes to elapsed time with the lap-0 offset
            laptime_array = tss[idx].values.copy()
            elapsed_time = np.cumsum(laptime_array) + offset
            laptime_array = tss[idx].values.copy()
            laptime_array[-prediction_len:] = forecast_laptime_mean
            elapsed_time_hat = np.cumsum(laptime_array) + offset
        else:
            # rank-direct: the target already orders the cars
            elapsed_time = tss[idx].values.copy()
            elapsed_time_hat = tss[idx].values.copy()
            elapsed_time_hat[-prediction_len:] = forecast_laptime_mean
        # save the prediction, grouped by the split point (completed laps)
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]
    # calc rank per split point
    rank_ret = []
    for lap in forecasts_et.keys():
        # cars present at this split point
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        # double argsort converts elapsed times to 0-based ranks
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])
    return rank_ret, forecasts_et
def get_acc(rank_ret, prediction_length, verbose = False):
    """
    Aggregate rank-accuracy metrics over all forecast windows.

    input:
        rank_ret: list of [lap, elapsed_time, true_rank, pred_rank]; only
                  the [2] and [3] columns are used
    return:
        ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
         (matching record counts per metric))
        so the caller can compute micro/macro averages
    """
    top1acc = 0
    top1acc_farmost = 0
    top5acc = 0
    top5acc_farmost = 0
    tau = 0
    rmse = 0.
    mae = 0.
    for rec in rank_ret:
        true_rank = rec[2]
        pred_rank = rec[3]
        # rank 0 is the leader; score every step of the window
        top1acc += np.sum((true_rank == 0) & (pred_rank == 0))
        # 'farmost' variants score only the last (furthest) forecast step
        top1acc_farmost += np.sum((true_rank[:, -1] == 0) & (pred_rank[:, -1] == 0))
        top5acc += np.sum((true_rank < 5) & (pred_rank < 5))
        top5acc_farmost += np.sum((true_rank[:, -1] < 5) & (pred_rank[:, -1] < 5))
        correlation, _ = stats.kendalltau(true_rank, pred_rank)
        tau += correlation
        rmse += mean_squared_error(pred_rank, true_rank)
        mae += np.sum(np.abs(pred_rank - true_rank))
    recnt = len(rank_ret)
    if recnt > 0:
        top1acc = top1acc * 1.0 / (recnt * prediction_length)
        top1acc_farmost = top1acc_farmost * 1.0 / recnt
        top5acc = top5acc * 1.0 / (5 * recnt * prediction_length)
        top5acc_farmost = top5acc_farmost * 1.0 / (5 * recnt)
        tau = tau / recnt
        rmse = rmse / recnt
        mae = mae / recnt
    # debug only: when forecasting lap status, report mae in the tau slot
    if _run_ts == COL_LAPSTATUS:
        tau = mae
    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', top1acc,
              'top1acc_farmost=', top1acc_farmost,
              'top5acc=', top5acc,
              'top5acc_farmost=', top5acc_farmost,
              )
        print('tau = ', tau,
              'rmse = ', rmse,
              'mae = ', mae)
    return ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
            (recnt * prediction_length, recnt, 5 * recnt * prediction_length, 5 * recnt, recnt, recnt))
# In[ ]:
# In[8]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc, datamode='', models=[]):
    """
    Run the whole experiment grid several times and average the metrics.

    NOTE(review): `models=[]` is a mutable default argument; it is only read
    here, but consider switching to None if it is ever mutated.

    input:
        plens=[2,5,10]
        half=[False]
        #trainids = ["indy500-r0.2","indy500-r0.4","indy500"]
        trainids = ["r0.5"]
        #half=[True,False]
        #plens=[2]
        runs = 5
        train_ratio=0.5
        exp_id='mean-splitbystage-predpit'
        testfunc ; run_exp_predpit, run_exp_predtrack, run_exp ... ; when a
                   string is passed, run_exp is used with datamode/models
    return:
        dfret ; average result of multiple runs
            dataframe['model' , 'prediction_length', 'halfmode','datamode','trainid',
            'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse',
            'top1acc_std','top1acc_farmost_std','top5acc_std','top5acc_farmost_std','tau_std','rmse_std']
        alldata_ret ; for debug
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
            pred_ret[model] -> [tss, forecasts]
            test_ds[model] -> test_ds
            rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
            forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    if plens == [] or half == [] or trainids == []:
        print("error with empty settings")
        return
    #testfunc or (datamode & models)
    if isinstance(testfunc,str) and (datamode == '' or models == []):
        print("error with testfunc")
        return
    allret = []
    alldata_ret = []
    for runid in range(runs):
        exp_data = []
        exp_result = []
        # one full sweep over the (halfmode, plen, trainid) grid
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('='*10)
                    if not isinstance(testfunc,str):
                        pred_ret, test_ds, rank_ret, metric_ret = testfunc(plen, halfmode,
                                               train_ratio=train_ratio,
                                               trainid=trainid)
                    else:
                        pred_ret, test_ds, rank_ret, metric_ret = run_exp(plen, halfmode,
                                               train_ratio=train_ratio,
                                               trainid=trainid,
                                               datamode=datamode,
                                               models=models)
                    #save
                    exp_data.append((pred_ret, test_ds, rank_ret))
                    exp_result.extend(metric_ret)
        #save result
        result = pd.DataFrame(exp_result, columns = ['model' , 'prediction_length', 'halfmode',
                                'datamode','trainid',
                                'top1acc','top1acc_farmost','top5acc',
                                'top5acc_farmost','tau','rmse'])
        #result['runid'] = [runid for x in range(len(result))]
        allret.append(result)
        alldata_ret.append(exp_data)
    #final: stack the per-run metric matrices and average over runs
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :,:] = ret[['top1acc','top1acc_farmost','top5acc',
                        'top5acc_farmost','tau','rmse']].values
    #average
    averagemat = np.mean(metrics[:,:,:], axis=0)
    stdmat = np.std(metrics[:,:,:], axis=0)
    dfhead = allret[0][['model' , 'prediction_length', 'halfmode', 'datamode','trainid']]
    dfaverage = pd.DataFrame(averagemat, columns = ['top1acc','top1acc_farmost','top5acc',
                        'top5acc_farmost','tau','rmse'])
    dfstd = pd.DataFrame(stdmat, columns = ['top1acc_std','top1acc_farmost_std','top5acc_std',
                        'top5acc_farmost_std','tau_std','rmse_std'])
    dfret = pd.concat([dfhead, dfaverage, dfstd], axis=1)
    #if exp_id != '':
    #    dfret.to_csv(f'laptime2rank-evaluate-indy500-{exp_id}-result.csv', float_format='%.3f')
    return dfret, alldata_ret
def checkret_status(dataret, runid = 0, idx = 0,model='oracle'):
    """
    check the test_ds track and lap status

    Sums the yellow-flag (track status) and pit (lap status) indicators over the
    prediction window of every record in one test dataset and prints the totals.

    alldata_ret ; for debug
       [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
           pred_ret[model] -> [tss, forecasts]
           test_ds[model]  -> test_ds
           rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
               forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    # prediction length is recovered from the forecast sample matrix shape
    _, plen = dataret[runid][idx][0][model][1][0].samples.shape
    test_ds = dataret[runid][idx][1][model]
    ds_iter = iter(test_ds)
    yfcnt = 0     # total yellow-flag laps inside the prediction windows
    pitcnt = 0    # total pit laps inside the prediction windows
    for recid in range(len(test_ds)):
        test_rec = next(ds_iter)
        # NOTE(review): carno is looked up but unused; kept for the side effect
        # of validating the car id against the global decode_carids map
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        track_rec,lap_rec = test_rec['feat_dynamic_real']
        # only the last plen points of each record form the prediction window
        yfcnt += np.sum(track_rec[-plen:])
        pitcnt += np.sum(lap_rec[-plen:])
    print('yfcnt:', yfcnt, 'pitcnt:',pitcnt)
def get_ref_oracle_testds(plens, halfs, train_ratio=0.8, test_cars = []):
    """
    Build oracle-mode reference test datasets for every combination of
    prediction length and moving-window mode.

    return: dict keyed by '<prediction_length>-<half_moving_win>' -> test_ds
    """
    testset = {}
    for prediction_length in plens:
        for half_moving_win in halfs:
            # only the test split is kept; train split and extras are discarded
            _, test_ds, _, _ = make_dataset_byevent(events_id[_test_event], prediction_length, freq,
                                                    oracle_mode=MODE_ORACLE,
                                                    run_ts=_run_ts,
                                                    test_cars=test_cars,
                                                    half_moving_win=half_moving_win,
                                                    train_ratio=train_ratio)
            testset['%d-%d' % (prediction_length, half_moving_win)] = test_ds
    return testset
def checkret_confusionmat(dataret, ref_testset, runid= 0, testid = '', model='oracle'):
    """
    output the 4x4 confusion matrix split by track and lap status

    For each prediction-length result in dataret[runid], the rank records are
    split into four groups by whether the prediction window contained a yellow
    flag and/or a pit stop ('00','10','01','11'), and accuracy metrics are
    computed per group plus an 'aa' (all records) row.

    input:
        dataret     ; [runid][idx] -> (pred_ret, test_ds, rank_ret), see run_test
        ref_testset ; dict '<plen>-<half>' -> oracle test ds (see get_ref_oracle_testds)
        testid      ; label copied into the output rows
    return:
        dfacc ; DataFrame with one row per (plen, laptype)
    """
    plen_length = len(dataret[runid])
    dflist = []
    for idx in range(plen_length):
        # recover plen from the forecast sample matrix of the first record
        _, plen = dataret[runid][idx][0][model][1][0].samples.shape
        test_ds = dataret[runid][idx][1][model]
        rank_ret = dataret[runid][idx][2][model][0]
        # reference set is always the half_moving_win=0 oracle variant
        key = '%d-%d'%(plen,0)
        if key not in ref_testset:
            print(f'error, {key} not found in ref_testset')
            continue
        ref_oracle_testds = ref_testset[key]
        if len(ref_oracle_testds) != len(test_ds):
            print('error, size of testds mismatch', len(ref_oracle_testds), len(test_ds))
            continue
        # confusion matrix for <trackstatus, lapstatus> type: 00,01,10,11
        # lap(start lap of prediction) -> accumulated (yellow count, pit count)
        lapmap = {}
        ds_iter = iter(ref_oracle_testds)
        for recid in range(len(ref_oracle_testds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            track_rec,lap_rec = test_rec['feat_dynamic_real']
            yfcnt = np.sum(track_rec[-plen:])
            pitcnt = np.sum(lap_rec[-plen:])
            #laptype = ('0' if yfcnt==0 else '1') + ('0' if pitcnt==0 else '1')
            lap = len(track_rec) - plen + 1
            if lap not in lapmap:
                #lapmap[lap] = laptype
                lapmap[lap] = (yfcnt, pitcnt)
            else:
                # multiple cars share the same start lap: accumulate counts
                oldtype = lapmap[lap]
                lapmap[lap] = (yfcnt + oldtype[0], pitcnt + oldtype[1])
        #split the rank_ret by laptype
        types=['00','10','01','11']
        acc_ret = []
        for laptype in types:
            check_ret = []
            for item in rank_ret:
                # item[0] is the start lap of this prediction record
                typecnt = lapmap[item[0]]
                thetype = ('0' if typecnt[0]==0 else '1') + ('0' if typecnt[1]==0 else '1')
                if thetype == laptype:
                    check_ret.append(item)
            # get acc for this subgroup only
            metrics = get_acc(check_ret,plen)
            recret = [testid, plen, laptype, len(check_ret)]
            recret.extend(metrics[0])
            acc_ret.append(recret)
        #add an 'aa' row covering all records regardless of type
        metrics = get_acc(rank_ret,plen)
        recret = [testid, plen, 'aa', len(rank_ret)]
        recret.extend(metrics[0])
        acc_ret.append(recret)
        _dfacc = pd.DataFrame(acc_ret, columns = ['testid','plen',
                    'type','reccnt','top1acc','top1acc_farmost','top5acc',
                    'top5acc_farmost','tau','rmse'])
        dflist.append(_dfacc)
    dfacc = pd.concat(dflist, axis=0)
    return dfacc
# In[9]:
def check_testds(datamode, test_cars=[]):
    """
    report mae, etc

    Builds the train/test datasets for every (prediction_length, half_moving_win)
    combination from the module-level plens/half/train_ratio settings.

    NOTE(review): the constructed datasets are discarded; presumably
    make_dataset_byevent itself prints the statistics being "reported" —
    confirm against its implementation.
    """
    for prediction_length in plens:
        for half_moving_win in half:
            train_ds, test_ds,_,_ = make_dataset_byevent(events_id[_test_event], prediction_length,freq,
                                         oracle_mode=datamode,
                                         run_ts = _run_ts,
                                         test_cars=test_cars,
                                         half_moving_win= half_moving_win,
                                         train_ratio=train_ratio)
def dotest(config):
    """
    Run every configured test and collect metric / confusion-matrix tables.

    input:
        config ; dict model -> {testfunc_name -> datamode}
    return:
        (dfret, dfacc) ; concatenated run_test results and per-laptype accuracy
    """
    acclist, dflist = [], []
    for model, conf in config.items():
        for teststr, datamode in conf.items():
            # run the experiment for this (model, datamode) pair
            df, dataret = run_test(runs, plens, half, trainids,
                                   train_ratio, teststr,
                                   datamode=datamode, models=[model])
            # split the results by <track,lap> status type
            acc = checkret_confusionmat(dataret, ref_testset,
                                        testid=teststr, model=model)
            dflist.append(df)
            acclist.append(acc)
    dfret = pd.concat(dflist, axis=0)
    dfacc = pd.concat(acclist, axis=0)
    return dfret, dfacc
# In[11]:
def longterm_predict(predictor, runs, prediction_length, freq,
                     useeid = False,
                     run_ts= COL_LAPTIME,
                     test_event = 'Indy500-2018',
                     test_cars = [],
                     use_global_dict = True,
                     oracle_mode = MODE_ORACLE,
                     half_moving_win = 0,
                     train_ratio=0.8,
                     log_transform = False,
                     verbose = False
                ):
    """
    Rolling long-term forecast over the test event, one record at a time.

    split the ts to train and test part by the ratio

    input:
        predictor   ; a trained gluonts predictor
        runs        ; >=0 selects one entry of the global laptime_data, <0 uses all
        oracle_mode: false to simulate prediction in real by
                     set the covariates of track and lap status as nan in the testset
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3,  -> placeholder
            4,  -> placeholder

    NOTE(review): run_ts/test_event/feature_mode/context_ratio parameters are
    immediately overridden by the module globals below — the keyword arguments
    have no effect; confirm this is intentional.
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio

    # (re)build the track / pit status models used by the MODE_PRED* flags
    init_track_model()
    init_adjust_track_model()

    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    train_set = []
    test_set = []
    forecasts_et = {}

    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()

    #add statistics for adjust test
    # accumulated |adjusted - raw| for trackstatus, lapstatus
    mae = [0,0]

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []

        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out: only the test event is processed
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)

        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process for each ts (one per car)
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()   # untouched copy for the mae comparison

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            # pristine copies of the covariates; restored at every endpos step
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()

            # output matrix <5, totallen>: lapstatus / true / pred / rank placeholders
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)

            if log_transform:
                target_val = np.log(target_val + 1.0)

            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)

            # selection of features: zero out the covariates a mode excludes
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                # NOTE(review): unreachable — non-test events 'continue' above
                _train.append({'target': target_val,
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                                     rec[COL_LAPSTATUS,:]]
                               }
                              )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len

                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = int(prediction_length/2)
                elif half_moving_win == 2:
                    step = prediction_length
                else:
                    step = 1

                #bug fix, fixed the split point for all cars/ts
                #for endpos in range(max_len, context_len+prediction_length,step):
                for endpos in range(context_len+prediction_length, max_len, step):
                    #check if enough for this ts
                    if endpos > totallen:
                        break

                    # RUN Prediction for single record
                    _test = []

                    # check pitstop(stint) in the last prediction
                    # use ground truth of target before the last pitstop
                    if np.sum(lap_status[endpos-2*prediction_length:endpos-prediction_length]) > 0:
                        # pit found
                        # adjust endpos to just after the pit + one horizon
                        pitpos = np.where(lap_status[endpos-2*prediction_length:endpos-prediction_length] == 1)
                        endpos = endpos-2*prediction_length + pitpos[0][0] + prediction_length + 1
                        #print('endpos:',endpos,pitpos)

                        #check if enough for this ts
                        if endpos > totallen:
                            break

                    #reset target, status to the pristine copies
                    # NOTE(review): target_val is rebuilt from rec each step, so
                    # the forecast written into it below is not fed back
                    # autoregressively — confirm that is intended
                    target_val = rec[run_ts,:].copy().astype(np.float32)
                    rec[COL_LAPSTATUS, :] = lap_status
                    rec[COL_TRACKSTATUS, :] = track_status
                    rec[COL_LAPS_INSTINT, :] = pitage_status

                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    # stint state at the last observed lap (just before the horizon)
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0

                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                                                     laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        # NOTE(review): start_pitage is not incremented per lap,
                        # so consecutive non-pit laps all get start_pitage+1 —
                        # confirm whether a running counter was intended
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0

                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec

                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec

                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))

                    # build the single-record test dataset for this window
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec]
                                      }
                                     )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                      }
                                     )

                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)

                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )

                    forecasts = list(forecast_it)
                    tss = list(ts_it)

                    #get prediction result: mean over the 100 sample paths
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))

                    #update target_val
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    # persist the (possibly adjusted) covariates into rec
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]

                    #save forecast
                    #save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    #print('car no:', carno, 'completed_laps:', completed_laps)
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()

                    test_rec_cnt += 1

            #one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

    #train_set.extend(_train)
    #test_set.extend(_test)
    #print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    #train_ds = ListDataset(train_set, freq=freq)
    #test_ds = ListDataset(test_set, freq=freq)
    return forecasts_et
# In[12]:
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset):
    """
    evaluate stint rank by laptime forecasting

    Converts the true and predicted laptime rows into cumulative elapsed time
    (plus each car's lap-0 offset) and derives per-lap ranks from them.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target (laptime)
            2,: -> pred target (laptime)
            3,  -> filled here with true rank
            4,  -> filled here with pred rank
        prediction_length ; unused, kept for interface compatibility
        start_offset ; DataFrame['car_number','elapsed_time'] with the elapsed
            time at lap 0 for one specific event; any non-DataFrame value
            means no offset is applied
    return:
        forecasts_et with rows 3/4 filled in place
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    caridmap = {key: idx for idx, key in enumerate(carlist)}

    #todo, Indy500 -> 200 max laps
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:, :] = np.nan

    for carno in forecasts_et.keys():
        # bugfix: 'offset' was unbound (NameError) when start_offset was not a
        # DataFrame; default to 0 in that case
        offset = 0
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number'] == carno)].elapsed_time.values[0]

        lapnum = len(forecasts_et[carno][1, :])
        # cumulative laptime + lap-0 offset -> elapsed time (true, then pred)
        elapsed_time[0, caridmap[carno], :lapnum] = np.cumsum(forecasts_et[carno][1, :]) + offset
        elapsed_time[1, caridmap[carno], :lapnum] = np.cumsum(forecasts_et[carno][2, :]) + offset

    #calculate rank per lap; argsort pushes NaN (missing cars) to the end
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)

    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1, :])
        forecasts_et[carno][3, :] = true_rank[caridmap[carno], :lapnum]
        forecasts_et[carno][4, :] = pred_rank[caridmap[carno], :lapnum]

    return forecasts_et
def eval_stint_direct(forecasts_et, prediction_length):
    """
    evaluate rank by timediff forecasting

    The target rows are already directly comparable across cars (e.g. time
    difference to the leader), so ranks are computed lap-by-lap without any
    cumulative-sum conversion.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3,  -> filled here with true rank
            4,  -> filled here with pred rank
        prediction_length ; unused, kept for interface compatibility
    return:
        forecasts_et with rows 3/4 filled in place
    """
    carlist = list(forecasts_et.keys())
    caridmap = {car: pos for pos, car in enumerate(carlist)}

    # Indy500 -> at most 200 laps; NaN marks laps a car never reached
    maxlap = 200
    diff_time = np.full((2, len(carlist), maxlap), np.nan)

    for carno, mat in forecasts_et.items():
        lapnum = len(mat[1, :])
        diff_time[0, caridmap[carno], :lapnum] = mat[1, :]
        diff_time[1, caridmap[carno], :lapnum] = mat[2, :]

    # rank = double argsort per lap (NaNs sort last)
    true_rank = np.argsort(np.argsort(diff_time[0], axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(diff_time[1], axis=0), axis=0)

    # write the ranks back into rows 3 and 4
    for carno, mat in forecasts_et.items():
        lapnum = len(mat[1, :])
        mat[3, :] = true_rank[caridmap[carno], :lapnum]
        mat[4, :] = pred_rank[caridmap[carno], :lapnum]

    return forecasts_et
#calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    Duplicate of eval_stint_bylaptime: converts the true/predicted laptime rows
    into elapsed time (plus each car's lap-0 offset) and derives per-lap ranks.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix (see eval_stint_bylaptime)
        prediction_length ; unused, kept for interface compatibility
        start_offset ; DataFrame['car_number','elapsed_time'] for one specific
            event; any non-DataFrame value means no offset is applied
    return:
        forecasts_et with rows 3 (true rank) and 4 (pred rank) filled in place
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    caridmap = {key: idx for idx, key in enumerate(carlist)}

    #todo, Indy500 -> 200 max laps
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:, :] = np.nan

    for carno in forecasts_et.keys():
        # bugfix: 'offset' was unbound (NameError) when start_offset was not a
        # DataFrame; default to 0 in that case
        offset = 0
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number'] == carno)].elapsed_time.values[0]

        lapnum = len(forecasts_et[carno][1, :])
        # cumulative laptime + lap-0 offset -> elapsed time (true, then pred)
        elapsed_time[0, caridmap[carno], :lapnum] = np.cumsum(forecasts_et[carno][1, :]) + offset
        elapsed_time[1, caridmap[carno], :lapnum] = np.cumsum(forecasts_et[carno][2, :]) + offset

    #calculate rank per lap; argsort pushes NaN (missing cars) to the end
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)

    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1, :])
        forecasts_et[carno][3, :] = true_rank[caridmap[carno], :lapnum]
        forecasts_et[carno][4, :] = pred_rank[caridmap[carno], :lapnum]

    return forecasts_et
# In[13]:
def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
            test_event='Indy500-2018', test_cars = [],
            datamode = MODE_ORACLE,model = 'oracle'):
    """
    Load one trained model and run the rolling long-term forecast.

    dependency: test_event, test on one event only
    return: forecasts dict (see longterm_predict)
    """
    retdf = []
    pred_ret = {}
    ds_ret = {}
    rank_result = {}
    predictor = {}

    print('exp:', inspect.stack()[0][3], 'model:', model,
          'datamode:', get_modestr(datamode), 'eval:', _exp_id )

    # load the trained predictor for this horizon / model / train id
    predictor[model] = load_model(prediction_length, model,
                                  trainid=trainid)

    ### run the rolling forecast over the global test event
    forecasts = longterm_predict(predictor[model],
                                 events_id[_test_event], prediction_length, freq,
                                 oracle_mode=datamode,
                                 run_ts=_run_ts,
                                 test_cars=test_cars,
                                 half_moving_win=half_moving_win,
                                 train_ratio=train_ratio
                                 )

    return forecasts
# In[14]:
def get_sign(diff):
    """Return the sign of diff: 1 if positive, -1 if negative, 0 otherwise."""
    return 1 if diff > 0 else (-1 if diff < 0 else 0)
def get_stint_acc(forecasts, trim=2, currank = False):
    """
    Build per-stint rank-change records (true vs predicted).

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
                0; lap_status (1 marks a pit lap)
                3; true_rank
                4; pred_rank
        currank ; if True, force the naive model: the predicted end rank of
                each stint is its start rank (zero predicted change)
    output:
        DataFrame[carno, stintid, startrank, endrank, diff, sign,
                  pred_endrank, pred_diff, pred_sign]
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]
        pitpos_list = np.where(forecasts[carno][0,:] == 1)[0]

        stintid = 0
        startrank = true_rank[0]
        for pitpos in pitpos_list:
            # rank 'trim' laps before the pit lap, to skip in/out-lap noise
            endrank = true_rank[pitpos-trim]
            diff = endrank - startrank
            sign = get_sign(diff)

            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
            else:
                pred_endrank = pred_rank[pitpos-trim]
            pred_diff = pred_endrank - startrank
            pred_sign = get_sign(pred_diff)

            rankret.append([carno, stintid, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                            ])
            stintid += 1
            startrank = true_rank[pitpos-trim]

        # final stint after the last pit.
        # bugfix: a car with no pit stops used to raise IndexError on
        # pitpos_list[-1]; treat its whole race as a single stint instead
        if len(pitpos_list) == 0 or pitpos_list[-1] < lapnum - 1:
            endrank = true_rank[-1]
            diff = endrank - startrank
            sign = get_sign(diff)

            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
            else:
                pred_endrank = pred_rank[-1]
            pred_diff = pred_endrank - startrank
            pred_sign = get_sign(pred_diff)

            rankret.append([carno, stintid, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                            ])

    df = pd.DataFrame(rankret, columns =['carno', 'stintid', 'startrank',
                                         'endrank', 'diff', 'sign',
                                         'pred_endrank', 'pred_diff', 'pred_sign',
                                         ])
    return df
def get_stint_acc_old(forecasts, trim=2):
    """
    Legacy version of get_stint_acc (no naive/currank mode).

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
                0; lap_status
                3; true_rank
                4; pred_rank
    output:
        DataFrame[carno, stintid, startrank, endrank, diff, sign,
                  pred_endrank, pred_diff, pred_sign]
    """
    rows = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1, :])
        true_rank = forecasts[carno][3, :]
        pred_rank = forecasts[carno][4, :]
        pitpos_list = np.where(forecasts[carno][0, :] == 1)[0]

        stintid = 0
        startrank = true_rank[0]
        # one record per stint, closed at each pit stop
        for pitpos in pitpos_list:
            endrank = true_rank[pitpos - trim]
            pred_endrank = pred_rank[pitpos - trim]
            rows.append([carno, stintid, startrank,
                         endrank, endrank - startrank, get_sign(endrank - startrank),
                         pred_endrank, pred_endrank - startrank,
                         get_sign(pred_endrank - startrank)])
            stintid += 1
            startrank = endrank

        # trailing stint after the last pit stop
        if pitpos_list[-1] < lapnum - 1:
            endrank = true_rank[-1]
            pred_endrank = pred_rank[-1]
            rows.append([carno, stintid, startrank,
                         endrank, endrank - startrank, get_sign(endrank - startrank),
                         pred_endrank, pred_endrank - startrank,
                         get_sign(pred_endrank - startrank)])

    return pd.DataFrame(rows, columns=['carno', 'stintid', 'startrank',
                                       'endrank', 'diff', 'sign',
                                       'pred_endrank', 'pred_diff', 'pred_sign',
                                       ])
# ### init
# In[15]:

#
# configuration for this evaluation run
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'      # which trained dataset/model tree to load
_test_event = 'Indy500-2018'       # the single event evaluated here
#_test_event = 'Indy500-2019'

_feature_mode = FEATURE_STATUS     # which dynamic features the model expects
_context_ratio = 0.                # 0 -> context length derived from plen

#_task_id = 'timediff' # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...

#_task_id = 'lapstatus' # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus' #rank, laptime, laptim2rank, timediff2rank...

_task_id = 'laptime'  # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME  #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank'  #rank, laptime, laptim2rank, timediff2rank...

# In[16]:

# module-level state filled in by init()
global_start_offset = {}   # event -> DataFrame['car_number','elapsed_time'] at lap 0
global_carids = {}         # car number -> encoded car id
laptime_data = None        # pickled per-event laptime/rank/status arrays
freq = "1min"              # gluonts dummy frequency for the lap series
decode_carids = {}         # encoded car id -> car number (inverse of global_carids)

years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}'
def init():
    """
    Populate the module globals: per-event lap-0 offsets from the rank data,
    plus the pickled laptime dataset and the car-id decoding map.
    """
    global global_carids, laptime_data, global_start_offset, decode_carids

    stagedata = {}
    for event in events:
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata = stagedata[event]
        # elapsed time at lap 0 is the per-car start offset for this event
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]

    # load the pre-built laptime dataset (car id map + per-event arrays)
    import pickle
    with open(f'laptime_rank_timediff_pit-oracle-{dbid}.pickle', 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')

    decode_carids = {carid: carno for carno, carid in global_carids.items()}
def runtest(modelname, model, datamode, naivemode, trainid= "2018"):
    """
    Run one 2-lap stint experiment and score the predicted rank changes.

    return: (acc, mae, rmse, r2) of the per-stint rank change; (0,0,0,0) when
    the current _exp_id has no evaluation path.
    """
    forecast = run_exp(2, 2, train_ratio=0.1, trainid=trainid,
                       datamode=datamode, model=model)

    # turn the raw forecasts into per-lap ranks, depending on the task type
    if _exp_id in ('rank', 'timediff2rank'):
        forecasts_et = eval_stint_direct(forecast, 2)
    elif _exp_id == 'laptime2rank':
        forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
    else:
        print(f'Error, {_exp_id} evaluation not support yet')
        return 0, 0, 0, 0

    df = get_stint_acc(forecasts_et, currank=naivemode, trim=_trim)

    # sign accuracy plus error metrics on the rank change
    correct = df[df['sign'] == df['pred_sign']]
    acc = len(correct) / len(df)
    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values)) / len(df)
    rmse = mean_squared_error(df['pred_diff'].values, df['diff'].values)
    mae = mean_absolute_error(df['pred_diff'].values, df['diff'].values)
    r2 = r2_score(df['pred_diff'].values, df['diff'].values)

    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}')
    return acc, mae, rmse, r2
# In[20]:
def mytest():
    """
    Run the full battery of model/mode configurations and save the results.

    If the result CSV for the current settings already exists it is loaded and
    returned instead of re-running the (expensive) experiments.

    return: DataFrame[runid, acc, mae, rmse, r2], one row per configuration
    """
    savefile = f'stint-evluate-{_exp_id}-d{_dataset_id}-t{_test_event}-c{_context_ratio}_trim{_trim}.csv'
    if os.path.exists(savefile):
        print(f'{savefile} already exists, bye')
        # bugfix: return the cached result (this path used to return None)
        retdf = pd.read_csv(savefile)
        return retdf

    # model name -> [model id, oracle/test mode flags, naive-mode flag]
    config = {'fulloracle':['oracle',MODE_ORACLE,False],
              'laponly':['oracle',MODE_ORACLE_LAPONLY,False],
              'notracklap':['oracle',MODE_NOTRACK + MODE_NOLAP,False],
              'fullpred':['oracle',MODE_PREDTRACK + MODE_PREDPIT,False],
              'curtrack':['oracle',MODE_TESTCURTRACK,False],
              'zerotrack':['oracle',MODE_TESTZERO,False],
              'predtrack':['oracle',MODE_PREDTRACK + MODE_ORACLE_TRACKONLY,False],
              'predpit':['oracle',MODE_PREDPIT + MODE_ORACLE_LAPONLY,False],
              'deepAR':['deepAR',MODE_ORACLE,False],
              'naive':['zero',MODE_ORACLE, True],
             }

    cols = ['runid','acc','mae', 'rmse', 'r2']
    result = []
    for modelname in config.keys():
        acc, mae, rmse, r2 = runtest(modelname, config[modelname][0],
                                     config[modelname][1], config[modelname][2])
        result.append([modelname, acc, mae, rmse, r2])

    retd = pd.DataFrame(result, columns=cols)
    # bugfix: write to the same filename the cache check above looks for
    # (it previously omitted the _trim suffix, so the cache never hit)
    retd.to_csv(savefile, float_format='%.3f')
    return retd
# In[ ]:
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))

    # cmd argument parser
    usage = 'stint_predictor_fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    parser.add_option("--contextratio", dest="contextratio", default=0.)
    parser.add_option("--trim", dest="trim", type=int, default=2)

    opt, args = parser.parse_args()

    #set global parameters (read by run_exp/longterm_predict/mytest)
    _dataset_id = opt.datasetid
    _test_event = opt.testevent
    _trim = opt.trim

    # each task selects the target row of the data and the evaluation pipeline
    if opt.taskid == 'laptime':
        _task_id = 'laptime'  # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME  #COL_LAPTIME,COL_RANK
        _exp_id='laptime2rank'  #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff'  # rank,laptime, the trained model's task
        _run_ts = COL_TIMEDIFF  #COL_LAPTIME,COL_RANK
        _exp_id='timediff2rank'  #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'rank':
        _task_id = 'rank'  # rank,laptime, the trained model's task
        _run_ts = COL_RANK  #COL_LAPTIME,COL_RANK
        _exp_id='rank'  #rank, laptime, laptim2rank, timediff2rank...
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)

    if _dataset_id=='' or _test_event=='':
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)

    # dataset ids containing 'pitage' were trained with the extra pitage feature
    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE

    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
                _task_id)

    init()
    mytest()
| 95,368 | 36.650612 | 310 | py |
rankpredictor | rankpredictor-master/src/indycar/model/gluonts_models.py | #!/usr/bin/env python
# coding: utf-8
"""
Gluonts Models on the Indy dataset
dataset:
freq, prediction_length, cardinality,train_ds, test_ds
models:
1. classical models
naive,
arima, ets, prophet
2. deep models
deepAR, deepstate, deepFactor
deepAR-Oracle
"""
# # DeepAR on simulation indy500 laptime dataset
#
# laptime dataset
# <eventid, carids, laptime (totalcars x totallaps)>
# Third-party imports
import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import logging
import os,sys
from optparse import OptionParser
import pickle
from pathlib import Path
from gluonts.dataset.common import ListDataset
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWeightEstimator
logger = logging.getLogger(__name__)

#global variables (defaults; load_dataset() overwrites the first three)
prediction_length = 50     # forecast horizon in laps
context_length = 100       # laps of history shown in the plots
freq = "1H"                # gluonts dummy frequency for the lap series

events = ['Phoenix','Indy500','Texas','Iowa','Pocono','Gateway']
events_id={key:idx for idx, key in enumerate(events)}
cardinality = [0]          # number of cars; overwritten by load_dataset()

# feature-row indices in the laptime data arrays
TS_LAPTIME=2
TS_RANK=3
def load_dataset(inputfile):
    """Load a pickled dataset bundle and return (train_ds, test_ds).

    Side effect: overwrites the module globals ``freq``, ``prediction_length``
    and ``cardinality`` with the values stored in the pickle.
    """
    global freq, prediction_length, cardinality
    with open(inputfile, 'rb') as f:
        payload = pickle.load(f, encoding='latin1')
    # the pickle stores exactly five items, in this fixed order
    freq, prediction_length, cardinality, train_ds, test_ds = payload
    logger.info(f"number of cars: {cardinality}")
    return train_ds, test_ds
def plot_prob_forecasts(ts_entry, forecast_entry, outputfile):
    """Plot observations vs. probabilistic forecast for each series entry.

    Writes one pdf per entry to ``'<outputfile>-<idx>.pdf'``. Only the first
    target dimension of each forecast is plotted (``copy_dim(0)``).
    Reads the module global ``context_length`` as the plot window length.

    Parameters:
        ts_entry: list of pandas objects holding the observed series
        forecast_entry: list of gluonts Forecast objects (same length)
        outputfile: path prefix for the generated pdf files
    """
    plot_length = context_length
    prediction_intervals = (50.0, 90.0)
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]

    for idx in range(len(ts_entry)):
        fig, axs = plt.subplots(1, 1, figsize=(10, 7))
        # tail of the observed series (first column) plus the dim-0 forecast fan
        ts_entry[idx].iloc[-plot_length:, 0].plot(ax=axs)
        forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='g')
        plt.grid(which="both")
        plt.legend(legend, loc="upper left")
        plt.savefig(outputfile + '-%d.pdf' % idx)
        # bugfix: close each figure; the original leaked one open figure per
        # entry, which triggers matplotlib's "too many open figures" warning
        plt.close(fig)
def evaluate_model_old(estimator, train_ds, test_ds, outputfile):
    """Train ``estimator``, serialize the predictor to ``outputfile`` (a
    directory), plot three sample cars and log aggregate accuracy metrics.

    Superseded by evaluate_model(), which takes a ready predictor/evaluator.
    """
    predictor = estimator.train(train_ds)
    if not os.path.exists(outputfile):
        os.mkdir(outputfile)
    predictor.serialize(Path(outputfile))
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    logger.info(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    # hand-picked representative series for plotting
    # car12@rank1, car1@rank16, car7@rank33, the index is 7,0,4 accordingly
    # Indy500 Car 12 WillPower
    #offset = 52-7
    offset = 0
    ts_entry = [tss[7+offset],tss[0+offset],tss[4+offset]]
    forecast_entry = [forecasts[7+offset],forecasts[0+offset],forecasts[4+offset]]
    plot_prob_forecasts(ts_entry, forecast_entry, outputfile)
    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
    logger.info(json.dumps(agg_metrics, indent=4))
def evaluate_model_uni(predictor, evaluator, test_ds, outputfile):
    """Backtest ``predictor`` on ``test_ds`` and log aggregate metrics.

    Univariate variant: no plots are produced; ``outputfile`` is unused here.
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,
        predictor=predictor,
        num_samples=100,  # sample paths per forecast
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    logger.info(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    # same three representative cars as the plotting variant
    # (car12@rank1, car1@rank16, car7@rank33 -> indices 7, 0, 4)
    offset = 0
    picks = [7 + offset, 0 + offset, 4 + offset]
    ts_entry = [tss[i] for i in picks]
    forecast_entry = [forecasts[i] for i in picks]
    # plotting is intentionally disabled in this variant

    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
    logger.info(json.dumps(agg_metrics, indent=4))
def evaluate_model(predictor, evaluator, test_ds, outputfile):
    """Backtest ``predictor`` on ``test_ds``, plot three sample cars and log
    the aggregate metrics produced by ``evaluator``.

    ``outputfile`` is the path prefix handed to plot_prob_forecasts().
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,
        predictor=predictor,
        num_samples=100,  # sample paths per forecast
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    logger.info(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    # hand-picked representative series
    # (car12@rank1, car1@rank16, car7@rank33 -> indices 7, 0, 4)
    offset = 0
    picks = [7 + offset, 0 + offset, 4 + offset]
    ts_entry = [tss[i] for i in picks]
    forecast_entry = [forecasts[i] for i in picks]
    plot_prob_forecasts(ts_entry, forecast_entry, outputfile)

    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
    logger.info(json.dumps(agg_metrics, indent=4))
def init_estimator(model, gpuid, epochs=100, batch_size = 32,
        target_dim = 3, distr_output = None, use_feat_static = True):
    """Build a GluonTS estimator (deep models) or predictor (classical models).

    Parameters:
        model: one of 'deepAR', 'deepARW', 'deepAR-Oracle', 'deepARW-Oracle',
            'deepAR-nocarid', 'deepAR-multi', 'simpleFF', 'deepFactor',
            'deepState', 'ets', 'prophet', 'arima', 'naive'
        gpuid: device id; negative selects the cpu context
        epochs, batch_size: training settings (strings from the command line
            are accepted and converted)
        target_dim: target dimensionality, used only by 'deepAR-multi'
        distr_output: gluonts DistributionOutput for the deepAR family
        use_feat_static: feed the static car-id categorical feature

    Reads the module globals prediction_length, context_length, cardinality
    and freq. Exits the process for an unknown model name.
    """
    # command-line option values may arrive as strings; normalize once here
    epochs = int(epochs)
    batch_size = int(batch_size)

    ctx = "cpu" if int(gpuid) < 0 else "gpu(%s)" % gpuid

    def make_trainer(hybridize=True):
        # shared Trainer configuration for every deep model;
        # hybridize=False is required by simpleFF only
        kwargs = dict(ctx=ctx,
                      batch_size=batch_size,
                      epochs=epochs,
                      learning_rate=1e-3,
                      num_batches_per_epoch=100)
        if not hybridize:
            kwargs['hybridize'] = False
        return Trainer(**kwargs)

    if model == 'deepAR':
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            distr_output=distr_output,
            freq=freq,
            trainer=make_trainer())
    elif model == 'deepARW':
        # bugfix: the original referenced the undefined name DeepARWEstimator
        # (the import at file top is DeepARWeightEstimator), which raised a
        # NameError whenever this branch was taken
        estimator = DeepARWeightEstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            distr_output=distr_output,
            freq=freq,
            trainer=make_trainer())
    elif model == 'deepAR-Oracle':
        # oracle variant feeds the dynamic real covariates (track/lap status)
        if use_feat_static:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length=context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output=distr_output,
                freq=freq,
                trainer=make_trainer())
        else:
            # no static feature -> cardinality must not be passed
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length=context_length,
                use_feat_static_cat=use_feat_static,
                use_feat_dynamic_real=True,
                distr_output=distr_output,
                freq=freq,
                trainer=make_trainer())
    elif model == 'deepARW-Oracle':
        if use_feat_static:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length=context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output=distr_output,
                freq=freq,
                trainer=make_trainer())
        else:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length=context_length,
                use_feat_static_cat=use_feat_static,
                use_feat_dynamic_real=True,
                distr_output=distr_output,
                freq=freq,
                trainer=make_trainer())
    elif model == 'deepAR-nocarid':
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            use_feat_static_cat=use_feat_static,
            cardinality=cardinality,
            use_feat_dynamic_real=True,
            distr_output=distr_output,
            freq=freq,
            trainer=make_trainer())
    elif model == 'deepAR-multi':
        # multivariate target: distr_output argument is ignored in favour of
        # a MultivariateGaussianOutput of the requested dimension
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            use_feat_static_cat=use_feat_static,
            use_feat_dynamic_real=True,
            freq=freq,
            trainer=make_trainer(),
            distr_output=MultivariateGaussianOutput(dim=target_dim))
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length=context_length,
            freq=freq,
            trainer=make_trainer(hybridize=False))
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            freq=freq,
            trainer=make_trainer())
    elif model == 'deepState':
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=make_trainer())
    # classical models below are ready-made predictors: no training needed
    elif model == 'ets':
        estimator = RForecastPredictor(method_name='ets', freq=freq, prediction_length=prediction_length)
    elif model == 'prophet':
        estimator = ProphetPredictor(freq=freq, prediction_length=prediction_length)
    elif model == 'arima':
        estimator = RForecastPredictor(method_name='arima', freq=freq, prediction_length=prediction_length, trunc_length=200)
    elif model == 'naive':
        estimator = NaivePredictor(freq=freq, prediction_length=prediction_length)
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)

    return estimator
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)

    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))

    # cmd argument parser
    usage = 'deepar_simindy500.py --epochs epochs --input inputpicklefile --output outputfile'
    parser = OptionParser(usage)
    parser.add_option("--input", dest="inputfile", default='sim-indy500-laptime-2018.pickle')
    parser.add_option("--output", dest="outputfile")
    parser.add_option("--epochs", dest="epochs", default=100)
    parser.add_option("--model", dest="model", default="deepAR")
    parser.add_option("--gpuid", dest="gpuid", default=0)
    parser.add_option("--contextlen", dest="contextlen", default=100)
    parser.add_option("--batch_size", dest="batch_size", default=32)
    parser.add_option("--nosave", dest="nosave", action="store_true", default=False)
    parser.add_option("--evalmode", dest="evalmode", action="store_true", default=False)
    parser.add_option("--distr_output", dest="distr_output", default='student')
    parser.add_option("--nocarid", dest="nocarid", action="store_true", default=False)
    # obsolete
    parser.add_option("--mode", dest="mode", default='train')
    opt, args = parser.parse_args()

    # context_length is a module global read by init_estimator and plotting
    context_length = int(opt.contextlen)

    # loading the pickle also sets the globals freq/prediction_length/cardinality
    train_ds, test_ds = load_dataset(opt.inputfile)

    # infer target dimensionality from the first training entry
    entry = next(iter(train_ds))
    target_dim = entry['target'].shape
    target_dim = target_dim[0] if len(target_dim) > 1 else 1
    logger.info('target_dim:%s', target_dim)

    runid = f'-i{opt.outputfile}-e{opt.epochs}-m{opt.model}-p{prediction_length}-c{opt.contextlen}-f{freq}-dim{target_dim}-dstr{opt.distr_output}'
    logger.info("runid=%s", runid)

    # train
    classical_models = ['ets', 'arima', 'prophet', 'naive']
    distr_outputs = {'student': StudentTOutput(),
                     'negbin': NegativeBinomialOutput()}
    if opt.distr_output in distr_outputs:
        distr_output = distr_outputs[opt.distr_output]
    else:
        logger.error('output distr no found:%s', opt.distr_output)
        sys.exit(-1)

    use_feat_static = not opt.nocarid

    # bugfix: epochs/batch_size arrive as strings from the command line;
    # convert before they reach the Trainer
    estimator = init_estimator(opt.model, opt.gpuid,
                               int(opt.epochs), int(opt.batch_size), target_dim,
                               distr_output=distr_output, use_feat_static=use_feat_static)

    if opt.evalmode == False:
        # classical models are ready-made predictors and need no training
        if opt.model in classical_models:
            predictor = estimator
        else:
            predictor = estimator.train(train_ds)
            if not opt.nosave:
                if not os.path.exists(opt.outputfile):
                    os.mkdir(opt.outputfile)
                logger.info('Start to save the model to %s', opt.outputfile)
                predictor.serialize(Path(opt.outputfile))
                logger.info('End of saving the model.')
    else:
        if not os.path.exists(opt.outputfile):
            # bugfix: the original referenced the undefined name `outputfile`
            logger.error(f'error:{opt.outputfile} not exists')
            sys.exit(-1)
        logger.info('Start to load the model from %s', opt.outputfile)
        predictor = Predictor.deserialize(Path(opt.outputfile))
        logger.info('End of loading the model.')

    # evaluate: multivariate targets need the multivariate evaluator
    if target_dim > 1:
        logger.info('Start MultivariateEvaluator')
        evaluator = MultivariateEvaluator(quantiles=[0.1, 0.5, 0.9])
    else:
        logger.info('Start Evaluator')
        evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    evaluate_model(predictor, evaluator, test_ds, opt.outputfile)
| 19,095 | 34.560521 | 146 | py |
rankpredictor | rankpredictor-master/src/indycar/model/evaluate_fulltest_fastrun.py | #!/usr/bin/env python
# coding: utf-8
# ## evaluate-fulltest-fastrun
#
# based on: evaluate-fulltest
#
# + support different models and test set
#
# + rank prediction directly
# + rank prediction by laptime2rank
# + laptime prediction
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
logger = logging.getLogger(__name__)
# In[2]:
import os
# reseed the PRNG from system entropy; the track/pit models below use random.choice
random.seed()
# NOTE(review): notebook leftover — the return value is discarded
os.getcwd()
#GPUID = 1
# ### global constants
# In[3]:
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# row indices into each car's record matrix rec[feature, lap]
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2              # 1 = yellow-flag lap
COL_LAPSTATUS=3                # 1 = pit-stop lap
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5     # caution laps since the last pit stop
COL_LAPS_INSTINT= 6            # laps since the last pit stop (pit age)
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8
# oracle mode (bit flags combined into oracle_mode)
MODE_ORACLE = 1024 # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2
# oracle mode for training
# NOTE(review): these reuse the values 1 and 2 of the two flags above —
# training and oracle flags alias each other; verify this is intentional
MODE_NOLAP = 1
MODE_NOTRACK = 2
#MODE_NOPITAGE = 512
# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32
# disturbe analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256
# flag value -> printable name, used by get_modestr()
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
             MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
             MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
             MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
            MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
            MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
# In[ ]:
# In[4]:
def load_data(event, year=0):
    """Load one event's completed-lap csv and derive the working views.

    Parameters:
        event: event name, e.g. 'Indy500'
        year: optional year suffix; 0 (default) loads the file without a year

    Returns:
        (alldata, rankdata, acldata) where
        alldata  - the raw csv as a DataFrame,
        rankdata - one row per (car, lap), ranked by elapsed time,
        acldata  - the completed-laps view built by make_cl_data(alldata)
    """
    if year > 0:
        # bugfix: str() — the original concatenated an int year into the
        # path, which raises TypeError
        inputfile = '../data/final/C_' + event + '-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'
    dataset = pd.read_csv(inputfile)

    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1

    # cars that are on record for the final lap, i.e. finished the race
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)

    # keep the full data, then restrict `dataset` to finishers
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]

    # one record per (car, lap): the earliest elapsed_time entry wins
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # note: the original also computed make_cl_data(dataset) into an unused
    # local; that dead computation has been dropped
    acldata = make_cl_data(alldata)

    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-(car, lap) completed-laps view.

    Keeps, for every (car_number, completed_laps) pair, the record with the
    smallest elapsed_time, drops bookkeeping columns, and adds lap-over-lap
    deltas (rank_diff, time_diff) that reset to 0 on each car's first lap.

    Parameters:
        dataset: raw event DataFrame; must contain the columns referenced
            below (car_number, completed_laps, rank, elapsed_time, the
            dropped bookkeeping columns, and the status columns)

    Returns:
        DataFrame with columns [car_number, completed_laps, rank,
        elapsed_time, rank_diff, time_diff, current_status, track_status,
        lap_status]
    """
    # pick up data with valid rank: earliest elapsed_time per (car, lap)
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade", "laps_behind_prec", "overall_rank", "pit_stop_count",
                          "last_pitted_lap", "start_position", "laps_led"], axis=1)

    # bugfix: the original used chained assignment (uni_ds['rank_diff'][mask]
    # = 0), which raises SettingWithCopyWarning and may silently not write on
    # modern pandas. groupby().diff() yields NaN on each car's first row,
    # which fillna(0) maps to the same 0 the mask produced.
    uni_ds['rank_diff'] = uni_ds.groupby('car_number')['rank'].diff().fillna(0)
    uni_ds['time_diff'] = uni_ds.groupby('car_number')['elapsed_time'].diff().fillna(0)

    df = uni_ds[['car_number', 'completed_laps', 'rank', 'elapsed_time',
                 'rank_diff', 'time_diff', "current_status", "track_status", "lap_status"]]
    return df
# In[6]:
def nan_helper(y):
    """Locate NaNs in a 1-d array.

    Returns a pair (mask, index_fn): ``mask`` is a boolean array that is True
    at NaN positions, and ``index_fn`` converts such a logical mask into the
    corresponding integer indices.

    Example (linear interpolation of NaNs):
        nans, x = nan_helper(y)
        y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)
    return mask, lambda z: z.nonzero()[0]
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
def get_modestr(a):
    """Render the mode bitmask ``a`` as a comma-terminated list of mode names."""
    names = [name for flag, name in _mode_map.items() if test_flag(a, flag)]
    return ''.join('%s,' % name for name in names)
# endpos -> vector of prediction_length
# caches keyed by the ts end position (endpos): predicted vs. true track
# status vectors of length prediction_length, filled by get_track_model()
_track_pred = {}
_track_true = {}
def init_track_model():
    # Reset the track-status prediction caches between dataset builds.
    global _track_pred,_track_true
    _track_pred = {}
    _track_true = {}
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """Predict the track status (yellow-flag indicator) for the horizon.

    A caution period's total length is sampled from the empirical lengths
    observed at Indy500 2018; if a caution is already running at the forecast
    start, its remaining laps are filled with 1. Results are cached per
    ``endpos`` (so all cars at the same split share one prediction), and the
    true tail of ``track_rec`` is kept in _track_true for later comparison.
    """
    global _track_pred,_track_true
    # this is the perfect track model for Indy500 2018
    # (observed caution-period lengths, sampled uniformly)
    track_model = [6,4,4,5,6,6,4]

    if endpos in _track_pred:
        return _track_pred[endpos]
    else:
        #get yflag lap count from the start pred point
        # (how many consecutive yellow laps immediately precede the horizon)
        yflaplen = 0
        for i in range(1, context_len):
            if track_rec[- prediction_length - i] == 1:
                yflaplen += 1
            else:
                break

        #laps remain, fill into the future
        trackpred = np.array([0 for x in range(prediction_length)])
        yflap_pred = random.choice(track_model)
        # only extend an ongoing caution; a quiet track stays all zeros
        if yflaplen > 0 and yflap_pred > yflaplen:
            trackpred[:(yflap_pred - yflaplen)] = 1

        _track_pred[endpos] = trackpred
        _track_true[endpos] = track_rec[- prediction_length:].copy()

        return trackpred
# endpos -> vector of prediction_length
# endpos -> adjusted track-status vector of length prediction_length,
# filled by adjust_track_model()
_track_adjust = {}
def init_adjust_track_model():
    # Reset the track-adjustment cache between dataset builds.
    global _track_adjust
    _track_adjust = {}
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """Randomly perturb the end of a caution period by -1/0/+1 laps.

    input:
        tailpos ; <0, index (from the end) of the last caution lap
    Returns the adjusted track-status tail of length prediction_length;
    cached per ``endpos`` so all cars at one split share the same draw.
    """
    global _track_adjust
    # uniform choice among: shorten by one, keep, lengthen by one
    track_model = [-1,0,1]

    if endpos in _track_adjust:
        return _track_adjust[endpos]
    else:
        yflap_adjust = random.choice(track_model)

        #laps remain, fill into the future
        trackadjust = track_rec[-prediction_length:].copy()
        if yflap_adjust == -1:
            trackadjust[tailpos] = 0
        elif yflap_adjust == 1:
            # NOTE(review): the +1 branch clears the old tail lap and sets the
            # next one, shifting rather than extending the caution — confirm
            # this is the intended disturbance
            trackadjust[tailpos] = 0
            if (tailpos + 1) <= -1:
                trackadjust[tailpos+1] = 1

        _track_adjust[endpos] = trackadjust

        return trackadjust
# carno -> lap_status
_lap_adjust = {}        # carno -> adjusted lap-status vector, filled by get_adjust_lapstatus()
_empirical_model = {}   # pit-shift value -> occurrence count (statistics of applied adjustments)
def init_adjust_pitmodel():
    """Reset the per-car pit-adjustment cache and the adjustment statistics.

    bugfix: the original assigned ``_empirical_model = {}`` without declaring
    it global, creating a discarded local — the module-level statistics dict
    was never actually reset.
    """
    global _lap_adjust, _empirical_model
    _lap_adjust = {}
    _empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """Return a randomly perturbed copy of a car's pit-lap vector.

    Each pit lap is shifted by a draw from _adjust_model; the result is
    cached in _lap_adjust per car so repeated calls (one per sliding-window
    sample) reuse the same perturbation.

    input:
        carno; car number, used as the cache key
        lapstatus ; the ground-truth 0/1 pit-lap vector
        force; when True, redraw until the shifted position is in range;
            when False, give up after one failed draw (pit stays in place)
    """
    if carno not in _lap_adjust:
        #adjust it
        lapadjust = lapstatus.copy()
        for pos in range(0, len(lapstatus)):
            if lapadjust[pos] == 1:
                success = False
                while(not success):
                    # adjust this pit lap position
                    pos_adjust = get_random_choice(_adjust_model)

                    new_pos = pos + pos_adjust

                    if new_pos >= 0 and new_pos < len(lapstatus):
                        #valid
                        lapadjust[pos] = 0
                        lapadjust[new_pos] = 1
                        success = True

                        #add statistics (empirical distribution of applied shifts)
                        if pos_adjust not in _empirical_model:
                            _empirical_model[pos_adjust] = 1
                        else:
                            _empirical_model[pos_adjust] += 1

                    if force==False:
                        break

        _lap_adjust[carno] = lapadjust

    return _lap_adjust[carno]
def build_random_model(modeldict):
    """Build a sampling table from a {value: probability} dict.

    input:
        modeldict ; {val: probability} (weights need not sum to 1)
    return:
        model ; array of [val, cdf] rows sorted by val, cdf normalized to 1
    """
    vals = sorted(modeldict.keys())
    model = np.zeros((len(vals), 2))
    running = 0
    for row, val in enumerate(vals):
        running += modeldict[val]
        model[row, 0] = val
        model[row, 1] = running
    # normalize the cumulative column so the last entry is 1.0
    model[:, 1] = model[:, 1] / running
    return model
def print_model(model, iscdf=True):
    """Pretty-print a sampling model as 'val:probability' pairs.

    input:
        model ; array of [val, p] rows; ``p`` is a cdf when iscdf is True,
            otherwise a raw (unnormalized) weight
    """
    ordered = model[np.argsort(model[:, 0])]
    # with raw weights, normalize by their sum; cdf values need no scaling
    total = 1. if iscdf else np.sum(ordered[:, 1])
    prev_cdf = 0
    pairs = []
    for val, p in ordered:
        pairs.append((val, (p - prev_cdf) / total))
        if iscdf:
            prev_cdf = p
    print(['%d:%.3f' % (x[0], x[1]) for x in pairs])
def get_random_choice(model):
    """Draw a value from a [val, cdf] sampling table.

    input:
        model ; array of [val, cdf] rows, cdf ascending to 1.0
    return:
        one val, chosen with probability given by the cdf increments
    """
    u = np.random.rand()
    # number of cdf entries strictly below u == index of the selected row
    row = int(np.sum(model[:, 1] < u))
    return int(model[row, 0])
# distribution of the random pit-lap shift (laps): note the true position
# (shift 0) is deliberately given low weight so most pits get moved
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
_adjust_model = build_random_model(_modeldict)
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """Randomly shift each pit lap inside the forecast horizon.

    input:
        lap_rec ; 0/1 pit-lap vector; only its last prediction_length
            entries are considered
        force ; when True, redraw until the shifted position lands inside
            the horizon; when False, give up after one failed draw
    return:
        adjusted copy of the horizon tail (length prediction_length)
    """
    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            success = False
            while(not success):
                # adjust this pit lap position by a draw from _adjust_model
                pos_adjust = get_random_choice(_adjust_model)

                new_pos = pos + pos_adjust

                if new_pos >= 0 and new_pos < prediction_length:
                    #valid
                    lapadjust[pos] = 0
                    lapadjust[new_pos] = 1
                    success = True

                if force==False:
                    break

    return lapadjust
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
    """Shift each pit lap in the horizon by -1/0/+1 laps (uniform draw).

    input:
        lap_rec ; 0/1 pit-lap vector; only the last prediction_length
            entries are considered
        endpos ; unused (kept for signature compatibility with callers)
    return:
        adjusted copy of the horizon tail (length prediction_length)
    """
    adjust_model = [-1,0,1]
    # NOTE(review): this draw is never used — each pit gets its own draw below
    lap_adjust = random.choice(adjust_model)

    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            # adjust this pit lap position; out-of-range shifts are dropped
            pos_adjust = random.choice(adjust_model)

            if pos_adjust == -1:
                if (pos - 1 >= 0):
                    lapadjust[pos] = 0
                    lapadjust[pos - 1] = 1
            elif pos_adjust == 1:
                if (pos + 1 < prediction_length):
                    lapadjust[pos] = 0
                    lapadjust[pos + 1] = 1

    return lapadjust
# pit model is separate for each car
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
    """Predict the pit-stop indicator for the forecast horizon.

    The next pit lap (counted from the start of the current stint) is drawn
    from empirical stint-length samples; a 1 is placed at that lap if it
    falls inside the horizon, otherwise the prediction is all zeros.
    Note: the first parameter name carries a typo ('cuation'), kept for
    interface compatibility.
    """
    # this is the perfect empirical pit model for Indy500 2018
    # (observed stint lengths: [0] = short stints, [1] = long stints)
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
                 [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
                 [46, 45, 43, 48, 46, 45, 45, 43]]

    pit_model = pit_model_all
    # NOTE(review): many caution laps in the stint -> sample from the short-
    # stint list; confirm the intended mapping of model[0]/model[1]
    if cuation_laps_instint>10:
        #use low model
        pred_pit_laps = random.choice(pit_model[0])
    else:
        pred_pit_laps = random.choice(pit_model[1])

    #laps remain, fill into the future
    pitpred = np.array([0 for x in range(prediction_length)])
    if (pred_pit_laps > laps_instint) and (pred_pit_laps <= laps_instint + prediction_length):
        pitpred[pred_pit_laps - laps_instint - 1] = 1

    return pitpred
#dynamical/static feature configure
# selects which dynamic real covariates make_dataset_byevent emits per sample
#FEATURE_CARID = 1
FEATURE_STATUS = 2      # [track_status, lap_status]
FEATURE_PITAGE = 4      # [track_status, lap_status, pit_age]
FEATURE_TRACKONLY = 8   # [track_status]
def make_dataset_byevent(runs, prediction_length, freq,
                       useeid = False,
                       run_ts= COL_LAPTIME,
                       test_event = 'Indy500-2018',
                       test_cars = None,
                       use_global_dict = True,
                       oracle_mode = MODE_ORACLE,
                       feature_mode = FEATURE_STATUS,
                       half_moving_win = 0,
                       train_ratio=0.8,
                       context_ratio = 0.,
                       verbose = False
                ):
    """Build train/test ListDatasets from the module-level ``laptime_data``.

    Only the event equal to ``test_event`` is processed (all others are
    skipped); its series are cut into sliding-window test samples ending at
    every split point.

    Parameters:
        runs: index into laptime_data, or negative for all events
        prediction_length: forecast horizon (laps)
        freq: pandas frequency string for the synthetic timestamps
        useeid: include the event id as a second static categorical feature
        run_ts: which feature row is the target (COL_LAPTIME or COL_RANK)
        test_event: event name to emit test samples for
        test_cars: optional whitelist of car numbers (default: all cars);
            bugfix - was a mutable default argument ([])
        use_global_dict: map car numbers through global_carids; otherwise the
            row index is used as the car id (simulation datasets)
        oracle_mode: bit flags (MODE_*) controlling how the future track/lap
            covariates are filled (oracle, zeroed, model-predicted, disturbed)
        feature_mode: FEATURE_* selector for the emitted dynamic covariates
        half_moving_win: window step; 0 -> 1 lap, 1 -> horizon/2, 2 -> horizon
        train_ratio: fraction of max series length used as minimum length
        context_ratio: overridden by the module global _context_ratio (below)
        verbose: print per-event and per-car diagnostics

    Returns:
        (train_ds, test_ds, train_set, test_set)

    NOTE(review): reads module globals laptime_data, events, global_carids,
    _feature_mode and _context_ratio, which must be set by the caller module.
    """
    #force: deliberately override the parameters with module-level settings
    #run_ts = _run_ts
    #test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio

    # bugfix for the mutable default argument
    test_cars = [] if test_cars is None else test_cars

    init_track_model()
    init_adjust_track_model()

    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series

    train_set = []
    test_set = []

    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()

    #add statistics for adjust test: accumulated |adjusted - raw| for
    # trackstatus, lapstatus
    mae = [0,0]

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []

        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out: only the test event is processed
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)

        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        if verbose:
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()

            #remove nan (only tails; there should be no nans in the middle)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            #check carno in test_cars
            if len(test_cars)>0 and carno not in test_cars:
                continue

            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            # adjust for disturbance analysis: perturb the pit laps of the
            # whole series (cached per car, so every window sees the same)
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)

            # selection of features: zero out the covariates excluded by mode
            # (bugfix: the original repeated the MODE_NOLAP check twice)
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': rec[run_ts,:].astype(np.float32),
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                                     rec[COL_LAPSTATUS,:]]
                               }
                              )
            else:
                # multiple test ts (rolling window over the series tail)
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1

                # fixed split points shared by all cars/ts
                for endpos in range(max_len, context_len+prediction_length,
                                    step):

                    #check if enough for this ts
                    if endpos > totallen:
                        continue

                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    # stint state at the last observed lap before the horizon
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                    # test mode: fill the unknown future covariates
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as zero (nan unsupported)
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec

                    if test_flag(oracle_mode, MODE_PREDPIT):
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                                                     laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit resets the age
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    # disturbance analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status:
                        # future 1s in trackstatus, pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0

                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track (caution lap length);
                        # first find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec

                    # okay, end of adjustments; accumulate |adjusted - raw|
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))

                    # emit the sample with the covariates selected by feature_mode
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': rec[run_ts,:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec]
                                      }
                                     )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': rec[run_ts,:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                      }
                                     )
                    elif feature_mode == FEATURE_TRACKONLY:
                        _test.append({'target': rec[run_ts,:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec]
                                      }
                                     )
                    test_rec_cnt += 1

            #add one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

        train_set.extend(_train)
        test_set.extend(_test)

    print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)

    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile, freq, prediction_length, cardinality, train_ds, test_ds):
    """Pickle the dataset bundle [freq, prediction_length, cardinality, train_ds, test_ds]
    to `datafile` using the highest pickle protocol available."""
    bundle = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as fout:
        pickle.dump(bundle, fout, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[7]:
def predict(test_ds, predictor):
    """Run the GluonTS evaluation loop: 100 sample paths per series.

    Returns (tss, forecasts): ground-truth series and forecast objects,
    index-aligned with `test_ds`.
    """
    forecast_iter, ts_iter = make_evaluation_predictions(
        dataset=test_ds, predictor=predictor, num_samples=100)
    # materialize both generators so callers can index them repeatedly
    tss, forecasts = list(ts_iter), list(forecast_iter)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name, trainid):
    """Load the predictor for `model_name` and forecast `test_ds`.

    Serialized deepAR variants share one directory-name pattern:
        {head}-{_task_id}-{part}-indy-f1min-t{plen}-e1000-r1_{tag}_t{plen}
    so they are table-driven here instead of six copy-pasted branches.
    Baselines (naive/zero/arima) are constructed on the fly.

    Returns [tss, forecasts], or [] when `model_name` is unknown.
    """
    # model_name -> (head, dataset-part, tag) of the serialized model directory
    serialized = {
        'curtrack':         ('deepAR-Oracle', 'curtrack', 'curtrack'),
        'zerotrack':        ('deepAR-Oracle', 'nolap-zerotrack', 'zerotrack'),
        'oracle':           ('deepAR-Oracle', 'all', 'oracle'),
        'oracle-laponly':   ('deepAR-Oracle', 'all', 'oracle-laponly'),
        'oracle-trackonly': ('deepAR-Oracle', 'all', 'oracle-trackonly'),
        'deepAR':           ('deepAR', 'all', 'deepar'),
    }
    with mx.Context(mx.gpu(7)):
        pred_ret = []
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'
        if model_name in serialized:
            head, part, tag = serialized[model_name]
            model = (f'{head}-{_task_id}-{part}-indy-f1min-t{prediction_length}'
                     f'-e1000-r1_{tag}_t{prediction_length}')
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        # naive: repeat the last observed value
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        # zero: keeps the rank unchanged
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        # arima via the R forecast package
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq= freq,
                                           prediction_length = prediction_length, trunc_length=60)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        else:
            print(f'error: model {model_name} not support yet!')
        return pred_ret
# In[8]:
def load_model(model_name, trainid, prediction_length):
    """Deserialize (or construct) the predictor for `model_name`.

    Same directory-name pattern as run_prediction_ex, plus the deepARW
    variants. Returns the predictor, or None for an unknown model name
    (bugfix: the original fell through with `predictor` unbound and
    raised UnboundLocalError at the return).
    """
    # model_name -> (head, dataset-part, tag) of the serialized model directory:
    #   {head}-{_task_id}-{part}-indy-f1min-t{plen}-e1000-r1_{tag}_t{plen}
    serialized = {
        'curtrack':         ('deepAR-Oracle', 'curtrack', 'curtrack'),
        'zerotrack':        ('deepAR-Oracle', 'nolap-zerotrack', 'zerotrack'),
        'oracle':           ('deepAR-Oracle', 'all', 'oracle'),
        'oracle-laponly':   ('deepAR-Oracle', 'all', 'oracle-laponly'),
        'oracle-trackonly': ('deepAR-Oracle', 'all', 'oracle-trackonly'),
        'deepAR':           ('deepAR', 'all', 'deepar'),
        'deepARW':          ('deepARW', 'all', 'deepar'),
        'deepARW-oracle':   ('deepARW-Oracle', 'all', 'oracle'),
    }
    predictor = None
    with mx.Context(mx.gpu(7)):
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'
        if model_name in serialized:
            head, part, tag = serialized[model_name]
            model = (f'{head}-{_task_id}-{part}-indy-f1min-t{prediction_length}'
                     f'-e1000-r1_{tag}_t{prediction_length}')
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # naive: repeat the last observed value
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
        # zero: keeps the rank unchanged
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
        # arima via the R forecast package
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq= freq,
                                           prediction_length = prediction_length, trunc_length=60)
        else:
            print(f'error: model {model_name} not support yet!')
        return predictor
#
#calc rank
def eval_rank_bytimediff(test_ds, tss, forecasts, prediction_length):
    """Evaluate rank prediction for time-diff models (one event only).

    input:
        test_ds ; test set for a single event (records carry no event id)
        tss, forecasts ; output of predict(), index-aligned with test_ds
        prediction_length ; expected horizon, must match each forecast
    return:
        rank_ret ; list of [lap, time_diff, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [true, predicted] arrays}}
        (on a horizon mismatch, returns a bare [] instead — callers that
        unpack two values will fail; kept for compatibility)
    """
    carlist = []
    # completed_laps -> {carno -> [true timediff, predicted timediff]}
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: the f-prefix was missing, so the braces printed literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []
        # aggregate sample paths with the median (the laptime evaluators use the mean)
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))
        timediff_array = tss[idx].values.copy()
        # lap index at which this forecast window starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                                               forecast_laptime_mean.copy()]
    # calc rank: for each forecast start lap, rank the cars by time diff
    rank_ret = []
    for lap in forecasts_et.keys():
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}
        # NOTE(review): prediction_len leaks out of the loop above; assumes test_ds non-empty
        time_diff = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        # double argsort converts values to dense ranks (smaller timediff -> better rank)
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, time_diff, true_rank, pred_rank])
    return rank_ret, forecasts_et
#calc rank
def eval_rank_bylaptime(test_ds, tss, forecasts, prediction_length, start_offset):
    """Evaluate rank derived from laptime forecasting.

    input:
        test_ds ; must be a test set for a single event, because test_ds
            itself has no feature identifying the event id
        start_offset ; DataFrame with lap-0 elapsed time per car_number
        tss, forecasts ; forecast result from predict()
        prediction_length ; expected horizon, must match each forecast
    return:
        rank_ret ; list of [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: (elapsed, elapsed_pred)}}
        (on a horizon mismatch, returns a bare [] — kept for compatibility)
    """
    carlist = []
    # completed_laps -> {carno -> [true elapsed, predicted elapsed]}
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        # elapsed time of this car at lap 0, used as cumsum base
        offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: the f-prefix was missing, so the braces printed literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []
        # aggregate sample paths with the mean
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))
        # true elapsed time: cumulative laptimes plus the lap-0 offset
        laptime_array = tss[idx].values.copy()
        elapsed_time = np.cumsum(laptime_array) + offset
        # predicted elapsed time: splice the forecast into the horizon, then cumsum
        laptime_array = tss[idx].values.copy()
        laptime_array[-prediction_len:] = forecast_laptime_mean
        elapsed_time_hat = np.cumsum(laptime_array) + offset
        # lap index at which this forecast window starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]
    # calc rank: for each forecast start lap, rank cars by elapsed time
    rank_ret = []
    for lap in forecasts_et.keys():
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}
        # NOTE(review): prediction_len leaks out of the loop above; assumes test_ds non-empty
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0]
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1]
        # double argsort converts values to dense ranks (lower elapsed -> better rank)
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])
    return rank_ret, forecasts_et
def eval_laptime(test_ds, tss, forecasts, prediction_length, start_offset):
    """Evaluate laptime forecasting directly (no rank conversion).

    input:
        test_ds ; must be a test set for a single event
        start_offset ; unused here, kept so the signature matches eval_rank
        tss, forecasts ; forecast result from predict()
        prediction_length ; expected horizon, must match each forecast
    return:
        rank_ret ; list of [lap, lap_time, true_laptime, pred_laptime]
        forecasts_et ; {completed_laps: {carno: [true, predicted] laptimes}}
        (on a horizon mismatch, returns a bare [] — kept for compatibility)
    """
    carlist = []
    # completed_laps -> {carno -> [true laptimes, predicted laptimes]}
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: the f-prefix was missing, so the braces printed literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []
        # aggregate sample paths with the mean
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))
        laptime_array = tss[idx].values.copy()
        # predicted series: splice the forecast into the horizon tail
        laptime_array_hat = tss[idx].values.copy()
        laptime_array_hat[-prediction_len:] = forecast_laptime_mean
        # lap index at which this forecast window starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
                                               laptime_array_hat[-prediction_len:].copy()]
    # assemble per-lap matrices (kept in rank_ret-compatible shape)
    rank_ret = []
    for lap in forecasts_et.keys():
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}
        # NOTE(review): prediction_len leaks out of the loop above; assumes test_ds non-empty
        lap_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        # no ranking here: return the laptime matrices themselves
        true_laptime = lap_time[0]
        pred_laptime = lap_time[1]
        rank_ret.append([lap, lap_time, true_laptime, pred_laptime])
    return rank_ret, forecasts_et
#calc rank
def eval_rank(test_ds, tss, forecasts, prediction_length, start_offset):
    """Evaluate rank prediction; supports two target semantics.

    When `start_offset` is a DataFrame (lap-0 elapsed time per car_number),
    the target is laptime and is converted to elapsed time before ranking
    (laptime2rank). Otherwise the target is ranked directly (rank mode).

    input:
        test_ds ; must be a test set for a single event, because test_ds
            itself has no feature identifying the event id
        tss, forecasts ; forecast result from predict()
        prediction_length ; expected horizon, must match each forecast
    return:
        rank_ret ; list of [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: (true, predicted) arrays}}
        (on a horizon mismatch, returns a bare [] — kept for compatibility)
    """
    carlist = []
    # completed_laps -> {carno -> [true series, predicted series]}
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        if isinstance(start_offset, pd.core.frame.DataFrame):
            # elapsed time of this car at lap 0, used as cumsum base
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: the f-prefix was missing, so the braces printed literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []
        # aggregate sample paths with the mean
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))
        if isinstance(start_offset, pd.core.frame.DataFrame):
            # laptime2rank: cumulate laptimes into elapsed time, true and predicted
            laptime_array = tss[idx].values.copy()
            elapsed_time = np.cumsum(laptime_array) + offset
            laptime_array = tss[idx].values.copy()
            laptime_array[-prediction_len:] = forecast_laptime_mean
            elapsed_time_hat = np.cumsum(laptime_array) + offset
        else:
            # rank-direct: the series already carries the quantity to rank
            elapsed_time = tss[idx].values.copy()
            elapsed_time_hat = tss[idx].values.copy()
            elapsed_time_hat[-prediction_len:] = forecast_laptime_mean
        # lap index at which this forecast window starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]
    # calc rank: for each forecast start lap, rank the cars
    rank_ret = []
    for lap in forecasts_et.keys():
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}
        # NOTE(review): prediction_len leaks out of the loop above; assumes test_ds non-empty
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        # double argsort converts values to dense ranks (lower value -> better rank)
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])
    return rank_ret, forecasts_et
def get_acc(rank_ret, prediction_length, verbose = False):
    """Compute rank-accuracy metrics over a list of evaluation records.

    input:
        rank_ret ; list of [lap, elapsed_time, true_rank, pred_rank];
            only columns [2] and [3] (numpy rank matrices of shape
            (cars, prediction_length)) are used
        prediction_length ; forecast horizon used for normalization
        verbose ; print the metrics when True
    return:
        ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
         (count ...)) ; metric values plus the record counts that produced
        them, so callers can compute micro/macro aggregates.
        'farmost' variants use only the last predicted lap; despite the
        name, `rmse` is actually the mean squared error of the ranks.
    """
    # accumulators over all records
    top1acc = 0
    top1acc_farmost = 0
    top5acc = 0
    top5acc_farmost = 0
    tau = 0
    rmse = 0.
    for rec in rank_ret:
        trueRank = rec[2]
        predRank = rec[3]
        # top1: correct leader per predicted lap (rank == 0)
        top1acc += np.sum((trueRank==0) & (predRank==0))
        top1acc_farmost += np.sum((trueRank[:,-1]==0) & (predRank[:,-1]==0))
        # top5: cars correctly placed in the top five
        top5acc += np.sum((trueRank<5) & (predRank<5))
        top5acc_farmost += np.sum((trueRank[:,-1]<5) & (predRank[:,-1]<5))
        # Kendall rank correlation over the flattened rank matrices
        # NOTE(review): kendalltau returns nan for constant input, which
        # would poison the running sum
        tao, _ = stats.kendalltau(trueRank, predRank)
        tau += tao
        # mean squared error over all cells; identical to sklearn's
        # mean_squared_error(predRank, trueRank) but without the dependency
        rmse += np.mean((np.asarray(predRank) - np.asarray(trueRank)) ** 2)
    recnt = len(rank_ret)
    if recnt > 0:
        # normalize to per-prediction / per-record averages
        top1acc = top1acc *1.0/ (recnt*prediction_length)
        top1acc_farmost = top1acc_farmost *1.0/ recnt
        top5acc = top5acc *1.0/ (5*recnt*prediction_length)
        top5acc_farmost = top5acc_farmost *1.0/ (5*recnt)
        tau = tau/recnt
        rmse = rmse/recnt
    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', top1acc,
              'top1acc_farmost=', top1acc_farmost,
              'top5acc=', top5acc,
              'top5acc_farmost=', top5acc_farmost,
              )
        print('tau = ', tau,
              'rmse = ', rmse)
    return ((top1acc,top1acc_farmost,top5acc,top5acc_farmost,tau,rmse),
            (recnt*prediction_length,recnt,5*recnt*prediction_length,5*recnt,recnt,recnt))
# In[9]:
def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
        test_cars = [],
        datamode = MODE_ORACLE,models = ['oracle']):
    """Run one experiment on the configured test event.

    Builds the test dataset once (depends on the module-level _test_event),
    forecasts it with every model in `models`, evaluates according to the
    global _exp_id, and returns per-model predictions, datasets, rank
    results, and metric rows.
    """
    metric_rows = []
    predictions = {}
    datasets = {}
    rank_results = {}
    # build the evaluation dataset once; shared by every model below
    train_ds, test_ds, _, _ = make_dataset_byevent(
        events_id[_test_event], prediction_length, freq,
        oracle_mode=datamode,
        run_ts=_run_ts,
        test_event=_test_event,
        test_cars=test_cars,
        half_moving_win=half_moving_win,
        train_ratio=train_ratio)
    for model in models:
        print('exp:', inspect.stack()[0][3], 'model:', model,
              'datamode:', get_modestr(datamode), 'eval:', _exp_id)
        tss, forecasts = run_prediction_ex(test_ds, prediction_length, model,
                                           trainid=trainid)
        predictions[model] = [tss, forecasts]
        datasets[model] = test_ds
        # dispatch on the globally selected evaluation mode
        if _exp_id == 'rank':
            rank_ret, forecast_ret = eval_rank(test_ds, tss, forecasts,
                                               prediction_length, 0)
        elif _exp_id == 'laptime2rank':
            rank_ret, forecast_ret = eval_rank(test_ds, tss, forecasts,
                                               prediction_length,
                                               global_start_offset[_test_event])
        elif _exp_id == 'timediff2rank':
            rank_ret, forecast_ret = eval_rank_bytimediff(test_ds, tss, forecasts,
                                                          prediction_length)
        elif _exp_id == 'laptime':
            rank_ret, forecast_ret = eval_laptime(test_ds, tss, forecasts,
                                                  prediction_length,
                                                  global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        metrics = get_acc(rank_ret, prediction_length)
        row = [model, prediction_length, half_moving_win, get_modestr(datamode), trainid]
        row.extend(metrics[0])
        metric_rows.append(row)
        rank_results[model] = (rank_ret, forecast_ret)
    return predictions, datasets, rank_results, metric_rows
# In[10]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc, datamode='', models=[]):
    """
    Run a grid of experiments `runs` times and aggregate mean/std metrics.

    input:
        runs ; number of repetitions for the mean/std aggregation
        plens ; prediction lengths to test, e.g. [2,5,10]
        half ; half_moving_win settings, e.g. [0]
        trainids ; training run ids, e.g. ["r0.5"]
        train_ratio ; forwarded to the test function
        testfunc ; either a callable (run_exp_predpit, run_exp_predtrack, ...)
            or a string, in which case run_exp is used with datamode/models
        datamode, models ; required when testfunc is a string
    return:
        dfret ; average result of the multiple runs
            dataframe['model' , 'prediction_length', 'halfmode','datamode','trainid',
            'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse',
            'top1acc_std','top1acc_farmost_std','top5acc_std','top5acc_farmost_std','tau_std','rmse_std']
        alldata_ret ; for debug
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
            pred_ret[model] -> [tss, forecasts]
            test_ds[model] -> test_ds
            rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
            forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    # guard against empty grid dimensions
    if plens == [] or half == [] or trainids == []:
        print("error with empty settings")
        return
    # a string testfunc requires explicit datamode & models for run_exp
    if isinstance(testfunc,str) and (datamode == '' or models == []):
        print("error with testfunc")
        return
    allret = []        # one metrics DataFrame per run
    alldata_ret = []   # raw per-run debug data
    for runid in range(runs):
        exp_data = []
        exp_result = []
        # full grid: half-window mode x prediction length x training id
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('='*10)
                    if not isinstance(testfunc,str):
                        # custom experiment callable
                        pred_ret, test_ds, rank_ret, metric_ret = testfunc(plen, halfmode,
                                                   train_ratio=train_ratio,
                                                   trainid=trainid)
                    else:
                        # default experiment driver
                        pred_ret, test_ds, rank_ret, metric_ret = run_exp(plen, halfmode,
                                                   train_ratio=train_ratio,
                                                   trainid=trainid,
                                                   datamode=datamode,
                                                   models=models)
                    #save
                    exp_data.append((pred_ret, test_ds, rank_ret))
                    exp_result.extend(metric_ret)
        #save result of this run as a DataFrame
        result = pd.DataFrame(exp_result, columns = ['model' , 'prediction_length', 'halfmode',
                                                     'datamode','trainid',
                                                     'top1acc','top1acc_farmost','top5acc',
                                                     'top5acc_farmost','tau','rmse'])
        #result['runid'] = [runid for x in range(len(result))]
        allret.append(result)
        alldata_ret.append(exp_data)
    # final: stack the six metric columns of every run into (runs, rows, 6)
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :,:] = ret[['top1acc','top1acc_farmost','top5acc',
                                   'top5acc_farmost','tau','rmse']].values
    # average and standard deviation across runs
    averagemat = np.mean(metrics[:,:,:], axis=0)
    stdmat = np.std(metrics[:,:,:], axis=0)
    # identifying columns are identical across runs; take them from run 0
    dfhead = allret[0][['model' , 'prediction_length', 'halfmode', 'datamode','trainid']]
    dfaverage = pd.DataFrame(averagemat, columns = ['top1acc','top1acc_farmost','top5acc',
                                                    'top5acc_farmost','tau','rmse'])
    dfstd = pd.DataFrame(stdmat, columns = ['top1acc_std','top1acc_farmost_std','top5acc_std',
                                            'top5acc_farmost_std','tau_std','rmse_std'])
    dfret = pd.concat([dfhead, dfaverage, dfstd], axis=1)
    #if exp_id != '':
    #    dfret.to_csv(f'laptime2rank-evaluate-indy500-{exp_id}-result.csv', float_format='%.3f')
    return dfret, alldata_ret
def checkret_status(dataret, runid = 0, idx = 0,model='oracle'):
    """Print the total yellow-flag and pit counts inside the prediction
    window of one test dataset.

    dataret layout (see run_test):
        [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
        pred_ret[model] -> [tss, forecasts]
        test_ds[model] -> test_ds
    """
    # horizon taken from the first forecast's sample matrix
    _, plen = dataret[runid][idx][0][model][1][0].samples.shape
    test_ds = dataret[runid][idx][1][model]
    yfcnt = 0   # yellow-flag laps inside the prediction window
    pitcnt = 0  # pit laps inside the prediction window
    for test_rec in test_ds:
        # lookup kept for its KeyError side effect on unknown car ids
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        track_rec, lap_rec = test_rec['feat_dynamic_real']
        yfcnt += np.sum(track_rec[-plen:])
        pitcnt += np.sum(lap_rec[-plen:])
    print('yfcnt:', yfcnt, 'pitcnt:',pitcnt)
def get_ref_oracle_testds(plens, halfs, train_ratio=0.8, test_cars = []):
    """Build reference oracle test datasets for every (prediction_length,
    half_moving_win) combination.

    Returns a dict keyed '<plen>-<halfwin>' -> test dataset, used by
    checkret_confusionmat as the ground-truth feature reference.
    """
    testset = {}
    for plen in plens:
        for halfwin in halfs:
            # only the test split is needed as reference
            _, ref_ds, _, _ = make_dataset_byevent(
                events_id[_test_event], plen, freq,
                oracle_mode=MODE_ORACLE,
                run_ts=_run_ts,
                test_event=_test_event,
                test_cars=test_cars,
                half_moving_win=halfwin,
                train_ratio=train_ratio)
            testset['%d-%d' % (plen, halfwin)] = ref_ds
    return testset
def checkret_confusionmat(dataret, ref_testset, runid= 0, testid = '', model='oracle'):
    """
    output the 4x4 confusion matrix split by track and lap status
    input:
        dataret ; result structure from run_test (see its docstring)
        ref_testset ; oracle test datasets keyed '<plen>-<halfwin>' from
            get_ref_oracle_testds; supplies the ground-truth track/lap
            features even when the evaluated model used predicted ones
        runid, testid, model ; which run to inspect and how to label rows
    return:
        dfacc ; DataFrame of get_acc metrics per lap type
            ('00','10','01','11' = <has caution><has pit>, 'aa' = all)
    """
    plen_length = len(dataret[runid])
    dflist = []
    for idx in range(plen_length):
        # horizon taken from the first forecast's sample matrix
        _, plen = dataret[runid][idx][0][model][1][0].samples.shape
        test_ds = dataret[runid][idx][1][model]
        rank_ret = dataret[runid][idx][2][model][0]
        # reference datasets were built with half_moving_win == 0
        key = '%d-%d'%(plen,0)
        if key not in ref_testset:
            print(f'error, {key} not found in ref_testset')
            continue
        ref_oracle_testds = ref_testset[key]
        if len(ref_oracle_testds) != len(test_ds):
            print('error, size of testds mismatch', len(ref_oracle_testds), len(test_ds))
            continue
        # confusion matrix for <trackstatus, lapstatus> type: 00,01,10,11
        # lap(start lap of prediction) -> (caution count, pit count) summed
        # over all cars whose window starts at that lap
        lapmap = {}
        ds_iter = iter(ref_oracle_testds)
        for recid in range(len(ref_oracle_testds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            #track_rec,lap_rec = test_rec['feat_dynamic_real']
            dyna_feats = test_rec['feat_dynamic_real']
            track_rec = dyna_feats[0]
            lap_rec = dyna_feats[1]
            # counts of caution / pit laps inside the prediction window
            yfcnt = np.sum(track_rec[-plen:])
            pitcnt = np.sum(lap_rec[-plen:])
            #laptype = ('0' if yfcnt==0 else '1') + ('0' if pitcnt==0 else '1')
            lap = len(track_rec) - plen + 1
            if lap not in lapmap:
                #lapmap[lap] = laptype
                lapmap[lap] = (yfcnt, pitcnt)
            else:
                # accumulate over all cars sharing this start lap
                oldtype = lapmap[lap]
                lapmap[lap] = (yfcnt + oldtype[0], pitcnt + oldtype[1])
        #split the rank_ret by laptype and score each subset separately
        types=['00','10','01','11']
        acc_ret = []
        for laptype in types:
            check_ret = []
            for item in rank_ret:
                typecnt = lapmap[item[0]]
                # binarize the counts into the two-character type code
                thetype = ('0' if typecnt[0]==0 else '1') + ('0' if typecnt[1]==0 else '1')
                if thetype == laptype:
                    check_ret.append(item)
            # get acc for this subset only
            metrics = get_acc(check_ret,plen)
            recret = [testid, plen, laptype, len(check_ret)]
            recret.extend(metrics[0])
            acc_ret.append(recret)
        #add the all-records row, labelled 'aa'
        metrics = get_acc(rank_ret,plen)
        recret = [testid, plen, 'aa', len(rank_ret)]
        recret.extend(metrics[0])
        acc_ret.append(recret)
        _dfacc = pd.DataFrame(acc_ret, columns = ['testid','plen',
                 'type','reccnt','top1acc','top1acc_farmost','top5acc',
                 'top5acc_farmost','tau','rmse'])
        dflist.append(_dfacc)
    dfacc = pd.concat(dflist, axis=0)
    return dfacc
# In[11]:
def check_testds(datamode, test_cars=[]):
    """
    Build the test datasets for every (plens x half) setting purely for
    their side effects: make_dataset_byevent prints the train/test sizes
    and the track/lap-status MAE while constructing the data, which is
    the "report mae" this helper exists for. The returned datasets are
    discarded.
    Relies on module-level plens, half, train_ratio, _test_event, _run_ts.
    """
    for prediction_length in plens:
        for half_moving_win in half:
            train_ds, test_ds,_,_ = make_dataset_byevent(events_id[_test_event], prediction_length,freq,
                                        oracle_mode=datamode,
                                        run_ts = _run_ts,
                                        test_event = _test_event,
                                        test_cars=test_cars,
                                        half_moving_win= half_moving_win,
                                        train_ratio=train_ratio)
def dotest(config):
    """Run every configured (model, test-name -> datamode) combination and
    collect both the aggregate metrics and the confusion-matrix split.

    config: {model_name: {test_name: datamode_flags}}
    Returns (dfret, dfacc) — stacked metric and per-laptype DataFrames.
    Relies on module-level runs/plens/half/trainids/train_ratio/ref_testset.
    """
    acc_frames = []
    ret_frames = []
    for model, conf in config.items():
        for teststr, datamode in conf.items():
            # teststr is passed as a string, so run_test dispatches to run_exp
            df, dataret = run_test(runs, plens, half, trainids,
                                   train_ratio, teststr, datamode=datamode, models=[model])
            acc = checkret_confusionmat(dataret, ref_testset,
                                        testid=teststr, model=model)
            ret_frames.append(df)
            acc_frames.append(acc)
    dfret = pd.concat(ret_frames, axis=0)
    dfacc = pd.concat(acc_frames, axis=0)
    return dfret, dfacc
# ### init
# In[12]:
#
# configuration
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
_dataset_id = 'indy2013-2018-nocarid'   # dataset/model family used in the model root dir
#_dataset_id = 'indy2013-2018'
_test_event = 'Indy500-2019'            # event held out for evaluation
_feature_mode = FEATURE_STATUS
_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK ; which channel of rec is the target
_exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank... ; evaluation mode in run_exp
# filled in by init(); consumed throughout the evaluation functions
global_start_offset = {}   # event -> DataFrame(car_number, lap-0 elapsed_time)
global_carids = {}         # carno -> global car id
laptime_data = None        # pickled laptime bundle
freq = "1min"              # pandas frequency string for ListDataset
decode_carids = {}         # global car id -> carno (inverse of global_carids)
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
# database/pickle id covering the full year range
#dbid = f'Indy500_{years[0]}_{years[-1]}'
dbid = f'Indy500_{years[0]}_{years[-1]}_v9'
def init():
    """Populate the module-level globals: per-event start offsets from the
    stage data, and the pickled car-id map + laptime bundle."""
    global global_carids, laptime_data, global_start_offset, decode_carids
    stagedata = {}
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata = stagedata[event]
        #offset: elapsed time of each car at lap 0, used as cumsum base in eval_rank
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]
    # start from here
    import pickle
    #with open('laptime_rank_timediff_fulltest-oracle-%s.pickle'%year, 'rb') as f:
    with open(f'laptime_rank_timediff_pit-oracle-{dbid}.pickle', 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    # inverse mapping: global car id -> car number
    decode_carids={carid:carno for carno, carid in global_carids.items()}
# (commented-out experiment knobs kept from the notebook export)
#useeid = False
#interpolate = False
#ipstr = '-ip' if interpolate else '-noip'
#ipstr = '%s-%s'%('ip' if interpolate else 'noip', 'eid' if useeid else 'noeid')
#if useeid:
#    cardinality = [len(global_carids), len(laptime_data)]
#else:
#    cardinality = [len(global_carids)]

# ### oracle test configuration
plens=[2,5,10]          # forecast horizons (laps) to evaluate
half=[0]                # half_moving_win mode(s); 0 -> rolling step of 1 lap
trainids = ["2018"]     # identifiers of the trained models to load (presumably train-set tags)
#trainids = ["r0.5","r0.6"]
runs = 1                # repeated runs per configuration
ref_testset = None      # filled by mytest() via get_ref_oracle_testds()
_context_ratio = 0.     # context length as ratio of max ts length (0 = default context)
train_ratio = 0.4       # fraction of laps used as observed context for the split
def mytest():
    """Run the full evaluation suite once, caching results as CSV files.

    If the "all results" CSV already exists, the cached accuracy/result
    frames are loaded and returned instead of re-running the evaluation.

    Returns:
        (dfacc, dfret): contingency/accuracy table and full result table.
    """
    global ref_testset
    stem = f'{_exp_id}-evaluate-mean-splitbyevent-fulltest'
    tag = f'd{_dataset_id}-t{_test_event}-r{runs}-c{_context_ratio}-result.csv'
    acc_output = f'{stem}-contigency-{tag}'
    ret_output = f'{stem}-all-{tag}'
    if os.path.exists(ret_output):
        print(f'{ret_output} already exists, bye')
        return pd.read_csv(acc_output), pd.read_csv(ret_output)

    oracle_tests = {
        # features available in train and test
        'fulloracle': MODE_ORACLE,
        'notracklap': MODE_NOTRACK + MODE_NOLAP,
        'laponly': MODE_ORACLE_LAPONLY,
        'trackonly': MODE_ORACLE_TRACKONLY,
        # features predicted/synthesized at test time only
        'fullpred': MODE_PREDTRACK + MODE_PREDPIT,
        'predtrack': MODE_PREDTRACK + MODE_ORACLE_TRACKONLY,
        'predpit': MODE_PREDPIT + MODE_ORACLE_LAPONLY,
        'curtrack': MODE_TESTCURTRACK,
        'zerotrack': MODE_TESTZERO,
    }
    config = {
        'oracle': oracle_tests,
        'deepAR': {'deepAR': MODE_ORACLE},
        'naive': {'naive': MODE_ORACLE},
        'zero': {'zero': MODE_ORACLE},
    }
    ref_testset = get_ref_oracle_testds(plens, half, train_ratio=train_ratio)
    dfret, dfacc = dotest(config)
    dfret.to_csv(ret_output, float_format='%.3f')
    dfacc.to_csv(acc_output, float_format='%.3f')
    return dfacc, dfret
# In[ ]:
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    # logging configuration
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # cmd argument parser
    usage = 'evaluate-fulltest-fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    parser.add_option("--contextratio", dest="contextratio", default=0.)
    opt, args = parser.parse_args()
    # set global parameters
    # NOTE(review): opt.contextratio is parsed but never assigned to
    # _context_ratio, so the --contextratio option has no effect -- confirm.
    _dataset_id = opt.datasetid
    _test_event = opt.testevent
    # the task id selects which time series is modeled and the experiment tag
    if opt.taskid == 'laptime':
        _task_id = 'laptime' # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
        _exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff' # rank,laptime, the trained model's task
        _run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
        _exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'rank':
        _task_id = 'rank' # rank,laptime, the trained model's task
        _run_ts = COL_RANK #COL_LAPTIME,COL_RANK
        _exp_id='rank' #rank, laptime, laptim2rank, timediff2rank...
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)
    if _dataset_id=='' or _test_event=='':
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)
    # NOTE(review): find() > 0 misses a dataset id that *starts* with
    # 'pitage' (find returns 0 there) -- confirm this is intentional.
    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE
    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
                _task_id)
    init()
    mytest()
| 73,154 | 36.210071 | 310 | py |
rankpredictor | rankpredictor-master/src/indycar/model/prophet_telemetry_tsgluon.py | #!/usr/bin/env python
# coding: utf-8
# # Prophet on telemetry ts dataset
#
# refer to telemetry_dataset_gluonts
# In[1]:
# Third-party imports
import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import os,sys
from optparse import OptionParser
import pickle
from pathlib import Path
# In[2]:
### test on one run
from gluonts.dataset.common import ListDataset
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator
from gluonts.model.prophet import ProphetPredictor
def evaluate_model(test_ds, predictor):
    """Forecast every series in *test_ds* with *predictor* and print metrics.

    Draws 100 sample paths per series, evaluates the 0.1/0.5/0.9 quantiles
    with the GluonTS Evaluator and prints the aggregate metrics as JSON.

    Returns:
        (tss, forecasts): the ground-truth series and the forecast objects.
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,
        predictor=predictor,
        num_samples=100,
    )
    # materialize forecasts first, then the truth series (same order as before)
    forecasts = list(forecast_it)
    tss = list(ts_it)
    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts),
                                          num_series=len(test_ds))
    print(json.dumps(agg_metrics, indent=4))
    return tss, forecasts
def plot_prob_forecasts(ts_entry, forecast_entry, outputfile):
    """Plot the tail of a series with its probabilistic forecast, save a PDF.

    Shows the last 800 observations, the median prediction and the 50%/90%
    prediction intervals; the figure is written to '<outputfile>.pdf'.
    """
    plot_length = 800
    prediction_intervals = (50.0, 90.0)
    interval_labels = [f"{k}% prediction interval" for k in prediction_intervals][::-1]
    legend = ["observations", "median prediction"] + interval_labels
    fig, ax = plt.subplots(1, 1, figsize=(10, 7))
    tail = ts_entry[-plot_length:]
    plt.plot(tail.index, tail.values)
    forecast_entry.plot(prediction_intervals=prediction_intervals, color='g')
    plt.grid(which="both")
    plt.legend(legend, loc="upper left")
    plt.savefig(outputfile + '.pdf')
# prophet
def evaluate_prophet(test_ds, prediction_length, freq):
    """Score a fresh Prophet predictor on *test_ds*; returns (tss, forecasts)."""
    return evaluate_model(
        test_ds, ProphetPredictor(freq=freq, prediction_length=prediction_length))
def run_prophet(prediction_length, freq):
    """Build the train/test split and score Prophet on the test part.

    NOTE(review): relies on module-level `make_dataset` and `runs`, which are
    not defined in this file -- confirm they exist where this is called.
    """
    _, test_ds = make_dataset(runs, prediction_length, freq)
    evaluate_prophet(test_ds, prediction_length, freq)
def run_prophet_nonan(prediction_length, freq):
    """Like run_prophet, but on the NaN-free dataset variant.

    NOTE(review): relies on module-level `make_dataset_nonan`, which is not
    defined in this file -- confirm it exists where this is called.
    """
    _, test_ds = make_dataset_nonan(prediction_length, freq)
    evaluate_prophet(test_ds, prediction_length, freq)
# ## Datasets
#
# In[13]:
import pickle

# Load the prepacked telemetry dataset: sampling frequency, forecast horizon,
# categorical cardinality and ready-made GluonTS train/test ListDatasets.
with open('telemetry-gluonts-all-2018.pickle', 'rb') as f:
    # The protocol version used is detected automatically, so we do not
    # have to specify it.
    freq, prediction_length, cardinality,train_ds, test_ds = pickle.load(f, encoding='latin1')

# 2018 IndyCar events covered by the dataset, indexed by position
events = ['Phoenix','Indy500','Texas','Iowa','Pocono','Gateway']
events_id={key:idx for idx, key in enumerate(events)}

print(f"events: {events}")

from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()

# global configuration: which telemetry channel to model
TS_VSPEED=1
TS_DISTANCE=2
run_ts = TS_VSPEED

# Prophet baseline: evaluate, then plot series #7 as a sample
tss, forecast =evaluate_prophet(test_ds,prediction_length,freq)
ts_entry = tss[7]
forecast_entry = forecast[7]
plot_prob_forecasts(ts_entry, forecast_entry, 'prophet-tele-00')
# In[12]:
# ### R-based classical baselines (ETS / ARIMA) via the GluonTS R bridge
from gluonts.model.r_forecast import RForecastPredictor
est = RForecastPredictor(method_name='ets',freq= freq, prediction_length = prediction_length)
arima = RForecastPredictor(method_name='arima',freq= freq, prediction_length = prediction_length)

# ETS: evaluate and plot sample series #7
tss, forecast = evaluate_model(test_ds, est)
ts_entry = tss[7]
forecast_entry = forecast[7]
plot_prob_forecasts(ts_entry, forecast_entry, 'ets-tele-00')

# ARIMA: evaluate and plot the same sample series
tss, forecast = evaluate_model(test_ds, arima)
ts_entry = tss[7]
forecast_entry = forecast[7]
plot_prob_forecasts(ts_entry, forecast_entry,'arima-tele-00')
#
#
## ### DeepAR
#
## In[21]:
#
#
# NOTE(review): exit(0) below terminates the script here -- everything that
# follows (the DeepAR training/evaluation section) is unreachable dead code,
# presumably disabled on purpose when the notebook was exported. Remove the
# exit(0) to re-enable DeepAR training.
exit(0)

from gluonts.model.deepar import DeepAREstimator
from gluonts.trainer import Trainer

# DeepAR with the car id as a static categorical feature; horizon and
# cardinality come from the pickle loaded above
estimator = DeepAREstimator(
    prediction_length=prediction_length,
    context_length = 3*prediction_length,
    use_feat_static_cat=True,
    cardinality=cardinality,
    freq=freq,
    trainer=Trainer(ctx="gpu(2)",
                    epochs=500,
                    learning_rate=1e-3,
                    num_batches_per_epoch=64
                   )
)

# train and persist the model
predictor = estimator.train(train_ds)
modeldir = 'deepar-tele'
if not os.path.exists(modeldir):
    os.mkdir(modeldir)
predictor.serialize(Path(modeldir))

from gluonts.evaluation.backtest import make_evaluation_predictions
forecast_it, ts_it = make_evaluation_predictions(
    dataset=test_ds,  # test dataset
    predictor=predictor,  # predictor
    num_samples=100,  # number of sample paths we want for evaluation
)
forecasts = list(forecast_it)
tss = list(ts_it)

# plot one sample and report aggregate accuracy metrics
ts_entry = tss[7]
forecast_entry = forecasts[7]
plot_prob_forecasts(ts_entry, forecast_entry, 'deepar-tele-00')
evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
print(json.dumps(agg_metrics, indent=4))
| 6,146 | 22.551724 | 118 | py |
rankpredictor | rankpredictor-master/src/indycar/model/stint_simulator_paper.py | #!/usr/bin/env python
# coding: utf-8
# ### stint simulator
# based on: Stint-Predictor-Fastrun
#
#
# long term predictor by continuously regressive forecasting at each pitstop
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random, math
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from indycar.model.pitmodel import PitModelSimple, PitModelMLP
# In[2]:
import os
random.seed()   # seed the RNG from system entropy
os.getcwd()     # NOTE(review): result discarded -- notebook-export leftover
#GPUID = 1

# ### global constants
#
# Feature-row indices into each car's time-series matrix
# (rows are features, columns are lap numbers; NaNs only appear at the tail).
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8

# share the memory
#COL_LAPSTATUS_PRED = 8 # for dynamic lapstatus predictions
#LAPSTATUS SAVED in forecast_et
COL_LAPSTATUS_SAVE = 0 #laptime no use
COL_CAUTION_LAPS_INSTINT_SAVE=7
COL_LAPS_INSTINT_SAVE= 8

# feature-set selectors: track/lap status only, or additionally pit age
FEATURE_STATUS = 2
FEATURE_PITAGE = 4

# oracle-mode bit flags (combined by addition, tested via test_flag)
MODE_ORACLE = 1024 # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2

# oracle mode for training
# NOTE(review): these alias the values of MODE_ORACLE_TRACKONLY/LAPONLY above;
# make_dataset_byevent tests e.g. MODE_NOTRACK and MODE_ORACLE_LAPONLY in the
# same condition -- confirm the overlap is intentional.
MODE_NOLAP = 1
MODE_NOTRACK = 2

# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32

# disturbance analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256

# flag value -> printable name (used by get_modestr)
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
             MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
             MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
             MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
             MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
             MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
# In[ ]:
# In[4]:
def load_data(event, year=0):
    """Load one event's per-lap timing CSV and derive rank tables.

    Args:
        event: event name, e.g. 'Indy500-2018' (used to build the file path
            '../data/final/C_<event>[-<year>].csv').
        year: optional year tag. When truthy, it is appended to the file name.
            (fix: the old code concatenated the raw `year` into the string,
            which raised TypeError whenever the year branch was taken.)

    Returns:
        (alldata, rankdata, acldata):
        alldata  - the raw records for every car,
        rankdata - first record per (car, lap) in elapsed-time order,
        acldata  - per-lap table (make_cl_data) over all cars.
    """
    if year:
        inputfile = f'../data/final/C_{event}-{year}.csv'
    else:
        inputfile = '../data/final/C_'+ event +'.csv'
    dataset = pd.read_csv(inputfile)
    final_lap = max(dataset.completed_laps)
    # cars that completed the final lap
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    # keep a full copy before filtering down to the finishers
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    # first record per (car, lap), ties broken by original row order
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # note: the finishers-only table (make_cl_data(dataset)) was computed but
    # never used, so it has been dropped
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-(car, lap) table from raw timing records.

    For every car and completed lap, keep the first record in elapsed-time
    order and derive two deltas against the same car's previous lap:
    rank_diff (position change) and time_diff (lap time).

    Args:
        dataset: raw timing DataFrame. Must contain car_number,
            completed_laps, rank, elapsed_time, current_status, track_status,
            lap_status plus the auxiliary columns dropped below.

    Returns:
        DataFrame with columns [car_number, completed_laps, rank,
        elapsed_time, rank_diff, time_diff, current_status, track_status,
        lap_status], one row per (car, lap).
    """
    # pick up data with valid rank: first record per (car, lap) in time order
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
                          "last_pitted_lap","start_position","laps_led"], axis=1)
    # lap-over-lap deltas; a car's first row has no predecessor, so zero it.
    # (fix: use .loc instead of chained indexing, which is not guaranteed to
    # write through and raises SettingWithCopyWarning)
    first_row_mask = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[first_row_mask, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[first_row_mask, 'time_diff'] = 0
    df = uni_ds[['car_number','completed_laps','rank','elapsed_time',
                 'rank_diff','time_diff',"current_status", "track_status", "lap_status"]]
    return df
# In[5]:
def nan_helper(y):
    """Return a NaN mask for *y* and an index converter for interpolation.

    Input:
        y: 1d numpy array, possibly containing NaNs.
    Output:
        (mask, index): `mask` is the boolean NaN mask; `index` converts a
        logical mask into the corresponding integer positions.

    Example (linear interpolation of NaNs):
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)
    return mask, lambda logical: logical.nonzero()[0]
def test_flag(a, bitflag):
    """Return True iff every bit of *bitflag* is set in *a*."""
    masked = a & bitflag
    return masked == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
def get_modestr(a):
    """Return a comma-terminated list of the MODE_* flag names set in *a*."""
    names = [_mode_map[flag] for flag in _mode_map if test_flag(a, flag)]
    return ''.join('%s,' % name for name in names)
# memoized track-status forecasts: endpos -> vector of prediction_length
_track_pred = {}
_track_true = {}

def init_track_model():
    """Reset the cached per-endpos track-status predictions and truths."""
    global _track_pred, _track_true
    _track_pred, _track_true = {}, {}
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """Sample a predicted track status for the forecast window, memoized.

    If a caution (status 1) is in progress at the split point, a caution
    length is drawn from the empirical model (Indy500 2018) and the remaining
    caution laps are filled with 1; otherwise the window is all green (0).
    Results are cached per *endpos*; the true tail is stored in _track_true.
    """
    global _track_pred, _track_true
    if endpos in _track_pred:
        return _track_pred[endpos]
    # empirical caution lengths for Indy500 2018
    caution_lengths = [6, 4, 4, 5, 6, 6, 4]
    # count consecutive yellow-flag laps immediately before the forecast window
    ongoing_yellow = 0
    for back in range(1, context_len):
        if track_rec[-prediction_length - back] != 1:
            break
        ongoing_yellow += 1
    prediction = np.zeros(prediction_length, dtype=int)
    sampled_len = random.choice(caution_lengths)
    if ongoing_yellow > 0 and sampled_len > ongoing_yellow:
        # caution continues for the remaining sampled laps
        prediction[: sampled_len - ongoing_yellow] = 1
    _track_pred[endpos] = prediction
    _track_true[endpos] = track_rec[-prediction_length:].copy()
    return prediction
# memoized track-status adjustments: endpos -> vector of prediction_length
_track_adjust = {}

def init_adjust_track_model():
    """Reset the cached per-endpos track-status adjustments."""
    global _track_adjust
    _track_adjust = {}
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """
    Randomly perturb the end of a caution period in the forecast window.

    input:
        track_rec ; full track-status record up to endpos
        endpos ; split position (used as the memoization key)
        prediction_length ; length of the forecast window
        tailpos ; negative index (<0) of the last caution lap (status 1)
    return:
        the adjusted track status for the last prediction_length laps
    """
    global _track_adjust
    # adjustment of the caution-lap tail: shorten / keep / shift by one lap
    track_model = [-1,0,1]
    if endpos in _track_adjust:
        return _track_adjust[endpos]
    else:
        yflap_adjust = random.choice(track_model)
        #laps remain, fill into the future
        trackadjust = track_rec[-prediction_length:].copy()
        if yflap_adjust == -1:
            # end the caution one lap earlier
            trackadjust[tailpos] = 0
        elif yflap_adjust == 1:
            # NOTE(review): this zeroes tailpos and sets tailpos+1, i.e. it
            # shifts the last caution lap later rather than extending the
            # caution -- confirm this is the intended disturbance.
            trackadjust[tailpos] = 0
            if (tailpos + 1) <= -1:
                trackadjust[tailpos+1] = 1
        _track_adjust[endpos] = trackadjust
        return trackadjust
# carno -> adjusted lap_status, plus a histogram of the applied shifts
_lap_adjust = {}
_empirical_model = {}

def init_adjust_pitmodel():
    """Reset the cached per-car adjusted lap statuses and the shift histogram.

    Fix: _empirical_model was previously assigned inside the function without
    a `global` declaration, creating a useless local, so the histogram was
    never actually reset between runs.
    """
    global _lap_adjust, _empirical_model
    _lap_adjust = {}
    _empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """
    Return a randomly perturbed copy of a car's lap status, cached per car.

    Each pit lap (status 1) is shifted by a random offset drawn from
    _adjust_model; the chosen offsets are tallied in _empirical_model.

    input:
        carno ; car number (cache key into _lap_adjust)
        lapstatus ; the ground-truth lap status vector
        force ; when True, keep redrawing until the shifted position is in
                range; when False, give up after one failed draw (the pit
                then stays where it was)
    """
    if carno not in _lap_adjust:
        #adjust it
        lapadjust = lapstatus.copy()
        for pos in range(0, len(lapstatus)):
            if lapadjust[pos] == 1:
                success = False
                while(not success):
                    # adjust this pit lap position
                    pos_adjust = get_random_choice(_adjust_model)
                    new_pos = pos + pos_adjust
                    if new_pos >= 0 and new_pos < len(lapstatus):
                        #valid: move the pit and record the applied shift
                        lapadjust[pos] = 0
                        lapadjust[new_pos] = 1
                        success = True
                        #add statistics
                        if pos_adjust not in _empirical_model:
                            _empirical_model[pos_adjust] = 1
                        else:
                            _empirical_model[pos_adjust] += 1
                    if force==False:
                        break
        _lap_adjust[carno] = lapadjust
    return _lap_adjust[carno]
def build_random_model(modeldict):
    """Turn {value: weight} into a normalized CDF table.

    input:
        modeldict ; {value: probability weight}
    return:
        model ; (len, 2) array of [value, cdf] rows, values ascending,
                cdf normalized so the last entry is 1.0
    """
    model = np.zeros((len(modeldict), 2))
    running = 0.0
    for row, value in enumerate(sorted(modeldict)):
        running += modeldict[value]
        model[row, 0] = value
        model[row, 1] = running
    # normalize the cumulative column by the total weight
    model[:, 1] /= running
    return model
def print_model(model, iscdf=True):
    """Pretty-print a model as 'value:probability' pairs, sorted by value.

    input:
        model ; (n, 2) array of [value, cdf] rows when iscdf is True,
                or [value, raw weight] rows when iscdf is False
    """
    ordered = model[np.argsort(model[:, 0])]
    total = 1. if iscdf else np.sum(ordered[:, 1])
    pairs = []
    prev_cdf = 0
    for value, weight in ordered:
        # for a CDF, the probability is the increment over the previous row
        pairs.append((value, (weight - prev_cdf) / total))
        if iscdf:
            prev_cdf = weight
    print(['%d:%.3f' % (v, p) for v, p in pairs])
def get_random_choice(model):
    """Sample one value from a CDF table built by build_random_model.

    input:
        model ; (n, 2) array of [value, cdf] rows
    return:
        an int value drawn according to its probability
    """
    u = np.random.rand()
    # count how many cdf entries the uniform draw exceeds -> row index
    pos = np.sum(model[:, 1] < u)
    return int(model[pos, 0])
# pit-shift distribution: offset (laps) -> weight; the 0-offset weight is
# deliberately small so most pits get moved
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
_adjust_model = build_random_model(_modeldict)
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """Randomly shift each pit stop inside the forecast window.

    input:
        lap_rec ; full lap-status record; only the last prediction_length
                  entries are copied and perturbed
        prediction_length ; size of the forecast window
        force ; when True, redraw until the shifted position lands inside
                the window; when False, give up after one failed draw
    return:
        the adjusted lap status for the last prediction_length laps
    """
    window = lap_rec[-prediction_length:].copy()
    for pos in range(prediction_length):
        if window[pos] != 1:
            continue
        while True:
            shift = get_random_choice(_adjust_model)
            target = pos + shift
            if 0 <= target < prediction_length:
                # valid: move this pit stop
                window[pos] = 0
                window[target] = 1
                break
            if not force:
                break
    return window
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
    """Shift each pit stop in the forecast window by -1/0/+1 laps, uniformly.

    A shift that would leave the window is skipped (the pit stays put), so
    the number of pit laps is preserved.

    input:
        lap_rec ; full lap-status record; only the last prediction_length
                  entries are copied and perturbed
        endpos ; unused; kept for signature compatibility with callers
        prediction_length ; size of the forecast window
    return:
        the adjusted lap status for the last prediction_length laps

    Fix: removed a stray `random.choice` whose result was never used
    (it was immediately shadowed by the per-position draw).
    """
    adjust_model = [-1, 0, 1]
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            # draw an independent shift for this pit lap
            pos_adjust = random.choice(adjust_model)
            if pos_adjust == -1:
                if (pos - 1 >= 0):
                    lapadjust[pos] = 0
                    lapadjust[pos - 1] = 1
            elif pos_adjust == 1:
                if (pos + 1 < prediction_length):
                    lapadjust[pos] = 0
                    lapadjust[pos + 1] = 1
    return lapadjust
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
    """Sample a pit-stop forecast for the next *prediction_length* laps.

    A stint length is drawn from the empirical Indy500-2018 distribution
    (long-stint bucket when more than 10 caution laps have accumulated in
    the current stint, short-stint bucket otherwise). If the drawn stint
    ends inside the forecast window, that lap is marked 1.
    """
    # empirical stint lengths: [0] high-caution stints, [1] low-caution stints
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
                     [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    # top-8-finishers variant, kept for reference (unused)
    pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
                      [46, 45, 43, 48, 46, 45, 45, 43]]
    bucket = 0 if cuation_laps_instint > 10 else 1
    pred_pit_laps = random.choice(pit_model_all[bucket])
    pitpred = np.zeros(prediction_length, dtype=int)
    if laps_instint < pred_pit_laps <= laps_instint + prediction_length:
        # stint ends within the forecast window -> pit on that lap
        pitpred[pred_pit_laps - laps_instint - 1] = 1
    return pitpred
def make_dataset_byevent(runs, prediction_length, freq,
                         useeid = False,
                         run_ts= COL_LAPTIME,
                         test_event = 'Indy500-2018',
                         test_cars = [],
                         use_global_dict = True,
                         oracle_mode = MODE_ORACLE,
                         half_moving_win = 0,
                         train_ratio=0.8,
                         log_transform = False,
                         context_ratio = 0.,
                         verbose = False
                         ):
    """
    Build GluonTS train/test sets with rolling-window test samples.

    input:
        runs ; event index into laptime_data, or <0 for all events
        prediction_length ; forecast horizon in laps
        freq ; pandas frequency string for the series timestamps
        useeid ; when True, add the event id as a second static feature
        run_ts ; overridden below by the module global _run_ts
        test_event ; overridden below by the module global _test_event
        test_cars ; NOTE(review): never used in this body -- confirm
                    (also a mutable default argument)
        use_global_dict ; map car numbers through global_carids; otherwise
                    use the row id directly (simulation datasets)
        oracle_mode ; bitmask of MODE_* flags controlling how the track/lap
                    covariates in the forecast window are filled
        half_moving_win ; rolling-window step: 0 -> -1, 1 -> -plen/2, 2 -> -plen
        train_ratio ; minimum-length ratio used to drop short series
        log_transform ; model log(target + 1) instead of the raw target
        context_ratio ; context length as a ratio of the max series length
                    (0. -> 2*prediction_length, floored at 10)
    return:
        (train_ds, test_ds, train_set, test_set) ; ListDatasets plus the raw
        record lists they were built from.

    NOTE(review): events other than the test event are skipped by the
    `continue` below, so train_set stays empty and the `if not test_mode`
    train-append branch is unreachable as written -- confirm intended.
    """
    # the module globals override the corresponding keyword arguments
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode

    # reset the memoized track prediction/adjustment caches
    init_track_model()
    init_adjust_track_model()

    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    train_set = []
    test_set = []
    # select a single event run, or all of them
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()

    # accumulated |pred - truth| for the adjusted covariates:
    # mae[0] = trackstatus, mae[1] = lapstatus
    mae = [0,0]

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            # skip non-test events entirely
            continue

        # statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)

        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        if verbose:
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process each car's time series
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber]; rec_raw keeps the unmodified truth
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()

            # remove NaNs (tails only by construction)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # drop series too short for at least one train+forecast window
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                # simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            # target is a float copy of the selected feature row
            target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)

            # disturbance analysis: replace the true lap status with a
            # randomly shifted version (cached per car)
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)

            # feature selection: zero out track and/or lap status entirely
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set (unreachable as written, see docstring)
                _train.append({'target': target_val,
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                                     rec[COL_LAPSTATUS,:]]
                               }
                              )
            else:
                # rolling test windows; step size per half_moving_win
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1

                # bug fix, fixed the split point for all cars/ts
                for endpos in range(max_len, context_len+prediction_length,
                                    step):
                    # check if this ts is long enough for the split point
                    if endpos > totallen:
                        continue

                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    # stint state at the last observed lap before the window
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                    # test mode: fill the forecast window's covariates
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # carry the last observed track status forward; no pits
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        # for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        # zero both covariates in the forecast window
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        # for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    # predicting with the status models
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                                                     laps_instint,prediction_length)
                        # rebuild pitage from the predicted lap status
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                # new pit resets the pit age
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    # disturbance analysis on the oracle track status
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear isolated "0 1" onsets inside the window
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of the caution, if any
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                _tail = _pos
                                break
                        if _tail != 0:
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec

                    # measure how far the filled covariates drift from truth
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))

                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec]
                                      }
                                     )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                      }
                                     )
                    test_rec_cnt += 1

            # finished one car's ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

        train_set.extend(_train)
        test_set.extend(_test)

    print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)
    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile, freq, prediction_length, cardinality, train_ds, test_ds):
    """Pickle the dataset bundle [freq, prediction_length, cardinality,
    train_ds, test_ds] to *datafile* using the highest pickle protocol."""
    bundle = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as f:
        pickle.dump(bundle, f, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[6]:
def predict(test_ds, predictor):
    """Run rolling evaluation predictions (100 sample paths per series).

    Returns:
        (tss, forecasts): ground-truth series and forecast objects.
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,
        predictor=predictor,
        num_samples=100,
    )
    forecasts, tss = list(forecast_it), list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def load_model(prediction_length, model_name,trainid='2018',exproot='remote'):
    """Load (or construct) a predictor for *model_name*.

    Args:
        prediction_length: forecast horizon used in the serialized model name.
        model_name: one of the deserialized deepAR variants
            ('curtrack', 'zerotrack', 'oracle', 'oracle-laponly',
            'oracle-trackonly', 'deepAR') or a baseline
            ('naive', 'zero', 'arima').
        trainid: training-year tag in the model directory name.
        exproot: experiment root under ../models/.

    Returns:
        a GluonTS Predictor instance.

    Raises:
        ValueError: if *model_name* is not supported (previously this fell
        through and crashed with UnboundLocalError on `predictor`).
    """
    with mx.Context(mx.gpu(7)):  # NOTE(review): GPU id hard-coded to 7
        rootdir = f'../models/{exproot}/{_dataset_id}/{_task_id}-{trainid}/'
        # models that are deserialized from disk: name -> model dir suffix
        serialized_models = {
            'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
            'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
            'oracle': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
            'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
            'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
            'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
        }
        if model_name in serialized_models:
            modeldir = rootdir + serialized_models[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # naive baseline: repeat the last observed value
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
        # zero baseline: zero forecast keeps the rank unchanged
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
        # classical arima baseline via R forecast package
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                                           prediction_length = prediction_length,trunc_length=60)
        else:
            raise ValueError(f'error: model {model_name} not support yet!')
        return predictor
# In[7]:
#calc rank
def eval_rank_bytimediff(test_ds,tss,forecasts,prediction_length):
    """Evaluate rank prediction for timediff models (single event only).

    Args:
        test_ds: test dataset for ONE event (carno is recovered from
            feat_static_cat via the global decode_carids).
        tss, forecasts: output of make_evaluation_predictions.
        prediction_length: expected forecast horizon.

    Returns:
        (rank_ret, forecasts_et) where rank_ret entries are
        [lap, time_diff, true_rank, pred_rank] and forecasts_et maps
        completed_laps -> carno -> [true timediff, predicted timediff].
    """
    carlist = []
    # completed_laps -> carno -> [true window, predicted window]
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: message was missing the f-prefix, printing placeholders literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []
        # median over the sample paths as the point forecast
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        timediff_array = tss[idx].values.copy()
        # completed laps identify the forecast window position
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                                               forecast_laptime_mean.copy()]
    # build per-lap rank matrices
    rank_ret = []
    for lap in forecasts_et.keys():
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key:idx for idx, key in enumerate(carlist)}
        time_diff = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        # double argsort converts values into 0-based ranks
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, time_diff, true_rank, pred_rank])
    return rank_ret,forecasts_et
#calc rank
def eval_laptime(test_ds,tss,forecasts,prediction_length, start_offset):
"""
evaluate rank by laptime forecasting
input:
test_ds ; must be test set for a single event, because test_ds itself does not
contain features to identify the eventid
start_offset[]; elapsed time for lap0, for one specific event
tss,forecasts ; forecast result
prediction_length ;
return:
rank_ret ; [lap, elapsed_time, true_rank, pred_rank]
forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
"""
carlist = []
# carno-lap# -> elapsed_time[] array
forecasts_et = dict()
ds_iter = iter(test_ds)
for idx in range(len(test_ds)):
test_rec = next(ds_iter)
#global carid
carno = decode_carids[test_rec['feat_static_cat'][0]]
#print('car no:', carno)
if carno not in carlist:
carlist.append(carno)
#start_offset is global var
#offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
#print('start_offset:', offset)
# calc elapsed time
prediction_len = forecasts[idx].samples.shape[1]
if prediction_length != prediction_len:
print('error: prediction_len does not match, {prediction_length}:{prediction_len}')
return []
forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
#forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
laptime_array = tss[idx].values.copy()
#elapsed_time = np.cumsum(laptime_array) + offset
laptime_array_hat = tss[idx].values.copy()
laptime_array_hat[-prediction_len:] = forecast_laptime_mean
#elapsed_time_hat = np.cumsum(laptime_array) + offset
#save the prediction
completed_laps = len(tss[idx]) - prediction_len + 1
#print('car no:', carno, 'completed_laps:', completed_laps)
#key = '%s-%s'%(carno, completed_laps)
#forecasts_et[key] = elapsed_time[-prediction_len:].copy()
if completed_laps not in forecasts_et:
forecasts_et[completed_laps] = {}
#forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len-1:].copy(),
# elapsed_time_hat[-prediction_len-1:].copy()]
forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
laptime_array_hat[-prediction_len:].copy()]
# calc rank
rank_ret = []
for lap in forecasts_et.keys():
#get car list for this lap
carlist = list(forecasts_et[lap].keys())
#print('carlist:', carlist)
caridmap={key:idx for idx, key in enumerate(carlist)}
#fill in data
#elapsed_time = np.zeros((2, len(carlist), prediction_len+1))
lap_time = np.zeros((2, len(carlist), prediction_len))
for carno in carlist:
carid = caridmap[carno]
lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
#calculate rank
#idx = np.argsort(elapsed_time[0], axis=0)
#true_rank = np.argsort(idx, axis=0)
true_laptime = lap_time[0]
#idx = np.argsort(elapsed_time[1], axis=0)
#pred_rank = np.argsort(idx, axis=0)
pred_laptime = lap_time[1]
rank_ret.append([lap, lap_time, true_laptime, pred_laptime])
return rank_ret,forecasts_et
#calc rank
def eval_rank(test_ds,tss,forecasts,prediction_length, start_offset):
"""
evaluate rank by laptime forecasting
input:
test_ds ; must be test set for a single event, because test_ds itself does not
contain features to identify the eventid
start_offset[]; elapsed time for lap0, for one specific event
tss,forecasts ; forecast result
prediction_length ;
return:
rank_ret ; [lap, elapsed_time, true_rank, pred_rank]
forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
"""
carlist = []
# carno-lap# -> elapsed_time[] array
forecasts_et = dict()
ds_iter = iter(test_ds)
for idx in range(len(test_ds)):
test_rec = next(ds_iter)
#global carid
carno = decode_carids[test_rec['feat_static_cat'][0]]
#print('car no:', carno)
if carno not in carlist:
carlist.append(carno)
#start_offset is global var
if isinstance(start_offset, pd.core.frame.DataFrame):
offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
#print('start_offset:', offset)
# calc elapsed time
prediction_len = forecasts[idx].samples.shape[1]
if prediction_length != prediction_len:
print('error: prediction_len does not match, {prediction_length}:{prediction_len}')
return []
if _use_mean:
forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
else:
forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
if isinstance(start_offset, pd.core.frame.DataFrame):
#print('eval_rank:laptime2rank')
laptime_array = tss[idx].values.copy()
elapsed_time = np.cumsum(laptime_array) + offset
laptime_array = tss[idx].values.copy()
laptime_array[-prediction_len:] = forecast_laptime_mean
elapsed_time_hat = np.cumsum(laptime_array) + offset
else:
#print('eval_rank:rank-direct')
# rank directly
elapsed_time = tss[idx].values.copy()
elapsed_time_hat = tss[idx].values.copy()
elapsed_time_hat[-prediction_len:] = forecast_laptime_mean
#save the prediction
completed_laps = len(tss[idx]) - prediction_len + 1
#print('car no:', carno, 'completed_laps:', completed_laps)
#key = '%s-%s'%(carno, completed_laps)
#forecasts_et[key] = elapsed_time[-prediction_len:].copy()
if completed_laps not in forecasts_et:
forecasts_et[completed_laps] = {}
#forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len-1:].copy(),
# elapsed_time_hat[-prediction_len-1:].copy()]
forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
elapsed_time_hat[-prediction_len:].copy()]
# calc rank
rank_ret = []
for lap in forecasts_et.keys():
#get car list for this lap
carlist = list(forecasts_et[lap].keys())
#print('carlist:', carlist)
caridmap={key:idx for idx, key in enumerate(carlist)}
#fill in data
#elapsed_time = np.zeros((2, len(carlist), prediction_len+1))
elapsed_time = np.zeros((2, len(carlist), prediction_len))
for carno in carlist:
carid = caridmap[carno]
elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
#calculate rank
idx = np.argsort(elapsed_time[0], axis=0)
true_rank = np.argsort(idx, axis=0)
idx = np.argsort(elapsed_time[1], axis=0)
pred_rank = np.argsort(idx, axis=0)
rank_ret.append([lap, elapsed_time, true_rank, pred_rank])
return rank_ret,forecasts_et
def get_acc(rank_ret,prediction_length, verbose = False):
    """Aggregate rank-prediction metrics over all forecast windows.

    input:
        rank_ret: list of [lap, elapsed_time, true_rank, pred_rank]; only the
            true_rank/pred_rank entries (index 2 and 3) are used.
        prediction_length: horizon length of each window.
        verbose: print the metrics when True.
    return:
        ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
         (record counts per metric))
        the two tuples allow computing micro/macro averaged metrics.
    """
    top1_hits = 0
    top1_far_hits = 0
    top5_hits = 0
    top5_far_hits = 0
    tau_total = 0
    rmse_total = 0.
    mae_total = 0.
    for entry in rank_ret:
        rank_true, rank_pred = entry[2], entry[3]
        # top-1: predicted leader matches the true leader (rank 0)
        top1_hits += np.sum((rank_true == 0) & (rank_pred == 0))
        # "farmost" variants look only at the last step of the horizon
        top1_far_hits += np.sum((rank_true[:, -1] == 0) & (rank_pred[:, -1] == 0))
        # top-5 membership overlap
        top5_hits += np.sum((rank_true < 5) & (rank_pred < 5))
        top5_far_hits += np.sum((rank_true[:, -1] < 5) & (rank_pred[:, -1] < 5))
        # rank correlation
        kt, _ = stats.kendalltau(rank_true, rank_pred)
        tau_total += kt
        rmse_total += mean_squared_error(rank_pred, rank_true)
        mae_total += np.sum(np.abs(rank_pred - rank_true))
    recnt = len(rank_ret)
    if recnt > 0:
        # normalize the accumulated counts/sums into per-window metrics
        top1_hits = top1_hits * 1.0 / (recnt * prediction_length)
        top1_far_hits = top1_far_hits * 1.0 / recnt
        top5_hits = top5_hits * 1.0 / (5 * recnt * prediction_length)
        top5_far_hits = top5_far_hits * 1.0 / (5 * recnt)
        tau_total = tau_total / recnt
        rmse_total = rmse_total / recnt
        mae_total = mae_total / recnt
        # debug only: report MAE in the tau slot when forecasting lap status
        if _run_ts == COL_LAPSTATUS:
            tau_total = mae_total
    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', top1_hits,
              'top1acc_farmost=', top1_far_hits,
              'top5acc=', top5_hits,
              'top5acc_farmost=', top5_far_hits,
              )
        print('tau = ', tau_total,
              'rmse = ', rmse_total,
              'mae = ', mae_total)
    return ((top1_hits, top1_far_hits, top5_hits, top5_far_hits, tau_total, rmse_total),
            (recnt*prediction_length, recnt, 5*recnt*prediction_length, 5*recnt, recnt, recnt))
#
# simulation
#
def get_pitlaps(verbose = True, prediction_length=2):
    """
    collect pitlaps info from COL_LAPSTATUS
    input:
        laptime_data    ; (global) per-event data
        _test_event     ; (global) event to process
        events          ; (global) event-id -> event-name mapping
        _train_len      ; (global) minimum laps for a ts (otherwise, discard)
        global_carids   ; (global) carno -> carid mapping
    return:
        ret_pitlaps ; sorted unique laps at which some car pits
        all_pitlaps ; dict carno -> list of pit laps (plus last lap if _include_endpit)
        max_lap     ; maximum ts length over the event
    """
    run_ts = _run_ts
    #all_pitlaps = []  # carno -> pitstops
    all_pitlaps = {}  # carno -> pitstops
    max_lap = 0
    for _data in laptime_data:
        # only the configured test event is scanned
        if events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status, timediff]
            rec = _data[2][rowid].copy()
            # remove nan (only tails) from the target row
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < _train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            # laps flagged as pit stops in the lap-status row
            lap_status = rec[COL_LAPSTATUS, :]
            pitstops = np.where(lap_status == 1)[0]
            # filter out inlaps (when _inlap_status > 0): pit flags come in
            # adjacent pairs, keep every other index depending on whether the
            # inlap precedes (1) or follows (2) the actual stop
            # NOTE(review): assumes pit flags strictly alternate inlap/stop -- verify
            if _inlap_status !=0:
                if _inlap_status == 1:
                    #remove inlaps in previous of pit stops
                    pitstops_tmp = [pitstops[x] for x in range(1, len(pitstops), 2)]
                    pitstops = pitstops_tmp
                elif _inlap_status == 2:
                    #remove inlaps in next lap of pit stops
                    pitstops_tmp = [pitstops[x] for x in range(0, len(pitstops), 2)]
                    pitstops = pitstops_tmp
            all_pitlaps[carno] = list(pitstops)
            # optionally treat the final lap as a (virtual) pit stop
            if _include_endpit:
                all_pitlaps[carno].append(totallen-1)
    # flatten to a sorted set of all pit laps across cars
    allset = []
    for l in all_pitlaps.keys():
        allset.extend(all_pitlaps[l])
    ret_pitlaps = sorted(list(set(allset)))
    return ret_pitlaps, all_pitlaps, max_lap
def get_nextpit_raw(pitlaps, startlap):
    """Find, for each car, the first pit lap strictly after *startlap*.

    Args:
        pitlaps: dict carno -> list of pit-stop lap numbers (in race order).
        startlap: reference lap number.

    Returns:
        (nextpit_map, maxpit):
            nextpit_map: carno -> next pit lap; cars with no later pit are omitted.
            maxpit: largest next-pit lap over all cars, or -1 when no car pits
                after startlap. bugfix: an empty *pitlaps* previously raised
                ValueError via max([]) -- now returns -1.
    """
    nextpit = []
    nextpit_map = {}
    for carno, laps in pitlaps.items():
        # first pit strictly later than startlap (list order preserved)
        nxt = next((lap for lap in laps if lap > startlap), None)
        if nxt is None:
            nextpit.append(-1)  # sentinel: this car never pits again
        else:
            nextpit.append(nxt)
            nextpit_map[carno] = nxt
    return nextpit_map, (max(nextpit) if nextpit else -1)
def get_nextpit(pitlaps, startlap):
    """Find the next pit lap per car, and the farthest next pit among cars
    that pit exactly at *startlap*.

    Args:
        pitlaps: dict carno -> list of pit-stop lap numbers (in race order).
        startlap: reference lap number.

    Returns:
        (nextpit_map, maxpit):
            nextpit_map: carno -> first pit lap strictly after startlap
                (cars with no later pit are omitted).
            maxpit: max next-pit lap over cars whose pit list contains
                startlap itself; -1 when no such car has a later pit.
    """
    # cars that pit exactly at startlap
    hit_cars = [carno for carno, laps in pitlaps.items() if startlap in laps]
    # first pit strictly after startlap, per car
    nextpit_map = {}
    for carno, laps in pitlaps.items():
        later = [lap for lap in laps if lap > startlap]
        if later:
            nextpit_map[carno] = later[0]
    # farthest next pit restricted to the cars pitting at startlap
    maxpit = -1
    for carno in hit_cars:
        if carno in nextpit_map:
            maxpit = max(nextpit_map[carno], maxpit)
    return nextpit_map, maxpit
def sim_init():
    """Snapshot the pit-related feature rows into their *_SAVE backup rows
    for every car of the test event, so the simulation can restore the
    ground truth later (see update_onets).
    """
    for event_data in laptime_data:
        # only the configured test event participates in the simulation
        if events[event_data[0]] != _test_event:
            continue
        for rowid in range(event_data[2].shape[0]):
            # rec[features, lapnumber]
            rec = event_data[2][rowid]
            # back up the pit-model-related feature rows
            rec[COL_LAPSTATUS_SAVE, :] = rec[COL_LAPSTATUS, :]
            rec[COL_CAUTION_LAPS_INSTINT_SAVE, :] = rec[COL_CAUTION_LAPS_INSTINT, :]
            rec[COL_LAPS_INSTINT_SAVE, :] = rec[COL_LAPS_INSTINT, :]
def update_lapstatus(startlap):
    """Re-simulate the lap status from *startlap* onward for every car in
    the test event by delegating to update_onets().
    """
    for event_data in laptime_data:
        # only the configured test event participates in the simulation
        if events[event_data[0]] != _test_event:
            continue
        for rowid in range(event_data[2].shape[0]):
            # rec[features, lapnumber]
            update_onets(event_data[2][rowid], startlap, event_data[1][rowid])
_pitmodel = None
def update_onets(rec, startlap, carno):
    """
    update lapstatus after startlap based on the pit prediction model
    input:
        rec     ; a ts with multiple feature rows (COL_XXX), modified in place
        startlap; lap position after which status is re-simulated
        carno   ; car number (used for debug tracing only)
    return:
        None; rec is updated in place for COL_LAPSTATUS,
        COL_CAUTION_LAPS_INSTINT and COL_LAPS_INSTINT
    """
    # determine the valid (non-nan) length of the target row
    nans, x= nan_helper(rec[_run_ts,:])
    nan_count = np.sum(nans)
    recx = rec[:, ~np.isnan(rec[_run_ts,:])]
    totallen = recx.shape[1]
    # nothing to simulate if startlap is beyond the valid data
    if startlap >= totallen:
        return
    # reset status: restore ground truth up to and including startlap,
    # clear everything after it
    endpos = startlap + 1
    rec[COL_LAPSTATUS,:] = 0
    rec[COL_LAPSTATUS,:endpos] = rec[COL_LAPSTATUS_SAVE, :endpos]
    rec[COL_CAUTION_LAPS_INSTINT,:endpos] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :endpos]
    rec[COL_LAPS_INSTINT, :endpos] = rec[COL_LAPS_INSTINT_SAVE, :endpos]
    debug_report('start update_onets', rec[COL_LAPSTATUS], startlap, carno)
    # walk forward, letting the pit model propose the next pit position
    curpos = startlap
    while True:
        caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, curpos])
        laps_instint = int(rec[COL_LAPS_INSTINT, curpos])
        # model predicts total stint length; convert to an absolute lap
        pred_pit_laps = _pitmodel.predict(caution_laps_instint, laps_instint)
        nextpos = curpos + pred_pit_laps - laps_instint
        if nextpos >= totallen:
            # predicted pit falls past the race end: fill the remaining laps
            # with running stint counters and stop
            nextpos = totallen - 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos+1] = caution_laps_instint
            for _pos in range(curpos+1, nextpos+1):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            break
        else:
            # a valid pit at nextpos
            rec[COL_LAPSTATUS, nextpos] = 1
            if _inlap_status != 0:
                # inlap is also flagged as 'P': before (1) or after (2) the stop
                if _inlap_status == 1 :
                    rec[COL_LAPSTATUS, nextpos-1] = 1
                else:
                    # guard the upper boundary before flagging the next lap
                    if nextpos+1 < rec.shape[1]:
                        rec[COL_LAPSTATUS, nextpos+1] = 1
            # carry the caution counter through the stint, reset it at the pit
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos] = caution_laps_instint
            rec[COL_CAUTION_LAPS_INSTINT, nextpos] = 0
            # laps-in-stint counts up through the stint, resets at the pit
            for _pos in range(curpos+1, nextpos):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            rec[COL_LAPS_INSTINT, nextpos] = 0
            #go forward
            curpos = nextpos
    debug_report('after update_onets', rec[COL_LAPSTATUS], startlap, carno)
    return
def debug_pitmodel(startlap, carno, laps_instint, caution_laps_instint, samplecnt=1000):
    """Sample the pit model *samplecnt* times for one stint state.

    Returns the list of predicted absolute next-pit laps, useful for
    inspecting the model's output distribution. *carno* is unused and kept
    for call-site compatibility.
    """
    return [startlap + _pitmodel.predict(caution_laps_instint, laps_instint) - laps_instint
            for _ in range(samplecnt)]
#debug tracking status
#status matrix : laps x ( endCol x 5 features)
#features: target, lapstatus, lap_instint, caution_instint, trackstatus
_status_mat = {} # stepid -> status matrix
def debug_report_mat(startlap, maxnext):
    """
    output the status of the simulation
    """
    # NOTE(review): this function appears unfinished -- it iterates the test
    # event's records but produces no output; fixedWidth/endCol/maxnext are
    # never used. Kept as-is.
    fixedWidth = 5
    endCol = 4
    run_ts = _run_ts
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        #header carno | lap#...
        #fixed width
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
# car numbers to trace in the debug_report* helpers; empty list disables tracing
_debug_carlist = []
#_debug_carlist = [12]
def debug_report_ts(msg, rec, startlap, carno, col= COL_LAPSTATUS):
    """Print one feature row of *rec*, split at *startlap*, for cars that
    are being traced via _debug_carlist (no-op otherwise)."""
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[col, : startlap + 1])
        print('='*10)
        print(rec[col, startlap + 1:])
def debug_report(msg, rec, startlap, carno):
    """Print a 1-D array *rec*, split at *startlap*, for cars that are
    being traced via _debug_carlist (no-op otherwise)."""
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[: startlap + 1])
        print('='*10)
        print(rec[startlap + 1:])
def debug_print(msg):
    """Print *msg* only when debug tracing is enabled (i.e. _debug_carlist
    is non-empty)."""
    if _debug_carlist:
        print(msg)
# works on predicted lap status
def sim_onestep_pred(predictor, prediction_length, freq,
                     startlap, endlap,
                     oracle_mode = MODE_ORACLE,
                     sample_cnt = 100,
                     verbose = False
                     ):
    """
    Regressive one-step simulation over [startlap, endlap] using predicted
    lap status (the COL_LAPSTATUS values previously written by the pit model).
    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus (ground truth from the *_SAVE row)
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
        forecast_samples; the sample paths of the last predicted step
            {}, carno -> samplecnt values of the target
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    forecasts_samples = {}
    _laptime_data = laptime_data.copy()
    # predict windows of prediction_length laps, sliding forward each round
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + prediction_length + 1):
    while(endpos <= endlap + prediction_length):
        # build the test set for this window
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # skip cars whose race already ended before this window
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                # target can be COL_XXSTATUS
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # lazily initialize the per-car result matrix <5, totallen>
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS_SAVE, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                    # for p-risk
                    forecasts_samples[carno] = np.zeros((sample_cnt))
                # row 2 accumulates previous predictions -> regressive input
                target_val = forecasts_et[carno][2,:]
                # selection of features according to the oracle mode
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # build the dynamic features up to the end of this window
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test mode: overwrite the unknown future part of the features
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use the current value instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    # for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    # set the prediction part to zero
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    # for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    # use the (already predicted) lap status to roll pitage forward
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            # new pit resets the stint age
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec]
                                  }
                                 )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                  }
                                 )
                test_rec_cnt += 1
        # end of for each ts
        # RUN Prediction here for the whole window batch
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,       # test dataset
            predictor=predictor,   # predictor
            num_samples=sample_cnt,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        # save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            # map global car id back to the car number
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            if _use_mean:
                forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            else:
                forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_length))
            # update the forecasts, ready to use in the next prediction (regressive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
            # save the samples of the farthest predicted step
            forecasts_samples[carno][:] = forecasts[idx].samples[:,-1].reshape(-1)
        # go forward one window
        endpos += prediction_length
    return forecasts_et, forecasts_samples
# works on lapstatus ground truth
def sim_onestep_ex(predictor, prediction_length, freq,
                   startlap, endlap,
                   oracle_mode = MODE_ORACLE,
                   verbose = False
                   ):
    """
    Regressive one-step simulation over [startlap, endlap] using the
    ground-truth lap status (unlike sim_onestep_pred, row 0 of the result
    holds COL_LAPSTATUS rather than the *_SAVE backup).
    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    # predict windows of prediction_length laps, sliding forward each round
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + 1):
    while(endpos <= endlap + prediction_length + 1):
        # build the test set for this window
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # skip cars whose race already ended before this window
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                # target can be COL_XXSTATUS
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # lazily initialize the per-car result matrix <5, totallen>
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                # row 2 accumulates previous predictions -> regressive input
                target_val = forecasts_et[carno][2,:]
                # selection of features according to the oracle mode
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # build the dynamic features up to the end of this window
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test mode: overwrite the unknown future part of the features
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use the current value instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    # for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    # set the prediction part to zero
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    # for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    # use the lap status to roll pitage forward
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            # new pit resets the stint age
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec]
                                  }
                                 )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                  }
                                 )
                test_rec_cnt += 1
        # end of for each ts
        # RUN Prediction here for the whole window batch
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,     # test dataset
            predictor=predictor, # predictor
            num_samples=100,     # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        # save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            # map global car id back to the car number
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            # update the forecasts, ready to use in the next prediction (regressive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
        # go forward one window
        endpos += prediction_length
    # clear the un-predicted tail beyond endlap
    for carno in forecasts_et.keys():
        forecasts_et[carno][2, endlap+1:] = np.nan
    return forecasts_et
def sim_onestep(predictor, prediction_length, freq,
                startlap, endlap,
                oracle_mode = MODE_ORACLE,
                verbose = False
                ):
    """
    Regressive forecasting over one race segment: starting at ``startlap``,
    repeatedly predict ``prediction_length`` laps ahead and feed each
    prediction back as input for the next step, until ``endlap``.

    input:
        predictor ; trained forecasting predictor (GluonTS-style)
        prediction_length ; laps predicted per step
        freq ; pandas frequency string for the synthetic time index
        startlap, endlap ; lap range to simulate
        oracle_mode ; bit flags (MODE_*) selecting how features are
                      filled for the unseen laps
        verbose ; print per-time-series statistics
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target (overwritten step by step)
            3,: -> placeholder (true rank, filled later)
            4,: -> placeholder (pred rank, filled later)
    """
    # snapshot module-level configuration
    run_ts = _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len

    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()

    # statistics placeholder for adjust test: trackstatus, lapstatus
    mae = [0, 0]

    # _data: eventid, carids, datalist[carnumbers, features, lapnumber]
    #        -> [laptime, rank, track, lap]
    for _data in _laptime_data:
        _test = []
        if events[_data[0]] != test_event:
            # only simulate the configured test event
            continue

        # statistics on the ts length
        ts_len = [_entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))

        # process each car's time series
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status, timediff]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()

            # remove nan (only tails)
            nans, x = nan_helper(rec[run_ts, :])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts, :])]

            # remove short ts
            totallen = rec.shape[1]
            if (totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]

            # target is a copy of the configured run_ts row (can be a COL_XXSTATUS row)
            target_val = rec[run_ts, :].copy().astype(np.float32)
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT, :].copy()

            # <5, totallen>: truth in rows 0-1, running prediction in row 2
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:, :] = np.nan
            forecasts_et[carno][0, :] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1, :] = rec[run_ts, :].copy().astype(np.float32)
            forecasts_et[carno][2, :] = rec[run_ts, :].copy().astype(np.float32)

            # selection of features: optionally zero out track/lap status
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if True:
                # bug fix: fixed the split point for all cars/ts;
                # advance prediction_length laps per step
                endpos = startlap + prediction_length
                while (endpos < endlap and endpos < totallen):
                    # RUN Prediction for single record
                    _test = []
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos - prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos - prediction_length - 1])

                    # test mode: how to fill the features of the unseen laps
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        # for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x + start_pitage + 1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        # set the prediction part to zero (nan is not supported)
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        # for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x + start_pitage + 1 for x in range(prediction_length)])

                    if test_flag(oracle_mode, MODE_PREDPIT):
                        # use the lap info to roll the pit-age forward;
                        # pit-age resets to 0 on a pit lap
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos] == 0:
                                pitage_rec[-prediction_length + pos] = start_pitage + 1
                            else:
                                # new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    # build the single-record test set for this step
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec, lap_rec]
                                      }
                                     )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec, lap_rec, pitage_rec]
                                      }
                                     )

                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,      # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,      # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)

                    # get prediction result (mean over the sample paths)
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))

                    # feed the prediction back into the inputs (regressive forecasting)
                    target_val[endpos - prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos - prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos - prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos - prediction_length:endpos] = pitage_rec[-prediction_length:]

                    # save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()

                    test_rec_cnt += 1
                    # go forward
                    endpos += prediction_length

            # one ts done
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

    return forecasts_et
# pred pit differs to true pit
def get_acc_onestint_pred(forecasts, startlap, nextpit, nextpit_pred, trim=2, currank = False):
    """
    Evaluate the rank change over the stint starting at ``startlap`` when the
    predicted next pit lap may differ from the true one.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; eval for the stint start from startlap only
        nextpit ; {} carno -> true next pitstop lap
        nextpit_pred ; {} carno -> predicted next pitstop lap
        currank ; naive baseline: predict "rank stays the same"
    output:
        rows of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign, pitpos, pitpos_pred]
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1, :])
        true_rank = forecasts[carno][3, :]
        pred_rank = forecasts[carno][4, :]

        # lap status condition: does a pit stint really start at startlap,
        # under the in-lap labeling convention selected by _inlap_status.
        # bugfix: default to False so an unexpected _inlap_status value skips
        # the car instead of raising UnboundLocalError below.
        lapstatus_cont = False
        if _inlap_status == 0:
            lapstatus_cont = (forecasts[carno][0, startlap] == 1)
        elif _inlap_status == 1:
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap - 1] == 1))
        elif _inlap_status == 2:
            # NOTE(review): startlap+1 can index past the end when startlap is
            # the last lap — confirm callers never pass the final lap here
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap + 1] == 1))

        if carno in _debug_carlist:
            _debug_msg = 'startlap=%d, total=%d, pitstop status = %s, nextpit=%s, nextpit_pred=%s' % (startlap, lapnum, lapstatus_cont,
                    'none' if (carno not in nextpit) else nextpit[carno],
                    'none' if (carno not in nextpit_pred) else nextpit_pred[carno],
                    )
            debug_print(_debug_msg)

        # check the lap status: only cars whose stint starts at startlap
        if ((startlap < lapnum) and (lapstatus_cont == True)):
            startrank = true_rank[startlap - trim]
            if not carno in nextpit:
                continue
            pitpos = nextpit[carno]
            if np.isnan(pitpos):
                continue

            # resolve the predicted end-of-stint lap
            if _force_endpit_align:
                # fall back to the true pit lap when no usable prediction
                if not carno in nextpit_pred:
                    pitpos_pred = pitpos
                else:
                    pitpos_pred = nextpit_pred[carno]
                    if np.isnan(pitpos_pred):
                        pitpos_pred = pitpos
            else:
                if not carno in nextpit_pred:
                    continue
                pitpos_pred = nextpit_pred[carno]
                if np.isnan(pitpos_pred):
                    continue

            endrank = true_rank[pitpos - trim]
            diff = endrank - startrank
            sign = get_sign(diff)

            if currank:
                # force into currank model, zero doesn't work here
                pred_endrank = startrank
            else:
                pred_endrank = pred_rank[pitpos_pred - trim]
            pred_diff = pred_endrank - startrank
            pred_sign = get_sign(pred_diff)

            rankret.append([carno, startlap, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign,
                            pitpos, pitpos_pred
                            ])
    return rankret
# pred pit differs to true pit
def get_acc_onestep_shortterm(forecasts, startlap, endlap, trim=0, currank = False):
    """
    Collect per-car rank-change records between ``startlap`` and ``endlap``.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; start of the evaluated window
        endlap ; end of the evaluated window
        currank ; naive baseline: predict "rank stays the same"
    output:
        rows of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    records = []
    for carno, mat in forecasts.items():
        total_laps = len(mat[1, :])
        rank_true = mat[3, :]
        rank_pred = mat[4, :]

        # skip cars whose series ends before the window starts
        if startlap >= total_laps:
            continue
        if np.isnan(endlap):
            continue

        rank_at_start = rank_true[startlap - trim]
        rank_at_end = rank_true[endlap - trim]
        delta = rank_at_end - rank_at_start

        # predicted end rank: either the model output or the naive baseline
        if currank:
            predicted_end = rank_at_start
        else:
            predicted_end = rank_pred[endlap - trim]
        delta_pred = predicted_end - rank_at_start

        records.append([carno, startlap, rank_at_start,
                        rank_at_end, delta, get_sign(delta),
                        predicted_end, delta_pred, get_sign(delta_pred)
                        ])
    return records
# works when pred pitstop == true pitstop
def get_acc_onestint(forecasts, startlap, nextpit, trim=2, currank = False):
    """
    Evaluate the rank change over the stint starting at ``startlap``, using
    the true next pit lap as the stint end.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; eval for the stint start from startlap only
        nextpit ; {} carno -> next pitstop lap
        currank ; naive baseline: predict "rank stays the same"
    output:
        rows of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    results = []
    for carno, mat in forecasts.items():
        total_laps = len(mat[1, :])
        rank_true = mat[3, :]
        rank_pred = mat[4, :]

        # only cars that are actually flagged as pitting at startlap
        if startlap >= total_laps or mat[0, startlap] != 1:
            continue
        if carno not in nextpit:
            continue
        stint_end = nextpit[carno]
        if np.isnan(stint_end):
            continue

        rank_at_start = rank_true[startlap - trim]
        rank_at_end = rank_true[stint_end - trim]
        delta = rank_at_end - rank_at_start

        # predicted end rank: model output, or the naive baseline
        predicted_end = rank_at_start if currank else rank_pred[stint_end - trim]
        delta_pred = predicted_end - rank_at_start

        results.append([carno, startlap, rank_at_start,
                        rank_at_end, delta, get_sign(delta),
                        predicted_end, delta_pred, get_sign(delta_pred)
                        ])
    return results
#
# simulation
#
def run_simulation_stint(predictor, prediction_length, freq,
                         carno, stintid, loopcnt,
                         datamode = MODE_ORACLE):
    """
    Simulate one car at one specific stint, repeated ``loopcnt`` times.

    input:
        predictor, prediction_length, freq ; forwarded to sim_onestep_pred
        carno ; the car to evaluate
        stintid ; index of the stint (pit lap) in this car's pit matrix
        loopcnt ; number of simulation repetitions
        datamode ; oracle-mode flags forwarded to the simulator
    return:
        (df, full_samples, full_tss, maxnext_pred)
        df ; one row per run: [carno, startlap, startrank, endrank, diff,
             sign, pred_endrank, pred_diff, pred_sign, endlap, pred_endlap]
    step:
        1. init the lap status model
        2. loop:
            1. onestep simulation
            2. eval stint performance
    """
    rankret = []

    # the ground truth pit laps
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()

    # init samples array
    full_samples = {}
    full_tss = {}

    # here, test only one stint for carno and stintid
    pitlap = pitmat[carno][stintid]

    for runid in range(loopcnt):
        # 1. update lap status (skipped when the oracle pit model is used)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')

        # 2. get the next pit lap under the true and the predicted pit model
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)

        # only for one car
        maxnext = nextpit[carno]
        maxnext_pred = nextpit_pred[carno]

        # debug output for the watched cars
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d' % (nextpit[_testcar], nextpit_pred[_testcar]
                                                                , maxnext, maxnext_pred)
                debug_print(debugstr)

        # run one step sim from pitlap to maxnext_pred
        # (to get the forecast samples, set max = maxnext_pred only,
        #  rather than max(maxnext, maxnext_pred))
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                                                      pitlap, maxnext_pred,
                                                      oracle_mode = datamode,
                                                      sample_cnt = 100
                                                      )
        debug_print(f'simulation done: {len(forecast)}')

        # calc rank from this result, depending on the experiment target
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            return

        # evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        rankret.extend(ret)

    # add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                         'endrank', 'diff', 'sign',
                                         'pred_endrank', 'pred_diff', 'pred_sign',
                                         'endlap', 'pred_endlap'
                                         ])
    return df, full_samples, full_tss, maxnext_pred
def run_simulation_pred(predictor, prediction_length, freq,
                        datamode = MODE_ORACLE):
    """
    Simulate the whole race stint-by-stint using the predicted pit model.

    input:
        predictor, prediction_length, freq ; forwarded to sim_onestep_pred
        datamode ; oracle-mode flags forwarded to the simulator
    return:
        df ; one row per (car, stint): [carno, startlap, startrank, endrank,
             diff, sign, pred_endrank, pred_diff, pred_sign, endlap,
             pred_endlap]
    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation
            2. eval stint performance
    """
    rankret = []

    # the ground truth pit laps
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()

    for pitlap in allpits:
        # 1. update lap status (skipped when the oracle pit model is used)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')

        # 2. get the next pit lap under the true and the predicted pit model
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)

        # debug output for the watched cars
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d' % (nextpit[_testcar], nextpit_pred[_testcar]
                                                                , maxnext, maxnext_pred)
                debug_print(debugstr)

        # run one step sim from pitlap to the farther of true/predicted next pit
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                                                      pitlap, max(maxnext, maxnext_pred),
                                                      oracle_mode = datamode,
                                                      sample_cnt = 100
                                                      )
        debug_print(f'simulation done: {len(forecast)}')

        # calc rank from this result, depending on the experiment target
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break

        # evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        rankret.extend(ret)

    # add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                         'endrank', 'diff', 'sign',
                                         'pred_endrank', 'pred_diff', 'pred_sign',
                                         'endlap', 'pred_endlap'
                                         ])
    return df
# prediction of shortterm + pred pit model
def run_simulation_shortterm(predictor, prediction_length, freq,
                             datamode = MODE_ORACLE,
                             sample_cnt = 100):
    """
    Short-term simulation: slide a prediction_length window over the race and
    evaluate the rank prediction at the end of each window.

    input:
        predictor, prediction_length, freq ; forwarded to sim_onestep_pred
        datamode ; oracle-mode flags forwarded to the simulator
        sample_cnt ; number of sample paths per prediction
    return:
        (df, full_samples, full_tss)
        df ; one row per (car, lap): [carno, startlap, startrank, endrank,
             diff, sign, pred_endrank, pred_diff, pred_sign]
    step:
        1. init the lap status model
        2. loop over the laps
            1. onestep simulation
            2. eval short-term performance
    """
    rankret = []

    # the ground truth pit laps
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()

    # init samples array
    full_samples = {}
    full_tss = {}

    # evaluation starts at lap 10 and stops one window before the last lap
    for pitlap in range(10, maxlap - prediction_length):
        # 1. update lap status (skipped when the oracle pit model is used)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')

        # run one step sim over the next prediction_length laps
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                                                      pitlap, pitlap + prediction_length,
                                                      oracle_mode = datamode,
                                                      sample_cnt = sample_cnt
                                                      )
        debug_print(f'simulation done: {len(forecast)}')

        # calc rank from this result, depending on the experiment target
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break

        # evaluate for this window
        ret = get_acc_onestep_shortterm(forecasts_et, pitlap, pitlap + prediction_length)
        rankret.extend(ret)

        # add to full_samples
        eval_full_samples(pitlap + prediction_length,
                          forecast_samples, forecast,
                          full_samples, full_tss)

    # add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                         'endrank', 'diff', 'sign',
                                         'pred_endrank', 'pred_diff', 'pred_sign',
                                         ])
    return df, full_samples, full_tss
# oracle sim
def run_simulation(predictor, prediction_length, freq,
                   datamode = MODE_ORACLE):
    """
    Simulate the whole race stint-by-stint using the true (oracle) pit laps.

    input:
        predictor, prediction_length, freq ; forwarded to sim_onestep_ex
        datamode ; oracle-mode flags forwarded to the simulator
    return:
        df ; one row per (car, stint): [carno, startlap, startrank, endrank,
             diff, sign, pred_endrank, pred_diff, pred_sign]
    step:
        1. loop on each pit lap
            1. onestep simulation
            2. eval stint performance
    """
    rankret = []

    # the ground truth pit laps
    allpits, pitmat, maxlap = get_pitlaps()

    for pitlap in allpits:
        print(f'start pitlap: {pitlap}')
        nextpit, maxnext = get_nextpit(pitmat, pitlap)

        # run one step sim from pitlap to maxnext
        forecast = sim_onestep_ex(predictor, prediction_length, freq,
                                  pitlap, maxnext,
                                  oracle_mode = datamode
                                  )
        print(f'simulation done: {len(forecast)}')

        # calc rank from this result, depending on the experiment target
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break

        # evaluate for this stint
        ret = get_acc_onestint(forecasts_et, pitlap, nextpit)
        rankret.extend(ret)

    # add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                         'endrank', 'diff', 'sign',
                                         'pred_endrank', 'pred_diff', 'pred_sign',
                                         ])
    return df
#
# ------------
#
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset):
    """
    Evaluate stint rank by laptime forecasting: cumulate true/predicted lap
    times into elapsed time and rank the cars lap by lap.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target (laptime)
            2,: -> pred target (laptime)
            3,: -> output: true rank
            4,: -> output: pred rank
        prediction_length ; unused here, kept for interface compatibility
        start_offset ; DataFrame with the elapsed time at lap 0 per
                       car_number; anything else means a zero offset
    return:
        forecasts_et with rows 3 (true rank) and 4 (pred rank) filled in
    """
    # get car list for this event and a carno -> row-index map
    carlist = list(forecasts_et.keys())
    caridmap = {key: idx for idx, key in enumerate(carlist)}

    # elapsed-time matrix [true/pred, car, lap]; todo, Indy500 -> 200 max laps
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:, :] = np.nan

    for carno in forecasts_et.keys():
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number'] == carno)].elapsed_time.values[0]
        else:
            # bugfix: 'offset' used to be left unbound (or stale from the
            # previous car) when start_offset was not a DataFrame
            offset = 0

        lapnum = len(forecasts_et[carno][1, :])
        laptime_array = forecasts_et[carno][1, :]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno], :lapnum] = elapsed

        laptime_array = forecasts_et[carno][2, :]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno], :lapnum] = elapsed

    # calculate rank via double argsort (nan-safe: nans sort last)
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)

    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1, :])
        forecasts_et[carno][3, :] = true_rank[caridmap[carno], :lapnum]
        forecasts_et[carno][4, :] = pred_rank[caridmap[carno], :lapnum]
    return forecasts_et
#
def eval_full_samples(lap, forecast_samples, forecast, full_samples, full_tss, maxlap=200):
    """
    Fold one lap's forecast samples into the accumulated per-car arrays.

    input:
        lap ; lap number
        forecast_samples ; {} carno -> sample array of the pred target
        forecast ; {}, carno -> 5 x totallen matrix
            1,: -> true target
            2,: -> pred target
        full_samples ; {} carno -> (samplecnt, maxlap) pred-rank samples
        full_tss ; {} carno -> (maxlap,) true ranks
    return:
        None; mutates full_samples and full_tss in place
    """
    # car list for this lap and a carno -> row-index map
    carlist = list(forecast.keys())
    car_index = {c: i for i, c in enumerate(carlist)}
    n_cars = len(carlist)
    samplecnt = len(forecast_samples[carlist[0]])

    # gather true targets (per lap) and the sampled predictions (per path)
    truth = np.full((n_cars, maxlap), np.nan)
    samples = np.full((n_cars, samplecnt), np.nan)
    for carno in carlist:
        row = car_index[carno]
        true_ts = forecast[carno][1, :]
        truth[row, :len(true_ts)] = true_ts
        samples[row, :] = forecast_samples[carno]

    # rank via double argsort (nan-safe: nans sort last)
    true_rank = np.argsort(np.argsort(truth, axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(samples, axis=0), axis=0)

    # write the ranks back into the accumulators
    for carno in carlist:
        row = car_index[carno]
        if carno not in full_tss:
            # lazily initialize this car's accumulators
            full_tss[carno] = np.full(maxlap, np.nan)
            full_samples[carno] = np.full((samplecnt, maxlap), np.nan)
        full_tss[carno][:lap + 1] = true_rank[row][:lap + 1]
        full_samples[carno][:, lap] = pred_rank[row, :]
    return
def eval_stint_direct(forecasts_et, prediction_length):
    """
    Rank cars lap-by-lap directly from the (time-diff) target values.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3,: -> output: true rank
            4,: -> output: pred rank
        prediction_length ; unused here, kept for interface compatibility
    return:
        forecasts_et with rows 3 (true rank) and 4 (pred rank) filled in
    """
    # car list for this event and a carno -> row-index map
    carlist = list(forecasts_et.keys())
    car_index = {c: i for i, c in enumerate(carlist)}

    # target matrix [true/pred, car, lap]; todo, Indy500 -> 200 max laps
    maxlap = 200
    diff_time = np.full((2, len(carlist), maxlap), np.nan)
    for carno, mat in forecasts_et.items():
        lapnum = len(mat[1, :])
        diff_time[0, car_index[carno], :lapnum] = mat[1, :]
        diff_time[1, car_index[carno], :lapnum] = mat[2, :]

    # rank via double argsort (nan-safe: nans sort last)
    true_rank = np.argsort(np.argsort(diff_time[0], axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(diff_time[1], axis=0), axis=0)

    # write the per-lap ranks back into rows 3 and 4
    for carno, mat in forecasts_et.items():
        lapnum = len(mat[1, :])
        mat[3, :] = true_rank[car_index[carno], :lapnum]
        mat[4, :] = pred_rank[car_index[carno], :lapnum]
    return forecasts_et
#calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset):
    """
    Evaluate rank by laptime forecasting (same algorithm as
    eval_stint_bylaptime: cumulate lap times into elapsed time, then rank).

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix (row 1 true laptime,
                       row 2 predicted laptime; rows 3/4 are outputs)
        prediction_length ; unused here, kept for interface compatibility
        start_offset ; DataFrame with the elapsed time at lap 0 per
                       car_number; anything else means a zero offset
    return:
        forecasts_et with rows 3 (true rank) and 4 (pred rank) filled in
    """
    # get car list for this event and a carno -> row-index map
    carlist = list(forecasts_et.keys())
    caridmap = {key: idx for idx, key in enumerate(carlist)}

    # elapsed-time matrix [true/pred, car, lap]; todo, Indy500 -> 200 max laps
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:, :] = np.nan

    for carno in forecasts_et.keys():
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number'] == carno)].elapsed_time.values[0]
        else:
            # bugfix: 'offset' used to be left unbound (or stale from the
            # previous car) when start_offset was not a DataFrame
            offset = 0

        lapnum = len(forecasts_et[carno][1, :])
        laptime_array = forecasts_et[carno][1, :]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno], :lapnum] = elapsed

        laptime_array = forecasts_et[carno][2, :]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno], :lapnum] = elapsed

    # calculate rank via double argsort (nan-safe: nans sort last)
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)

    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1, :])
        forecasts_et[carno][3, :] = true_rank[caridmap[carno], :lapnum]
        forecasts_et[carno][4, :] = pred_rank[caridmap[carno], :lapnum]
    return forecasts_et
def get_sign(diff):
    """Return the sign of *diff*: 1 if positive, -1 if negative, 0 otherwise."""
    return 1 if diff > 0 else (-1 if diff < 0 else 0)
def get_stint_acc(forecasts, trim=2, currank = False):
    """
    Build a per-stint rank-change table over the whole race for every car.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        currank ; naive baseline: predict "rank stays the same"
    output:
        DataFrame [carno, stintid, startrank, endrank, diff, sign,
                   pred_endrank, pred_diff, pred_sign]
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1, :])
        true_rank = forecasts[carno][3, :]
        pred_rank = forecasts[carno][4, :]

        # laps flagged as pit stops split the race into stints
        pitpos_list = np.where(forecasts[carno][0, :] == 1)[0]

        stintid = 0
        startrank = true_rank[0]
        for pitpos in pitpos_list:
            endrank = true_rank[pitpos - trim]
            diff = endrank - startrank
            sign = get_sign(diff)

            if currank:
                # force into currank model, zero doesn't work here
                pred_endrank = startrank
            else:
                pred_endrank = pred_rank[pitpos - trim]
            pred_diff = pred_endrank - startrank
            pred_sign = get_sign(pred_diff)

            rankret.append([carno, stintid, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                            ])
            # the next stint starts at this pit
            stintid += 1
            startrank = true_rank[pitpos - trim]

        # final stint up to the finish line; bugfix: a car with no pit stops
        # at all used to raise IndexError on pitpos_list[-1]
        if len(pitpos_list) == 0 or pitpos_list[-1] < lapnum - 1:
            endrank = true_rank[-1]
            diff = endrank - startrank
            sign = get_sign(diff)

            if currank:
                # force into currank model, zero doesn't work here
                pred_endrank = startrank
            else:
                pred_endrank = pred_rank[-1]
            pred_diff = pred_endrank - startrank
            pred_sign = get_sign(pred_diff)

            rankret.append([carno, stintid, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                            ])

    # add to df
    df = pd.DataFrame(rankret, columns =['carno', 'stintid', 'startrank',
                                         'endrank', 'diff', 'sign',
                                         'pred_endrank', 'pred_diff', 'pred_sign',
                                         ])
    return df
#
# configuration
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'
# the held-out race used for evaluation
_test_event = 'Indy500-2018'
#_test_event = 'Indy500-2019'
_train_len = 40  # minimum history length (laps) for a usable time series
_feature_mode = FEATURE_STATUS  # dynamic features fed to the model
_context_ratio = 0.
# task selection (alternatives kept for reference):
#_task_id = 'timediff' # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
#
#_task_id = 'lapstatus' # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus' #rank, laptime, laptim2rank, timediff2rank...
_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK; data-matrix row used as target
_exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
# in-lap labeling convention; also part of the dataset id (dbid) below
_inlap_status = 1
# when True, fall back to the true pit lap if the predicted one is unusable
_force_endpit_align = False
_include_endpit = False
_use_mean = True # mean or median to get prediction from samples

# In[16]:

# globals filled by init()
global_start_offset = {}  # event -> elapsed time of each car at lap 0
global_carids = {}        # carno -> global car id
laptime_data = None       # the pickled laptime/rank dataset
freq = "1min"             # synthetic time-index frequency
decode_carids = {}        # global car id -> carno
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
def init(pitmodel = ''):
    """
    Load the race data and the pickled laptime dataset, and construct the
    global pit model.

    input:
        pitmodel ; 'oracle' -> use the true pit laps;
                   any other string -> PitModelMLP(modelfile=pitmodel);
                   a non-string value -> PitModelSimple (top8 when == 0)
    side effects:
        fills global_carids, laptime_data, global_start_offset,
        decode_carids, _pitmodel and dbid
    """
    global global_carids, laptime_data, global_start_offset, decode_carids,_pitmodel
    global dbid, _inlap_status
    dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'

    stagedata = {}
    for event in events:
        # alldata, rankdata, acldata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata = stagedata[event]
        # offset: elapsed time of each car at lap 0
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]

    # load the preprocessed laptime/rank/timediff dataset
    import pickle
    laptimefile = f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
    with open(laptimefile, 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    decode_carids={carid:carno for carno, carid in global_carids.items()}
    print(f'init: load dataset {laptimefile} with {len(laptime_data)} races, {len(global_carids)} cars')

    # construct the pit model
    if not isinstance(pitmodel, str):
        _pitmodel = PitModelSimple(top8=(True if pitmodel==0 else False))
        print(f'init pitmodel as PitModelSimple')
    elif pitmodel=='oracle':
        # keep the sentinel string; simulation code checks for 'oracle'
        _pitmodel = pitmodel
    else:
        _pitmodel = PitModelMLP(modelfile = pitmodel)
        print(f'init pitmodel as PitModelMLP(pitmodel)')
def runtest(modelname, model, datamode, naivemode, trainid= "2018"):
    """
    Run one experiment, evaluate per-stint rank changes and print/return the
    accuracy metrics.

    input:
        modelname ; label only (not used in the computation)
        model, datamode, trainid ; forwarded to run_exp
        naivemode ; True -> evaluate the naive "rank stays the same" baseline
    return:
        (acc, mae, rmse, r2) of the predicted rank change vs the true one
    """
    forecast = run_exp(2,2, train_ratio =0.1 , trainid = trainid,
                       datamode=datamode, model=model)

    # convert the forecast to ranks, depending on the experiment target
    if _exp_id=='rank' or _exp_id=='timediff2rank':
        forecasts_et = eval_stint_direct(forecast, 2)
    elif _exp_id=='laptime2rank':
        forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
    else:
        print(f'Error, {_exp_id} evaluation not support yet')
        return 0,0, 0,0

    # per-stint rank-change table
    df = get_stint_acc(forecasts_et, currank = naivemode, trim= _trim)

    # accuracy: fraction of stints where the sign of the change is correct
    correct = df[df['sign']==df['pred_sign']]
    acc = len(correct)/len(df)

    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values))/len(df)
    # NOTE(review): unlike get_evalret, no sqrt is applied here, so this
    # 'rmse' is actually the MSE — confirm whether that is intended
    rmse = mean_squared_error(df['pred_diff'].values , df['diff'].values)
    mae = mean_absolute_error(df['pred_diff'].values , df['diff'].values)
    r2 = r2_score(df['pred_diff'].values , df['diff'].values)
    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}')
    return acc, mae, rmse, r2
def get_evalret(df):
    """
    Compute accuracy/MAE/RMSE/R2 for the rank-change prediction in ``df``
    and for the naive "rank stays the same" baseline.

    input:
        df ; DataFrame with columns sign/pred_sign, diff/pred_diff,
             startrank/endrank
    return:
        2x4 array [[acc, mae, rmse, r2],
                   [acc_naive, mae_naive, rmse_naive, r2_naive]]
    """
    # model metrics: a prediction is "correct" when the sign of the rank
    # change matches the truth
    correct = df[df['sign']==df['pred_sign']]
    acc = len(correct)/len(df)

    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values))/len(df)
    rmse = math.sqrt(mean_squared_error(df['pred_diff'].values , df['diff'].values))
    mae = mean_absolute_error(df['pred_diff'].values , df['diff'].values)
    r2 = r2_score(df['pred_diff'].values , df['diff'].values)

    # naive result: predict endrank == startrank
    n_correct = df[df['startrank']==df['endrank']]
    acc_naive = len(n_correct)/len(df)
    mae_naive = np.mean(np.abs(df['diff'].values))
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)

    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, {%d}\n \
    naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2, len(df),
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
    )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
def get_evalret_shortterm(df):
    """Score short-term (per-lap) predictions, focused on the race leader.

    Accuracy is measured only on records where the leader (rank 0) is
    involved; MAE/RMSE/R2 are computed over all records. A naive baseline
    assumes the rank does not change between startlap and endlap.

    Returns a 2x4 array: [[acc, mae, rmse, r2], [acc_n, mae_n, rmse_n, r2_n]].
    """
    maxlap = np.max(df['startlap'].values)
    minlap = np.min(df['startlap'].values)
    top1 = df[df['endrank']==0]
    top1_pred = df[df['pred_endrank']==0]
    correct = top1_pred[top1_pred['pred_endrank']==top1_pred['endrank']]
    # epsilon guards a zero denominator when the model never predicts rank 0
    acc = len(correct)/(len(top1_pred) + 1e-10)
    rmse = math.sqrt(mean_squared_error(df['pred_endrank'].values , df['endrank'].values))
    mae = mean_absolute_error(df['pred_endrank'].values , df['endrank'].values)
    r2 = r2_score(df['pred_endrank'].values , df['endrank'].values)
    # absolute error normalized by the number of laps covered
    mae1 = np.sum(np.abs(df['pred_endrank'].values - df['endrank'].values))
    mae1 = mae1/ (maxlap -minlap +1)
    #naive result
    top1_naive = df[df['startrank']==0]
    n_correct = top1_naive[top1_naive['startrank']==top1_naive['endrank']]
    # bugfix: same zero-division guard as `acc` above — previously this
    # raised ZeroDivisionError when no record had startrank == 0
    acc_naive = len(n_correct)/(len(top1_naive) + 1e-10)
    mae_naive = np.mean(np.abs(df['diff'].values))
    mae_naive1 = np.sum(np.abs(df['diff'].values))
    mae_naive1 = mae_naive1 / (maxlap - minlap + 1)
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, {%d}\n \
  naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2, len(top1_pred),
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
    )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
#
# evaluation code in Evaluate-forecasts-paper
#
#straight implementation of prisk
def quantile_loss(target, quantile_forecast, q):
    """Pinball (quantile) loss, summed over all entries; NaNs are ignored.

    Matches the gluonts definition: 2 * sum(|(F - y) * (1{y<=F} - q)|).
    """
    indicator_minus_q = (target <= quantile_forecast) - q
    residual = quantile_forecast - target
    return 2.0 * np.nansum(np.abs(residual * indicator_minus_q))
def abs_target_sum(target):
    """Sum of absolute target values, skipping NaN entries."""
    magnitudes = np.abs(target)
    return np.nansum(magnitudes)
def prisk(full_samples, full_tss, verbose = False):
    """
    calculate prisk by convert <samples, tss> into gluonts format

    Wraps each car's sample matrix in a gluonts SampleForecast and each
    ground-truth series in a time-indexed DataFrame, then runs the gluonts
    Evaluator to obtain the weighted quantile losses (0.1/0.5/0.9).

    input:
        full_samples ; dict carno -> ndarray [samplecnt, laps]
        full_tss     ; dict carno -> 1d ndarray of targets per lap
    output:
        agg_metrics  ; gluonts aggregate metrics dict
    """
    carlist = full_tss.keys()
    tss = []
    forecasts = []
    forecasts_mean = []
    # a synthetic 1-minute frequency: one "timestamp" per lap
    freq = '1min'
    start = pd.Timestamp("01-01-2019", freq=freq)
    for car in carlist:
        testcar = car
        # samples[:, 12:] — the first 12 laps are excluded from scoring;
        # presumably a warm-up/context window, consistent with startid=12
        # in prisk_direct_bysamples. TODO confirm.
        fc = SampleForecast(samples = full_samples[testcar][:, 12:], freq=freq, start_date=start + 12)
        # a single-sample forecast built from the per-lap sample mean
        samples = np.mean(full_samples[testcar][:, 12:], axis =0, keepdims=True)
        fc_mean = SampleForecast(samples = samples, freq=freq, start_date=start + 12)
        # NOTE(review): `start + 12` adds an integer to a Timestamp, which
        # relies on deprecated freq-based arithmetic — verify with the
        # installed pandas version.
        index = pd.date_range(start='2019-01-01 00:00:00', freq = 'T', periods = len(full_tss[testcar]))
        ts = pd.DataFrame(index = index, data = full_tss[testcar])
        tss.append(ts)
        forecasts.append(fc)
        forecasts_mean.append(fc_mean)
        # forecasts_mean is built but not evaluated below
    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(tss))
    if verbose:
        print(json.dumps(agg_metrics, indent=4))
        print(agg_metrics["wQuantileLoss[0.1]"], agg_metrics["wQuantileLoss[0.5]"],agg_metrics["wQuantileLoss[0.9]"])
    return agg_metrics
def prisk_direct_bysamples(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    calculate prisk by <samples, tss> directly (equal to gluonts implementation)

    For every car, take the per-lap quantiles of the forecast samples,
    accumulate the pinball loss against the target from lap `startid` on,
    and normalize the summed losses by the total absolute target mass.

    Returns (agg_metrics dict keyed 'wQuantileLoss[q]', aggrisk array).
    """
    carlist = full_tss.keys()
    risk_mat = np.zeros((len(carlist), len(quantiles)))
    target_sum = np.zeros((len(carlist)))
    aggrisk = np.zeros((len(quantiles)))
    for carid, carno in enumerate(carlist):
        forecast = full_samples[carno]
        target = full_tss[carno]
        # per-quantile forecasts across the sample axis: len(quantiles) x laps
        quantile_forecasts = np.quantile(forecast, quantiles, axis=0)
        for idx, q in enumerate(quantiles):
            q_forecast = quantile_forecasts[idx]
            # inlined pinball loss (was the quantile_loss helper)
            risk_mat[carid, idx] = 2.0 * np.nansum(np.abs(
                (q_forecast[startid:] - target[startid:])
                * ((target[startid:] <= q_forecast[startid:]) - q)))
        # inlined abs_target_sum helper
        target_sum[carid] = np.nansum(np.abs(target[startid:]))
        if verbose==True and carno==3:
            print('target:', target[startid:])
            print('forecast:', q_forecast[startid:])
            print('target_sum:', target_sum[carid])
            print('quantile_forecasts:', quantile_forecasts[:,startid:])
    # aggregate: weighted quantile loss = sum(losses) / sum(|targets|)
    prisk_sum = np.nansum(risk_mat, axis=0)
    if verbose==True:
        print('prisk:',risk_mat)
        print('prisk_sum:',prisk_sum)
        print('target_sum:',target_sum)
    for idx, q in enumerate(quantiles):
        aggrisk[idx] = np.divide(prisk_sum[idx], np.sum(target_sum))
    agg_metrics = {}
    for idx, q in enumerate(quantiles):
        agg_metrics[f'wQuantileLoss[{q}]'] = aggrisk[idx]
    print(agg_metrics.values())
    return agg_metrics, aggrisk
# NOTE(review): this definition is immediately shadowed by a byte-identical
# redefinition of df2samples just below it in this module; this first copy
# is dead code and one of the two should be removed.
def df2samples(dfall, prediction_len=2, samplecnt=1):
    """
    convert a df into <samples, tss> format
    this version works for the output of ml modles which contains only 1 sample

    input:
        dfall          ; DataFrame with columns carno, startlap,
                         endrank, pred_endrank (one record per prediction)
        prediction_len ; laps ahead; the value is stored at
                         startlap + prediction_len
        samplecnt      ; number of identical sample rows to emit
    output:
        full_samples   ; dict carno -> ndarray [samplecnt, 200], NaN elsewhere
        full_tss       ; dict carno -> ndarray [200], NaN elsewhere
    """
    carlist = set(dfall.carno.values)
    full_samples = {}
    full_tss = {}
    startlaps = {}
    # laps for which each car has a prediction record
    for car in carlist:
        startlaps[car] = set(dfall[dfall['carno']==car].startlap.values)
    #empty samples
    # 200 is a fixed cap on the race length (laps); entries stay NaN
    # unless filled below
    for carid, carno in enumerate(carlist):
        full_tss[carno] = np.zeros((200))
        full_tss[carno][:] = np.nan
        full_samples[carno] = np.zeros((samplecnt,200))
        full_samples[carno][:] = np.nan
        for startlap in startlaps[carno]:
            dfrec = dfall[(dfall['carno']==carno) & (dfall['startlap']==startlap)]
            # the lap this prediction refers to
            curlap = int(dfrec.startlap.values[0] + prediction_len)
            target = dfrec.endrank.values[0]
            forecast = dfrec.pred_endrank.values[0]
            # replicate the single point prediction across all sample rows
            for idx in range(samplecnt):
                full_samples[carno][idx,curlap] = forecast
            full_tss[carno][curlap] = target
    return full_samples, full_tss
def df2samples(dfall, prediction_len=2, samplecnt=1):
    """
    convert a df into <samples, tss> format
    this version works for the output of ml modles which contains only 1 sample

    For each (carno, startlap) record the prediction is placed at lap
    startlap + prediction_len; all other laps remain NaN. The single point
    prediction is replicated across `samplecnt` sample rows.
    """
    carlist = set(dfall.carno.values)
    full_samples = {}
    full_tss = {}
    # laps at which each car has a prediction record
    startlaps = {car: set(dfall[dfall['carno'] == car].startlap.values)
                 for car in carlist}
    for carno in carlist:
        # race-length cap of 200 laps; unfilled laps stay NaN
        tss = np.full(200, np.nan)
        samples = np.full((samplecnt, 200), np.nan)
        for startlap in startlaps[carno]:
            rec = dfall[(dfall['carno'] == carno) & (dfall['startlap'] == startlap)]
            curlap = int(rec.startlap.values[0] + prediction_len)
            tss[curlap] = rec.endrank.values[0]
            samples[:, curlap] = rec.pred_endrank.values[0]
        full_tss[carno] = tss
        full_samples[carno] = samples
    return full_samples, full_tss
def runs2samples(runret, errlist):
    """
    for stint results only
    get samples from the runs
    input:
        runret  ; list of result df <carno,startlap,startrank,endrank,diff,sign,pred_endrank,pred_diff,pred_sign,endlap,pred_endlap>
        errlist ; <car, startlap> list of records to skip
    return:
        samples, tss  ; dicts keyed by carno, each ndarray padded with NaN
    """
    samplecnt = len(runret)
    carlist = set(runret[0].carno.values)
    full_samples = {}
    full_tss = {}
    # stack all runs into one frame: each (carno, startlap) then has one
    # prediction row per run
    dfall = pd.concat(runret)
    startlaps = {car: set(dfall[dfall['carno'] == car].startlap.values)
                 for car in carlist}
    for carno in carlist:
        # 200-lap cap; unfilled laps stay NaN
        tss = np.full(200, np.nan)
        samples = np.full((samplecnt, 200), np.nan)
        for startlap in startlaps[carno]:
            # skip records flagged as failed runs
            if [carno, startlap] in errlist:
                continue
            rec = dfall[(dfall['carno'] == carno) & (dfall['startlap'] == startlap)]
            curlap = int(rec.startlap.values[0])
            # one prediction per run becomes one sample row
            samples[:, curlap] = rec.pred_endrank.to_numpy()
            tss[curlap] = rec.endrank.values[0]
        full_samples[carno] = samples
        full_tss[carno] = tss
    return full_samples, full_tss
#
# following functions works for short-term results only
#
#def get_allsamples(year=2018, model='pitmodel'):
def get_allsamples_ex(dfx):
    """
    dfx is the results of multiple runs, ret of test_model call
    dfx[runid] -> < df, samples, tss>

    Collapses each run's per-car sample matrix to its per-lap mean, and
    stacks those means so every run contributes one sample row.
    Note: runids are used directly as row indices, so they must be 0..n-1.

    Returns (full_samples: carno -> [runcnt, lapcnt], full_tss from run 0).
    """
    runs = list(dfx.keys())
    runcnt = len(runs)
    full_samples = {}
    # ground truth is identical across runs; take it from the first one
    full_tss = dfx[runs[0]][2]
    carlist = list(full_tss.keys())
    samplecnt, lapcnt = dfx[runs[0]][1][carlist[0]].shape
    # bugfix: diagnostic label was misspelled 'sacmplecnt'
    print('samplecnt:', samplecnt, 'lapcnt:', lapcnt, 'runcnt:', runcnt)
    #empty samples
    for carid, carno in enumerate(carlist):
        full_samples[carno] = np.zeros((runcnt, lapcnt))
    for runid in runs:
        #one run
        forecast = dfx[runid][1]
        for carid, carno in enumerate(carlist):
            # collapse this run's samples to their per-lap mean
            forecast_mean = np.nanmean(forecast[carno], axis=0)
            full_samples[carno][runid, :] = forecast_mean
    return full_samples, full_tss
def do_rerank(dfout, short=True):
    """
    carno','startlap','startrank','endrank','diff','sign','pred_endrank','pred_diff','pred_sign','endlap','pred_endlap
    output of prediction of target can be float
    resort the endrank globally

    For each startlap, converts the (possibly float) pred_endrank values
    into a dense 0..n-1 ranking across all cars on that lap, then
    recomputes pred_diff and pred_sign from the new ranks.
    """
    cols = ['carno','startlap','startrank','endrank','diff','sign','pred_endrank','pred_diff','pred_sign','endlap','pred_endlap']
    colid = {name: i for i, name in enumerate(cols)}
    print('rerank...')
    stacked = None
    for lap in set(dfout.startlap.values):
        block = dfout[dfout['startlap'] == lap].to_numpy()
        # argsort-of-argsort converts scores into dense rank positions
        order = np.argsort(block[:, colid['pred_endrank']], axis=0)
        block[:, colid['pred_endrank']] = np.argsort(order, axis=0)
        # refresh derived prediction columns from the new ranks
        block[:, colid['pred_diff']] = block[:, colid['pred_endrank']] - block[:, colid['endrank']]
        for row in block:
            delta = row[colid['pred_diff']]
            if delta == 0:
                row[colid['pred_sign']] = 0
            elif delta > 0:
                row[colid['pred_sign']] = 1
            else:
                row[colid['pred_sign']] = -1
        stacked = block if stacked is None else np.vstack((stacked, block))
    # short format drops the trailing endlap / pred_endlap columns
    out_cols = cols[:-2] if short else cols
    return pd.DataFrame(stacked.astype(int), columns=out_cols)
#
# empty main
#
if __name__ == '__main__':
    # Command-line entry point: parses options, sets the module-level
    # experiment globals (_dataset_id, _test_event, _task_id, ...) and
    # kicks off evaluation via init().
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # cmd argument parser
    usage = 'stint_predictor_fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    parser.add_option("--contextratio", dest="contextratio", default=0.)
    parser.add_option("--trim", dest="trim", type=int, default=0)
    opt, args = parser.parse_args()
    #set global parameters
    _dataset_id = opt.datasetid
    _test_event = opt.testevent
    _trim = opt.trim
    # the task determines which time series is modeled and how forecasts
    # are mapped back to rank positions (see runtest)
    if opt.taskid == 'laptime':
        _task_id = 'laptime' # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
        _exp_id='laptime2rank'  #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff' # rank,laptime, the trained model's task
        _run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
        _exp_id='timediff2rank'  #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'rank':
        _task_id = 'rank' # rank,laptime, the trained model's task
        _run_ts = COL_RANK #COL_LAPTIME,COL_RANK
        _exp_id='rank'  #rank, laptime, laptim2rank, timediff2rank...
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)
    if _dataset_id=='' or _test_event=='':
        # NOTE(review): 'testevnet' typo in the message text (runtime string,
        # left unchanged here)
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)
    # NOTE(review): find() > 0 misses a 'pitage' prefix at index 0;
    # probably intended `>= 0` or `'pitage' in _dataset_id` — confirm.
    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE
    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
            _task_id)
    init()
rankpredictor | rankpredictor-master/src/indycar/model/quicktest_modules_beforeconfig.py | #!/usr/bin/env python
# coding: utf-8
"""
RankNet QuickTest goes through the following steps
makedb laptime
makedb gluonts
train model
evaluate model
draw figures
version 0.4
supported features:
forecast_mode: shortterm, stint
trainmodel : deepAR , deepARW-Oracle, deepAR-multi
testmodel : standard, oracle,pitmodel, joint
Usage: RankNet-QuickTest.py <configfile> [options]
options overwrite the configurations for quick experiments needs, include:
_forecast_mode ;
trainmodel ;
testmodel ;
_joint_train ; False/True
loopcnt ; 100/2
_pitmodel_bias ; 0/2,4
year ; 2018/2019
_test_event ; Indy500-2018, Indy500-2019
"""
import logging
from optparse import OptionParser
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import random
import mxnet as mx
from mxnet import gluon
import pickle
import json
import copy
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from pathlib import Path
import configparser
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWeightEstimator
#import indycar.model.stint_simulator_shortterm_pitmodel as stint
import indycar.model.quicktest_simulator as stint
logger = logging.getLogger(__name__)
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-car, per-lap completed-laps table.

    Keeps the earliest timing record per (car_number, completed_laps),
    drops bookkeeping columns, and derives per-car rank/elapsed-time
    deltas (zeroed at each car's first row).

    Returns columns: car_number, completed_laps, time_diff, rank,
    track_status, lap_status, elapsed_time.
    """
    # pick up data with valid rank: earliest record per (car, lap)
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                  "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
                  "last_pitted_lap","start_position","laps_led"], axis=1)
    carnumber = set(uni_ds['car_number'])
    print('cars:', carnumber)
    print('#cars=', len(carnumber))
    # per-car diffs; the first row of each car is zeroed via `mask`.
    # bugfix: use .loc instead of chained assignment
    # (uni_ds['rank_diff'][mask] = 0), which triggers
    # SettingWithCopyWarning and is not guaranteed to write through.
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[mask, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[mask, 'time_diff'] = 0
    df = uni_ds[['car_number','completed_laps','time_diff','rank','track_status', 'lap_status','elapsed_time']]
    return df
def make_lapstatus_data(dataset):
    """Per-lap track status taken from one car that finished the race.

    The track (flag) status is shared by the whole field, so any finisher's
    lap history gives the full-race flag timeline.
    """
    final_lap = max(dataset.completed_laps)
    # cars holding a record for the final lap are the finishers
    finishers = dataset[dataset.completed_laps == final_lap].car_number.values
    print('count of completed cars:', len(finishers))
    print('completed cars:', finishers)
    # one finisher's history, deduplicated per lap, carries the flag data
    sample = dataset[dataset['car_number'] == finishers[0]]
    sample = sample.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    return sample[['completed_laps','track_status']]
def load_data(event, year=0):
    """Load one race's final CSV and derive the standard data views.

    event ; race name, e.g. 'Indy500-2018' (used in the CSV filename)
    year  ; optional int; when > 0 it is appended to the filename

    Returns (alldata, rankdata, acldata, flagdata).
    """
    if year>0:
        # bugfix: `year` is an int here (it is compared with > 0), so the
        # original str+int concatenation raised TypeError; convert explicitly.
        inputfile = '../data/final/C_'+ event +'-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_'+ event +'.csv'
    dataset = pd.read_csv(inputfile)
    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1
    # get records for the cars that finish the race
    completed_car_numbers= dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)
    print('count of completed cars:', completed_car_count)
    print('completed cars:', completed_car_numbers)
    #make a copy
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    # rankdata: earliest record per (car, lap), over ALL cars
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # cldata/flagdata: finishers only; acldata: all cars
    cldata = make_cl_data(dataset)
    flagdata = make_lapstatus_data(dataset)
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata, flagdata
def nan_helper(y):
    """Locate NaNs in a 1d array and provide an index-conversion helper.

    Returns (nans, index) where `nans` is a boolean mask of NaN positions
    and `index(logical)` converts a logical mask to integer indices —
    handy for np.interp-style NaN filling:

        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])

    NOTE: a byte-identical duplicate of this helper appears later in this
    module.
    """
    nan_mask = np.isnan(y)

    def to_indices(logical):
        return logical.nonzero()[0]

    return nan_mask, to_indices
def get_lap2nextpit(lap_status, maxlap=200):
    """
    input:
        lap_status ; array of 0/1 indicating pitstops for each lap,
                     nan means incomplete race
        maxlap     ; the max lap number of the race
    output:
        lap2nextpit ; array of the lap gap to the next pit for each lap
                      (laps after the last pit stay NaN for incomplete cars)
    """
    pitstops = list(np.where(lap_status == 1)[0])
    # complete cars (no NaN padding) get a virtual "pit" at the race end,
    # so the final stint counts down to maxlap (inlined nan_helper check)
    if np.sum(np.isnan(lap_status)) == 0:
        pitstops.append(maxlap)
    lap2nextpit = np.zeros_like(lap_status)
    lap2nextpit[:] = np.nan
    # guard: no pit stops at all -> everything stays NaN
    if not pitstops:
        return lap2nextpit
    idx = 0
    for lap in range(len(lap_status)):
        if lap < pitstops[idx]:
            lap2nextpit[lap] = pitstops[idx] - lap
        else:
            # reached a pit lap: count toward the following stop
            idx += 1
            if idx >= len(pitstops):
                break
            lap2nextpit[lap] = pitstops[idx] - lap
    return lap2nextpit
def get_lapdata(acldata):
    """
    input:
        acldata['car_number','completed_laps','time_diff','rank','track_status', 'lap_status','elapsed_time']
        timediff: [car_number, completed_laps] -> elapsed time diff to leader
    output:
        lapdata ; ndarray of rows
            [car_number, lap, time_diff, rank, track_status, lap_status,
             time_behind, elapsed_time]
        where time_behind is the gap to the minimum elapsed_time on that
        lap. The array mixes strings and numbers, so downstream readers
        re-cast each field (see get_laptime_dataset).
    """
    COL_COMPLETED_LAPS = 1
    COL_ELAPSED_TIME = 6
    maxlap = np.max(acldata['completed_laps'].values)
    time_behind = []
    for lap in range(1, maxlap+1):
        this_lap = acldata[acldata['completed_laps']==lap][
                    ['car_number','completed_laps','time_diff','rank',
                     'track_status', 'lap_status','elapsed_time']].values
        # bugfix: np.float was removed in NumPy 1.24+; use the builtin float
        min_elapsed_time = np.nanmin(this_lap[:,COL_ELAPSED_TIME].astype(float))
        #print(f'lap:{lap}, min_elapsed_time:{min_elapsed_time}')
        for row in this_lap:
            car_number = int(row[0])
            time_diff = row[2]
            rank = row[3]
            track_status = row[4]
            lap_status = row[5]
            # gap to whichever car reached this lap first
            timebehind = float(row[COL_ELAPSED_TIME]) - min_elapsed_time
            time_behind.append([car_number, lap, time_diff,rank,track_status, lap_status,
                               timebehind, float(row[COL_ELAPSED_TIME])])
    lapdata = np.array(time_behind)
    return lapdata
# features: laptime, rank, track_status, lap_status, timediff
# Row indices of the feature axis in datalist[car, feature, lap]
# as built by get_laptime_dataset().
LAPTIME = 0
RANK = 1
TRACK_STATUS = 2  # 1 = yellow ('Y') flag on this lap, else 0
LAP_STATUS = 3  # 1 = pit stop ('P') on this lap, else 0
TIME_BEHIND = 4  # elapsed-time gap to the first car on this lap
CAUTION_LAPS_INSTINT = 5  # caution laps accumulated in the current stint
LAPS_INSTINT = 6  # laps driven in the current stint
ELAPSED_TIME = 7
LAP2NEXTPIT = 8  # laps remaining to this car's next pit stop
_featureCnt = 9  # total feature rows above
def get_laptime_dataset(stagedata, events_id, inlap_status = 0):
    """
    #add caution_laps_instint, laps_instint
    input: (alldata, rankdata, acldata, flagdata) per event in stagedata
    output: laptime & rank data
        [(
        eventid,
        carids : rowid -> carno,
        datalist: #car_number x features x #totallaps (padded by NaN)
           feature rows are indexed by the module-level LAPTIME..LAP2NEXTPIT
           constants
        )]
    inlap_status: 0 keeps pit laps as-is; 1 also marks the lap BEFORE a
    pit stop as LAP_STATUS=1; any other non-zero value marks the lap AFTER.
    """
    laptime_data = []
    for event in stagedata.keys():
        print(f'start event: {event}')
        eventid = events_id[event]
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        totalcars = len(carlist)
        totallaps = len(laplist)
        #carnumber -> carid
        carids={key:idx for idx, key in enumerate(carlist)}
        decode_carids={idx:key for idx, key in enumerate(carlist)}
        # per-car stint counters, reset at every pit stop
        lap_instint = {carids[x]:0 for x in carlist}
        caution_instint = {carids[x]:0 for x in carlist}
        # NOTE: the original also allocated unused `laptime`/`rank` arrays
        # here (dead code; `rank` is rebound as a scalar in the loop below).
        # bugfix: np.NaN alias was removed in NumPy 2.0; use np.nan.
        datalist = np.empty((totalcars, _featureCnt, totallaps-1))
        datalist[:] = np.nan
        lapdata = get_lapdata(acldata)
        for row in lapdata:
            # skip lap 0: diffs are undefined before the first completed lap
            if int(row[1]) == 0:
                continue
            #add to data array; lapdata rows are strings, re-cast each field
            car_number = carids[int(row[0])]
            completed_laps = int(row[1])-1
            time_diff = float(row[2])
            rank = int(row[3])
            track_status = 1 if row[4]=='Y' else 0
            lap_status = 1 if row[5]=='P' else 0
            time_behind = float(row[6])
            datalist[car_number, LAPTIME, completed_laps] = time_diff
            datalist[car_number, RANK, completed_laps] = rank
            datalist[car_number, TRACK_STATUS, completed_laps] = track_status
            datalist[car_number, LAP_STATUS, completed_laps] = lap_status
            datalist[car_number, TIME_BEHIND, completed_laps] = time_behind
            datalist[car_number, ELAPSED_TIME, completed_laps] = float(row[7])
            #stint status
            if track_status == 1:
                caution_instint[car_number] += 1
            lap_instint[car_number] += 1
            if lap_status == 1:
                #new stint starts at the pit stop
                lap_instint[car_number] = 0
                caution_instint[car_number] = 0
            # optionally mark the "in-lap" adjacent to a pit stop
            if (inlap_status!=0):
                if inlap_status == 1:
                    # set the previous lap of 'P'
                    if completed_laps > 0:
                        datalist[car_number, LAP_STATUS, completed_laps-1] = 1
                else:
                    # set the next lap of 'P'
                    if completed_laps +1 < totallaps:
                        datalist[car_number, LAP_STATUS, completed_laps + 1] = 1
            datalist[car_number, LAPS_INSTINT, completed_laps] = lap_instint[car_number]
            datalist[car_number, CAUTION_LAPS_INSTINT, completed_laps] = caution_instint[car_number]
        #update lap2nextpit in datalist
        for caridx in range(datalist.shape[0]):
            lap_status = datalist[caridx, LAP_STATUS, :]
            # NOTE(review): relies on get_lap2nextpit's default maxlap=200;
            # confirm that races never exceed 200 laps.
            lap2nextpit = get_lap2nextpit(lap_status)
            datalist[caridx, LAP2NEXTPIT, :] = lap2nextpit
        #add one record
        laptime_data.append([eventid, decode_carids, datalist])
        # push this event into stage dataframe
        print('event=%s, records=%s'%(event, datalist.shape))
    return laptime_data
# In[ ]:
def nan_helper(y):
    """Return (nan_mask, index_fn) for a 1d array.

    `nan_mask` flags NaN positions; `index_fn(mask)` maps a boolean mask
    to integer indices, as needed by np.interp-based NaN filling:

        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])

    NOTE: this duplicates an identical helper defined earlier in the module.
    """
    mask = np.isnan(y)
    return mask, (lambda logical: logical.nonzero()[0])
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# Column (feature-row) indices used by the training/simulation pipeline;
# the first nine mirror the LAPTIME..LAP2NEXTPIT layout built by
# get_laptime_dataset().
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSED_TIME= 7
COL_LAP2NEXTPIT = 8
#_featureCnt = 9
# added new features
# (computed in prepare_laptimedata via add_leader_cnt / add_allpit_cnt /
# add_shift_feature)
COL_LEADER_PITCNT = 9
COL_TOTAL_PITCNT = 10
COL_SHIFT_TRACKSTATUS = 11
COL_SHIFT_LAPSTATUS = 12
COL_SHIFT_LEADER_PITCNT = 13
COL_SHIFT_TOTAL_PITCNT = 14
COL_LASTFEATURE = 14
# dynamically extended space in simulation
COL_TRACKSTATUS_SAVE = COL_LASTFEATURE+1
COL_LAPSTATUS_SAVE = COL_LASTFEATURE+2
COL_CAUTION_LAPS_INSTINT_SAVE = COL_LASTFEATURE+3
COL_LAPS_INSTINT_SAVE= COL_LASTFEATURE+4
COL_ENDPOS = COL_LASTFEATURE+5
# Feature-selection bit flags; combine with | and test via test_flag().
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
FEATURE_LEADER_PITCNT = 8
FEATURE_TOTAL_PITCNT = 16
FEATURE_SHIFT_TRACKSTATUS = 32
FEATURE_SHIFT_LAPSTATUS = 64
FEATURE_SHIFT_LEADER_PITCNT = 128
FEATURE_SHIFT_TOTAL_PITCNT = 256
# bit flag -> (long name, one-char code) used by decode_feature_mode()
_feature2str= {
        FEATURE_STATUS : ("FEATURE_STATUS",'S'),
        FEATURE_PITAGE : ("FEATURE_PITAGE",'A'),
        FEATURE_LEADER_PITCNT : ("FEATURE_LEADER_PITCNT",'L'),
        FEATURE_TOTAL_PITCNT : ("FEATURE_TOTAL_PITCNT",'T'),
        FEATURE_SHIFT_TRACKSTATUS : ("FEATURE_SHIFT_TRACKSTATUS",'Y'),
        FEATURE_SHIFT_LAPSTATUS : ("FEATURE_SHIFT_LAPSTATUS",'P'),
        FEATURE_SHIFT_LEADER_PITCNT : ("FEATURE_SHIFT_LEADER_PITCNT",'L'),
        FEATURE_SHIFT_TOTAL_PITCNT : ("FEATURE_SHIFT_TOTAL_PITCNT",'T')
        }
# Simulation modes (bit flags); semantics defined by the simulator module.
MODE_ORACLE = 0
MODE_NOLAP = 1
MODE_NOTRACK = 2
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
#MODE_STR={MODE_ORACLE:'oracle', MODE_NOLAP:'nolap',MODE_NOTRACK:'notrack',MODE_TEST:'test'}
#_feature_mode = FEATURE_STATUS
def decode_feature_mode(feature_mode):
    """Pretty-print a feature bitmask and return its compact code string.

    Prints the long names of all enabled features; returns a fixed-width
    string with one character per known flag ('0' for disabled ones).
    """
    long_names = []
    code_chars = []
    for bit, (full_name, abbrev) in _feature2str.items():
        if test_flag(feature_mode, bit):
            long_names.append(full_name)
            code_chars.append(abbrev)
        else:
            code_chars.append('0')
    print(' '.join(long_names))
    return ''.join(code_chars)
def add_leader_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS, shift_len = 0,
                   dest_col = COL_LEADER_PITCNT,
                   verbose = False):
    """
    add a new feature into mat(car, feature, lap)
    leader pit count: for every lap, how many cars ranked AHEAD of this
    car (by rank_col, optionally shifted by shift_len laps) pitted.

    input:
        sel_mat  : laptime_data array [car, feature, lap]
        dest_col : target feature row; -1 appends a new row
    output:
        the input array updated in place (or a widened copy when
        dest_col == -1); NaN tail lengths are synced to rank_col.
    """
    dim1, dim2, dim3 = selmat.shape
    # rerank by the rank_col: idx[p, lap] = car occupying position p
    idx = np.argsort(selmat[:, rank_col,:], axis=0)
    # bugfix: np.float was removed in NumPy 1.24+; use the builtin float
    true_rank = np.argsort(idx, axis=0).astype(float)
    # get leaderCnt by sorted pits: pits[p, lap] = pit status of the car
    # at position p (positions taken shift_len laps earlier)
    pits = np.zeros((dim1,dim3))
    for lap in range(shift_len, dim3):
        col = idx[:, lap-shift_len]
        pits[:, lap] = selmat[col, pit_col, lap]
    # cumulative pits above each position, excluding the car itself
    leaderCnt = np.nancumsum(pits, axis=0) - pits
    if verbose:
        print('pits:\n')
        print(pits[:,190:])
        print('leaderCnt raw:\n')
        print(leaderCnt[:,190:])
    #remove nans
    nanidx = np.isnan(leaderCnt)
    leaderCnt[nanidx] = 0
    if verbose:
        print('leaderCnt after remove nan:\n')
        print(leaderCnt[:,190:])
    if dest_col == -1:
        #create a new data array one feature row wider
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: write into the existing feature row
        newmat = selmat
    # scatter leader counts back from position order to car order
    for lap in range(dim3):
        col = idx[:, lap]
        newmat[col, dest_col, lap] = leaderCnt[:, lap]
    # sync length to COL_RANK: pad the new feature's tail with NaN where
    # the car's rank series ends
    for rec in newmat:
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            rec[dest_col, -nan_count:] = np.nan
    return newmat
def add_allpit_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS,
                   dest_col = COL_TOTAL_PITCNT,verbose = False):
    """
    add a new feature into mat(car, feature, lap)
    total pits in a lap: the field-wide pit count is broadcast to every car.

    input:
        sel_mat  : laptime_data array [car, feature, lap]
        dest_col : target feature row; -1 appends a new row
    output:
        the input array updated in place (or a widened copy when
        dest_col == -1); NaN tail lengths are synced to rank_col.
    """
    dim1, dim2, dim3 = selmat.shape
    # total pit stops across all cars, per lap
    totalCnt = np.nansum(selmat[:, pit_col, :], axis=0).reshape((-1))
    if verbose:
        # bugfix: the original printed an undefined name `pits` here,
        # raising NameError whenever verbose=True; show the per-car
        # pit-status rows instead.
        print('pits:\n')
        print(selmat[:, pit_col, 190:])
        print('totalCnt raw:\n')
        print(totalCnt[190:])
    #remove nans
    nanidx = np.isnan(totalCnt)
    totalCnt[nanidx] = 0
    if verbose:
        print('totalCnt after remove nan:\n')
        print(totalCnt[190:])
    if dest_col == -1:
        #create a new data array one feature row wider
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: write into the existing feature row
        newmat = selmat
    # broadcast the field-wide counts to every car
    for car in range(dim1):
        newmat[car, dest_col, :] = totalCnt
    # sync length to COL_RANK: pad the new feature's tail with NaN where
    # the car's rank series ends
    for rec in newmat:
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            rec[dest_col, -nan_count:] = np.nan
    return newmat
def add_shift_feature(selmat, rank_col=COL_RANK, shift_col=COL_LAPSTATUS, shift_len = 2,
                   dest_col = -1,verbose = False):
    """
    add a new feature into mat(car, feature, lap)
    shift features left in a lap
    warning: these are oracle features, be careful not to let future rank positions leaking
    input:
        sel_mat  : laptime_data array [car, feature, lap]
        shift_len: number of laps the source feature is pulled forward;
                   the last shift_len valid laps are left as 0
        dest_col : target feature row; -1 appends a new row
    output:
        the input array updated in place (or a widened copy when
        dest_col == -1)
    """
    dim1, dim2, dim3 = selmat.shape
    if dest_col == -1:
        #create a new data
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode
        newmat = selmat
    for car in range(dim1):
        # set empty status by default
        newmat[car, dest_col, :] = np.nan
        # get valid laps (valid = lap has a non-NaN rank)
        rec = selmat[car]
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        recnnz = rec[shift_col, ~np.isnan(rec[rank_col,:])]
        reclen = len(recnnz)
        #shift copy: zero-fill the valid range, then pull the source
        # feature shift_len laps earlier within it
        newmat[car, dest_col, :reclen] = 0
        #newmat[car, dim2, :-shift_len] = selmat[car, shift_col, shift_len:]
        newmat[car, dest_col, :reclen-shift_len] = recnnz[shift_len:]
    # sync length to COL_RANK
    # NOTE(review): this tail-sync pass is commented out here (unlike
    # add_leader_cnt/add_allpit_cnt); the per-car NaN prefill above already
    # leaves the invalid tail as NaN, which presumably makes it redundant —
    # confirm before re-enabling.
    #for rec in newmat:
    #    nans, x= nan_helper(rec[rank_col,:])
    #    nan_count = np.sum(nans)
    #    if nan_count > 0:
    #        #todo, some invalid nan, remove them
    #        #rec[dim2, np.isnan(rec[dim2,:])] = 0
    #        rec[dim2, -nan_count:] = np.nan
    return newmat
def prepare_laptimedata(laptime_data, events_id,
                        prediction_length, freq,
                        test_event = 'Indy500-2018',
                        train_ratio=0.8,
                        context_ratio = 0.,
                        shift_len = -1):
    """
    Prepare the laptime data for training.

    1. remove short ts (shorter than train_len + prediction_length)
    2. rerank the tss (recompute COL_RANK after removals)
    3. create new features (leader pit count, total pit count, and the
       four shifted covariates)

    Args:
        laptime_data: list of [eventid, carids, datalist] entries where
            datalist is [carnumbers, features, lapnumber] ->
            [laptime, rank, track, lap, ...]
        events_id: mapping event name -> event id
        prediction_length: forecast horizon in laps
        freq: pandas frequency string (unused here; kept for signature parity)
        test_event: name of the event used as test set
        train_ratio: fraction of the longest ts used as training length
        context_ratio: if nonzero, fraction of the longest ts used as context
        shift_len: shift distance for the shifted features; -1 means
            "use prediction_length"

    Returns:
        new_data: list of [eventid, new_carids, feature-augmented matrix]

    NOTE(review): relies on module globals `_train_len`, `_test_train_len`,
    `events`, `nan_helper`, `add_leader_cnt`, `add_allpit_cnt`,
    `add_shift_feature` and the COL_* constants — all defined elsewhere
    in this file.
    """
    _laptime_data = laptime_data.copy()
    test_eventid = events_id[test_event]
    run_ts = COL_RANK
    # check shift len: default the shift distance to the forecast horizon
    if shift_len < 0:
        shift_len = prediction_length
    print('prepare_laptimedata shift len:', shift_len)
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    new_data = []
    for _data in _laptime_data:
        # skip eid > test_eventid — assumes _laptime_data is sorted by event id
        if _data[0] > test_eventid:
            #print('skip this event:', events[_data[0]])
            print('skip this event:', _data[0])
            break
        #if events[_data[0]] == test_event:
        if _data[0] == test_eventid:
            test_mode = True
        else:
            test_mode = False
        # statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            # use global train_len
            train_len = _train_len if not test_mode else _test_train_len
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        # enforce a minimum usable context
        if context_len < 10:
            context_len = 10
        print(f'before ====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # rerank due to short ts removed
        #if run_ts == COL_RANK and dorerank == True:
        if True:
            sel_rows = []
            # use to check the dimension of features: decides whether the
            # new feature columns are appended (dest_col=-1) or overwritten in place
            input_feature_cnt = _data[2].shape[1]
            if input_feature_cnt < COL_LASTFEATURE + 1:
                print('create new features mode, feature_cnt:', input_feature_cnt)
            else:
                print('update features mode, feature_cnt:', input_feature_cnt)
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                # remove nan (only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    print(f'rerank a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                else:
                    sel_rows.append(rowid)
            # get selected matrix of the surviving (long-enough) cars
            sel_idx = np.array(sel_rows)
            selmat = _data[2][sel_idx]
            # recompute the per-lap rank among the surviving cars only:
            # double argsort turns sort order into rank positions
            mask = np.isnan(selmat[:,COL_RANK,:])
            idx = np.argsort(selmat[:,COL_RANK,:], axis=0)
            # NOTE(review): np.float is removed in numpy>=1.24 — this line
            # needs np.float64 on modern numpy; left unchanged here
            true_rank = np.argsort(idx, axis=0).astype(np.float)
            true_rank[mask] = np.nan
            if test_mode:
                #
                # for historical code mismatch, simulation does not run rerank
                # (test event keeps 1-based ranks)
                #
                _data[2][sel_idx,COL_RANK,:] = true_rank + 1
            else:
                _data[2][sel_idx,COL_RANK,:] = true_rank
            # update the carno dict: new row position -> original car number
            new_carids = {}
            for rowid in range(len(sel_idx)):
                carid = sel_idx[rowid]
                carno = _data[1][carid]
                new_carids[rowid] = carno
            # add new features
            # add leaderPitCnt (verbose logging only for the first event)
            if _data[0]==0:
                verbose = True
            else:
                verbose = False
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_LEADER_PITCNT
            data2_intermediate = add_leader_cnt(_data[2][sel_idx], shift_len = shift_len, dest_col=dest_col, verbose = verbose)
            # add totalPit
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_TOTAL_PITCNT
            data2_intermediate = add_allpit_cnt(data2_intermediate, dest_col=dest_col)
            #
            # add shift features, a fixed order, see the MACROS
            #COL_SHIFT_TRACKSTATUS = 11
            #COL_SHIFT_LAPSTATUS = 12
            #COL_SHIFT_LEADER_PITCNT = 13
            #COL_SHIFT_TOTAL_PITCNT = 14
            #
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TRACKSTATUS
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                                                   shift_col=COL_TRACKSTATUS, shift_len = shift_len)
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LAPSTATUS
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                                                   shift_col=COL_LAPSTATUS, shift_len = shift_len)
            # leader_pitcnt can not be shift, target leaking, just do not use it
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LEADER_PITCNT
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                                                   shift_col=COL_LEADER_PITCNT, shift_len = shift_len)
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TOTAL_PITCNT
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                                                   shift_col=COL_TOTAL_PITCNT, shift_len = shift_len)
            # final
            data2_newfeature = data2_intermediate
            new_data.append([_data[0], new_carids, data2_newfeature])
    return new_data
def get_real_features(feature_mode, rec, endpos):
    """
    Build the list of real-valued dynamic features selected by the
    ``feature_mode`` bitmask, each truncated to ``endpos`` laps.

    Args:
        feature_mode: bitwise OR of the FEATURE_* flags
        rec: per-car matrix [features, lapnumber]
        endpos: exclusive lap cutoff; non-positive means "full series"

    Returns:
        list of 1-D arrays in a fixed order (status pair first, then the
        single-row features) — downstream code depends on this order.
    """
    # non-positive cutoff means "use every lap we have"
    cutoff = rec.shape[1] if endpos <= 0 else endpos

    # (flag, feature rows) in the canonical order; FEATURE_STATUS
    # contributes two rows (track status + lap status), all others one.
    layout = (
        (FEATURE_STATUS, (COL_TRACKSTATUS, COL_LAPSTATUS)),
        (FEATURE_PITAGE, (COL_LAPS_INSTINT,)),
        (FEATURE_LEADER_PITCNT, (COL_LEADER_PITCNT,)),
        (FEATURE_TOTAL_PITCNT, (COL_TOTAL_PITCNT,)),
        (FEATURE_SHIFT_TRACKSTATUS, (COL_SHIFT_TRACKSTATUS,)),
        (FEATURE_SHIFT_LAPSTATUS, (COL_SHIFT_LAPSTATUS,)),
        (FEATURE_SHIFT_LEADER_PITCNT, (COL_SHIFT_LEADER_PITCNT,)),
        (FEATURE_SHIFT_TOTAL_PITCNT, (COL_SHIFT_TOTAL_PITCNT,)),
    )

    selected = []
    for flag, rows in layout:
        if test_flag(feature_mode, flag):
            selected.extend(rec[row, :cutoff] for row in rows)
    return selected
def make_dataset_byevent(_laptime_data, events_id,
                         prediction_length, freq,
                         useeid = False,
                         run_ts=COL_LAPTIME,
                         test_event = 'Indy500-2018',
                         use_global_dict = True,
                         oracle_mode = MODE_ORACLE,
                         half_moving_win = True,
                         train_ratio=0.8,
                         log_transform = False,
                         context_ratio = 0.,
                         dorerank = True,
                         joint_train = 0,
                         test_cars = []
                         ):
    """
    Build gluonts train/test ListDatasets from the prepared laptime data.

    Split the ts to train and test part by the ratio; for the test event
    a rolling window (step 1) of truncated series is emitted.

    oracle_mode: false to simulate prediction in real by
        set the covariates of track and lap status as nan in the testset

    Returns:
        (train_ds, test_ds, train_set, test_set) — the ListDatasets plus
        the raw record lists they were built from.

    NOTE(review): reads module globals `_feature_mode`, `_train_len`,
    `_test_train_len`, `events`, `global_carids`, `nan_helper`,
    `test_flag` and the COL_*/MODE_* constants. `half_moving_win` and
    `dorerank` are currently unused (step is hard-coded to -1 and the
    rerank now happens in prepare_laptimedata).
    """
    # global setting
    feature_mode = _feature_mode
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    train_set = []
    test_set = []
    totalTSCnt = 0
    totalTSLen = 0
    test_eventid = events_id[test_event]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        #if events[_data[0]] == test_event:
        if _data[0] == test_eventid:
            test_mode = True
        else:
            test_mode = False
        # statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            # use global train_len
            train_len = _train_len if not test_mode else _test_train_len
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        # enforce a minimum usable context
        if context_len < 10:
            context_len = 10
        print(f'after ====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            # remove nan (only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            totalTSCnt += 1
            totalTSLen += totallen
            if ( totallen < train_len + prediction_length):
                print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                # simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            # check carno in test_cars, testmode only
            if len(test_cars)>0 and carno not in test_cars:
                continue
            # static categorical features: car id, optionally event id
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            # first, get target a copy
            # target can be COL_XXSTATUS; joint_train adds lap status as a
            # second target dimension
            if joint_train:
                target_cols = [run_ts, COL_LAPSTATUS]
                target_val = rec[target_cols].copy().astype(np.float32)
            else:
                target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # selection of features: oracle flags zero out covariates
            if test_flag(oracle_mode, MODE_NOTRACK):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                real_features = get_real_features(feature_mode, rec, -1)
                _train.append({'target': target_val,
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': real_features
                               })
            else:
                # reset train_len
                if context_ratio != 0.:
                    # all go to train set
                    # add [0, context_len] to train set
                    if joint_train:
                        _train.append({'target': target_val[:,:context_len],
                                       'start': start,
                                       'feat_static_cat': static_cat,
                                       'feat_dynamic_real': get_real_features(feature_mode, rec, context_len)
                                       })
                    else:
                        _train.append({'target': target_val[:context_len],
                                       'start': start,
                                       'feat_static_cat': static_cat,
                                       'feat_dynamic_real': get_real_features(feature_mode, rec, context_len)
                                       })
                # testset
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                step = -1
                for endpos in range(totallen, context_len+prediction_length,
                                    step):
                    # NOTE(review): track_rec/lap_rec/pitage_rec are unused
                    # leftovers from an older feature scheme
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    real_features = get_real_features(feature_mode, rec, endpos)
                    if joint_train:
                        _test.append({'target': target_val[:,:endpos],
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': real_features
                                      })
                    else:
                        _test.append({'target': target_val[:endpos],
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': real_features
                                      })
                    test_rec_cnt += 1
            # check feature cnt
            # NOTE(review): real_features is unbound here if the rolling
            # window above ran zero iterations — pre-existing behavior
            featureCnt = len(real_features)
            # add one ts
            print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt},featureCnt:{featureCnt}')
        train_set.extend(_train)
        test_set.extend(_test)
    print(f'train len:{len(train_set)}, test len:{len(test_set)}, totsl TsCnt:{totalTSCnt}, total ts len:{totalTSLen}')
    train_ds = ListDataset(train_set, freq=freq,one_dim_target= False if joint_train else True)
    test_ds = ListDataset(test_set, freq=freq,one_dim_target= False if joint_train else True)
    return train_ds, test_ds, train_set, test_set
# In[ ]:
def init_estimator(model, gpuid, epochs=100, batch_size = 32,
                   target_dim = 3, distr_output = None, use_feat_static = True):
    """
    Construct a gluonts estimator/predictor for the given model name.

    Args:
        model: one of 'deepAR', 'deepAR-Oracle', 'deepARW-Oracle',
            'deepAR-multi', 'simpleFF', 'deepFactor', 'deepState',
            'ets', 'prophet', 'arima', 'naive'
        gpuid: GPU index; negative means CPU
        epochs, batch_size: Trainer settings (ignored for the R/naive models)
        target_dim: output dimension for the multivariate 'deepAR-multi' model
        distr_output: distribution head for the deepAR variants
        use_feat_static: whether to condition on the static car-id category

    Returns:
        a gluonts Estimator (or Predictor for ets/prophet/arima/naive)

    NOTE(review): reads module globals `prediction_length`,
    `context_length`, `cardinality`, `freq`, and `logger`. Exits the
    process (sys.exit) on an unknown model name.
    """
    # choose mxnet context from the gpu id
    if int(gpuid) < 0:
        ctx = "cpu"
    else:
        ctx = "gpu(%s)"%gpuid
    # plain deepAR without dynamic real covariates
    if model == 'deepAR':
        if use_feat_static:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=True,
                cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                                batch_size = batch_size,
                                epochs=epochs,
                                learning_rate=1e-3,
                                num_batches_per_epoch=100
                                )
            )
        else:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=False,
                #cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                                batch_size = batch_size,
                                epochs=epochs,
                                learning_rate=1e-3,
                                num_batches_per_epoch=100
                                )
            )
    # deepAR with the oracle dynamic real covariates enabled
    elif model == 'deepAR-Oracle':
        if use_feat_static:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                                batch_size = batch_size,
                                epochs=epochs,
                                learning_rate=1e-3,
                                num_batches_per_epoch=100
                                )
            )
        else:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                                batch_size = batch_size,
                                epochs=epochs,
                                learning_rate=1e-3,
                                num_batches_per_epoch=100
                                )
            )
    # weighted-loss deepAR variant (project-local estimator class)
    elif model == 'deepARW-Oracle':
        if use_feat_static:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                                batch_size = batch_size,
                                epochs=epochs,
                                learning_rate=1e-3,
                                #hybridize=False,
                                num_batches_per_epoch=100
                                )
            )
        else:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                                batch_size = batch_size,
                                epochs=epochs,
                                learning_rate=1e-3,
                                #hybridize=False,
                                num_batches_per_epoch=100
                                )
            )
    # multivariate deepAR with a Gaussian output over target_dim series
    elif model == 'deepAR-multi':
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=use_feat_static,
            #cardinality=cardinality,
            use_feat_dynamic_real=False,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                            ),
            distr_output=MultivariateGaussianOutput(dim=target_dim),
        )
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            hybridize=False,
                            num_batches_per_epoch=100
                            )
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                            )
        )
    elif model == 'deepState':
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                            )
        )
    # classic baselines: these return ready-to-use predictors, not estimators
    elif model == 'ets':
        estimator = RForecastPredictor(method_name='ets',freq= freq, prediction_length = prediction_length)
    elif model == 'prophet':
        estimator = ProphetPredictor(freq= freq, prediction_length = prediction_length)
    elif model == 'arima':
        estimator = RForecastPredictor(method_name='arima',freq= freq, prediction_length = prediction_length, trunc_length = 200)
    elif model == 'naive':
        estimator = NaivePredictor(freq= freq, prediction_length = prediction_length)
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)
    return estimator
# In[ ]:
#
# simulation engine general
#
def init_simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
                    featuremode = stint.FEATURE_STATUS,
                    pitmodel = 0,
                    inlapmode=0,
                    train_len = 40,test_train_len=40,
                    joint_train = False,
                    pitmodel_bias= 0, prepared_laptimedata = None):
    """
    Configure the `stint` simulation module in place (no return value).

    Sets the module-level knobs on `stint` (dataset/event ids, feature
    mode, task/run/exp ids, train lengths) and hands it the prepared
    laptime data.

    input:
        prepared_laptimedata ; output of prepare_laptimedata (global-style)

    NOTE(review): `predictionlen` is accepted but not used here —
    model loading with it happens in simulation(). Order matters:
    _inlap_status must be set before stint.init().
    """
    #
    # configurataion
    #
    # model path: <_dataset_id>/<_task_id>-<trainid>/
    #_dataset_id = 'indy2013-2018-nocarid'
    stint._inlap_status = inlapmode
    stint.init(pitmodel, pitmodel_bias= pitmodel_bias)
    # todo: add into stint code
    # here add new laptime_data with new features
    #
    stint.set_laptimedata(prepared_laptimedata)
    stint._dataset_id = datasetid
    stint._test_event = testevent
    #_test_event = 'Indy500-2019'
    stint._feature_mode = featuremode
    stint._context_ratio = 0.
    stint._task_id = taskid  # rank,laptime, the trained model's task
    stint._run_ts = runts  #COL_LAPTIME,COL_RANK
    stint._exp_id=expid  #rank, laptime, laptim2rank, timediff2rank...
    stint._use_mean = True
    stint._train_len = train_len
    stint._test_train_len = test_train_len
    stint._joint_train = joint_train
def simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
               datamode, loopcnt, featuremode = stint.FEATURE_STATUS,
               pitmodel = 0, model = 'oracle', inlapmode=0, train_len = 40,test_train_len=40,
               forecastmode = 'shortterm', joint_train = False,
               pitmodel_bias= 0, prepared_laptimedata = None,
               epochs = 1000):
    """
    Configure `stint`, load the trained model, and run `loopcnt`
    simulation passes, reporting the mean evaluation metrics.

    input:
        prepared_laptimedata ; output of prepare_laptimedata (global-style)
        forecastmode ; 'shortterm' (rolling window) or 'stint' (pit-to-pit)

    Returns:
        (b, ret2) where b is the per-run metric array from
        get_evalret_shortterm/get_evalret and ret2 maps run index ->
        raw simulation result.

    NOTE(review): the configuration block duplicates init_simulation();
    keep the two in sync.
    """
    #
    # configurataion
    #
    # model path: <_dataset_id>/<_task_id>-<trainid>/
    #_dataset_id = 'indy2013-2018-nocarid'
    stint._inlap_status = inlapmode
    stint.init(pitmodel, pitmodel_bias= pitmodel_bias)
    # todo: add into stint code
    # here add new laptime_data with new features
    #
    stint.set_laptimedata(prepared_laptimedata)
    #stint.set_laptimedata(laptime_data)
    stint._dataset_id = datasetid
    stint._test_event = testevent
    #_test_event = 'Indy500-2019'
    stint._feature_mode = featuremode
    stint._context_ratio = 0.
    stint._task_id = taskid  # rank,laptime, the trained model's task
    stint._run_ts = runts  #COL_LAPTIME,COL_RANK
    stint._exp_id=expid  #rank, laptime, laptim2rank, timediff2rank...
    stint._use_mean = True
    stint._train_len = train_len
    stint._test_train_len = test_train_len
    stint._joint_train = joint_train
    # stint-level forecasting needs extra alignment knobs
    if forecastmode == 'stint':
        stint._trim = 0
        stint._debug_carlist=[]
        stint._force_endpit_align = False
        stint._include_endpit = True
    predictor = stint.load_model(predictionlen, model,trainid='indy500',epochs = epochs, exproot='./')
    ret2 = {}
    for i in range(loopcnt):
        # each entry: df (stint) or (df, full_samples, full_tss) (shortterm)
        if forecastmode == 'shortterm':
            ret2[i] = stint.run_simulation_shortterm(predictor, predictionlen, stint.freq, datamode=datamode)
        elif forecastmode == 'stint':
            ret2[i] = stint.run_simulation_pred(predictor, predictionlen, stint.freq, datamode=datamode)
        else:
            print('forecastmode not support:', forecastmode)
            break
    # evaluate every run and average the metrics
    acc = []
    for i in ret2.keys():
        if forecastmode == 'shortterm':
            df = ret2[i][0]
            _x = stint.get_evalret_shortterm(df)
        elif forecastmode == 'stint':
            df = ret2[i]
            _x = stint.get_evalret(df)
        acc.append(_x)
    b = np.array(acc)
    print(np.mean(b, axis=0))
    # save keys
    #stint._pitmodel.save_keys('pitmodel-keys.pickle')
    return b, ret2
def long_predict(predictor, sampleCnt = 100):
    """
    Stitch the rolling-window forecasts into one long sample matrix,
    keeping only the farthest (last-step) sample of each window.

    use the farest samples only
    input:
        test_ds ; global var
        predictor

    Returns:
        (target, tss[0]) — a deep-copied SampleForecast whose samples
        span the whole horizon (NaN where no window ends), plus the
        first ground-truth series for plotting.
    """
    def get_start(idx):
        # offset of forecast idx from the series start, in minutes
        # (the synthetic '1min' frequency encodes lap index as minutes)
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=predictor,  # predictor
        num_samples=sampleCnt,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # forecasts are ordered newest-first: [-1] is the earliest window
    first_start = get_start(-1)
    last_start = get_start(0)
    print(first_start, last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        # copy samples: only the last predicted lap of each window survives
        start_pos = get_start(idx)
        pos = start_pos - first_start
        #newmat[car, dim2, :-shift_len] = selmat[car, shift_col, shift_len:]
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
    target.samples = newsamples
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target, tss[0]
def get_alldf(dfx, year=2018, forecast_mode = 'shortterm'):
    """
    Concatenate the per-run result DataFrames in ``dfx`` into one frame.

    Args:
        dfx: dict run_id -> result; in 'shortterm' mode each value is a
            tuple whose first element is the DataFrame, otherwise the
            value is the DataFrame itself
        year: unused (kept for call-site compatibility)
        forecast_mode: 'shortterm' or anything else ('stint')

    Returns:
        a single concatenated DataFrame (the frame itself if there is
        only one run).
    """
    if forecast_mode == 'shortterm':
        frames = [dfx[run_id][0] for run_id in dfx.keys()]
    else:
        frames = [dfx[run_id] for run_id in dfx.keys()]
    return pd.concat(frames) if len(frames) > 1 else frames[0]
def get_alldf_mode(dfx, year=2018, mode=0, forecast_mode = 'shortterm'):
    """
    Aggregate repeated simulation runs into one prediction per
    <carno, startlap> record.

    mode:
        0 ; mode (majority vote across runs)
        1 ; mean
        2 ; median
    forecast_mode: 'shortterm' or 'stint'; controls how dfx is indexed
        (via get_alldf) and which column set the output frame carries.

    Returns:
        DataFrame with one aggregated row per <carno, startlap>.
    """
    # bug fix: forward forecast_mode — previously get_alldf always used
    # its 'shortterm' default, mis-indexing 'stint' results
    dfall = get_alldf(dfx, year=year, forecast_mode=forecast_mode)
    cars = set(dfall.carno.values)
    startlaps = {}
    for car in cars:
        startlaps[car] = set(dfall[dfall['carno']==car].startlap.values)
    retdf = []
    for car in cars:
        for startlap in startlaps[car]:
            dfrec = dfall[(dfall['carno']==car) & (dfall['startlap']==startlap)]
            if mode == 0:
                # majority vote over the runs' predicted end ranks
                pred_endrank = stats.mode(dfrec.pred_endrank.values).mode[0]
            elif mode == 1:
                pred_endrank = np.mean(dfrec.pred_endrank.values)
            elif mode == 2:
                pred_endrank = np.median(dfrec.pred_endrank.values)
            # keep the first run's row and overwrite its prediction fields;
            # positional layout: [2]=startrank, [6]=pred_endrank,
            # [7]=pred_diff, [8]=pred_sign
            firstrec = dfrec.to_numpy()[0,:]
            firstrec[6] = pred_endrank
            # NOTE(review): diff is taken against startrank (col 2), while
            # do_rerank diffs against endrank — confirm which is intended
            firstrec[7] = pred_endrank - firstrec[2]
            if firstrec[7] == 0:
                firstrec[8] = 0
            elif firstrec[7] > 0:
                firstrec[8] = 1
            else:
                firstrec[8] = -1
            retdf.append(firstrec)
    if forecast_mode == 'shortterm':
        dfout = pd.DataFrame(retdf, columns =['carno', 'startlap', 'startrank',
                                              'endrank', 'diff', 'sign',
                                              'pred_endrank', 'pred_diff', 'pred_sign',
                                              ])
    else:
        dfout = pd.DataFrame(retdf, columns =['carno', 'startlap', 'startrank',
                                              'endrank', 'diff', 'sign',
                                              'pred_endrank', 'pred_diff', 'pred_sign',
                                              'endlap','pred_endlap'
                                              ])
    print('df size:', len(dfout))
    return dfout
def get_allsamples(dfx, year=2018):
    """
    Collapse each run's forecast sample paths to their per-lap mean.

    Args:
        dfx: dict run_id (0..n-1) -> (df, forecast_dict, tss_dict) where
            forecast_dict maps carno -> (samples, laps) array
        year: unused (kept for call-site compatibility)

    Returns:
        (full_samples, full_tss) — full_samples maps carno to a
        (runs, laps) matrix of per-run mean forecasts; full_tss is the
        first run's ground-truth dict.
    """
    run_ids = list(dfx.keys())
    reference_tss = dfx[run_ids[0]][2]
    cars = list(reference_tss.keys())
    n_samples, n_laps = dfx[run_ids[0]][1][cars[0]].shape
    print('sacmplecnt:', n_samples, 'lapcnt:', n_laps, 'runcnt:', len(run_ids))
    # one (runs x laps) matrix per car, filled below
    collapsed = {car: np.zeros((len(run_ids), n_laps)) for car in cars}
    for run_id in run_ids:
        # note: run ids are assumed to be 0..n-1 and double as row indices
        run_forecast = dfx[run_id][1]
        for car in cars:
            collapsed[car][run_id, :] = np.nanmean(run_forecast[car], axis=0)
    return collapsed, reference_tss
# straight implementation of prisk
def quantile_loss(target, quantile_forecast, q):
    """
    Pinball (quantile) loss scaled by 2, NaN-safe — matches the gluonts
    quantile_loss definition.
    """
    # 1{target <= forecast} - q : the asymmetric weight of each residual
    indicator = (target <= quantile_forecast) - q
    residual = quantile_forecast - target
    return 2.0 * np.nansum(np.abs(residual * indicator))
def abs_target_sum(target):
    """Sum of the absolute target values, ignoring NaNs."""
    magnitudes = np.abs(target)
    return np.nansum(magnitudes)
def prisk(full_samples, full_tss, verbose = False):
    """
    Compute the weighted quantile loss via the gluonts Evaluator by
    wrapping the raw sample/target dicts into SampleForecast objects.

    Args:
        full_samples: dict carno -> (samples, laps) forecast matrix
        full_tss: dict carno -> ground-truth series
        verbose: dump the full aggregate metrics as JSON

    Returns:
        the gluonts aggregate metrics dict.

    NOTE(review): laps before index 12 are excluded (warm-up);
    `start + 12` relies on the deprecated Timestamp+int arithmetic and
    breaks on modern pandas — confirm before upgrading.
    """
    carlist = full_tss.keys()
    tss = []
    forecasts = []
    forecasts_mean = []
    freq = '1min'
    start = pd.Timestamp("01-01-2019", freq=freq)
    for car in carlist:
        testcar = car
        # full sample forecast and its mean counterpart (mean unused below)
        fc = SampleForecast(samples = full_samples[testcar][:, 12:], freq=freq, start_date=start + 12)
        samples = np.mean(full_samples[testcar][:, 12:], axis =0, keepdims=True)
        fc_mean = SampleForecast(samples = samples, freq=freq, start_date=start + 12)
        index = pd.date_range(start='2019-01-01 00:00:00', freq = 'T', periods = len(full_tss[testcar]))
        ts = pd.DataFrame(index = index, data = full_tss[testcar])
        tss.append(ts)
        forecasts.append(fc)
        forecasts_mean.append(fc_mean)
    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(tss))
    if verbose:
        print(json.dumps(agg_metrics, indent=4))
    print(agg_metrics["wQuantileLoss[0.1]"], agg_metrics["wQuantileLoss[0.5]"],agg_metrics["wQuantileLoss[0.9]"])
    return agg_metrics
def prisk_direct_bysamples2(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    Compute weighted quantile loss directly from sample forecasts.

    NOTE(review): this is a byte-for-byte duplicate of
    prisk_direct_bysamples below — consolidate to one implementation.

    target: endrank
    forecast: pred_endrank
    item_id: <carno, startlap>

    Args:
        full_samples: dict carno -> (samples, laps) forecast matrix
        full_tss: dict carno -> ground-truth series
        quantiles: quantile levels to score
        startid: first lap index to score (earlier laps are warm-up)
        verbose: debug prints (car 3 detail + per-car matrices)

    Returns:
        (agg_metrics, aggrisk) — dict of 'wQuantileLoss[q]' values and
        the same values as an array.
    """
    carlist = full_tss.keys()
    # per-car, per-quantile unnormalized loss and per-car target mass
    prisk = np.zeros((len(carlist), len(quantiles)))
    target_sum = np.zeros((len(carlist)))
    aggrisk = np.zeros((len(quantiles)))
    for carid, carno in enumerate(carlist):
        # for this car
        forecast = full_samples[carno]
        target = full_tss[carno]
        # calc quantiles over the sample axis: len(quantiles) x laps
        quantile_forecasts = np.quantile(forecast, quantiles, axis=0)
        for idx, q in enumerate(quantiles):
            q_forecast = quantile_forecasts[idx]
            prisk[carid, idx] = quantile_loss(target[startid:], q_forecast[startid:], q)
        target_sum[carid] = abs_target_sum(target[startid:])
        if verbose==True and carno==3:
            print('target:', target[startid:])
            print('forecast:', q_forecast[startid:])
            print('target_sum:', target_sum[carid])
            print('quantile_forecasts:', quantile_forecasts[:,startid:])
    # aggregate: total loss normalized by total absolute target mass
    #aggrisk = np.mean(prisk, axis=0)
    prisk_sum = np.nansum(prisk, axis=0)
    if verbose==True:
        print('prisk:',prisk)
        print('prisk_sum:',prisk_sum)
        print('target_sum:',target_sum)
    for idx, q in enumerate(quantiles):
        aggrisk[idx] = np.divide(prisk_sum[idx], np.sum(target_sum))
    agg_metrics = {}
    for idx, q in enumerate(quantiles):
        agg_metrics[f'wQuantileLoss[{q}]'] = aggrisk[idx]
    print(agg_metrics.values())
    return agg_metrics, aggrisk
# In[ ]:
def prisk_direct_bysamples(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    Calculate prisk (weighted quantile loss) by <samples, tss> directly,
    equal to the gluonts implementation.

    target: endrank
    forecast: pred_endrank
    item_id: <carno, startlap>

    Args:
        full_samples: dict carno -> (samples, laps) forecast matrix
        full_tss: dict carno -> ground-truth series
        quantiles: quantile levels to score
        startid: first lap index to score (earlier laps are warm-up)
        verbose: debug prints (car 3 detail + aggregate matrices)

    Returns:
        (agg_metrics, aggrisk) — dict of 'wQuantileLoss[q]' values and
        the same values as an array.
    """
    cars = full_tss.keys()
    n_cars, n_q = len(cars), len(quantiles)
    per_car_risk = np.zeros((n_cars, n_q))
    per_car_target_sum = np.zeros(n_cars)
    for car_idx, car in enumerate(cars):
        samples = full_samples[car]
        target = full_tss[car]
        # quantile curves over the sample axis: (n_q, laps)
        q_forecasts = np.quantile(samples, quantiles, axis=0)
        tgt = target[startid:]
        for q_idx, q in enumerate(quantiles):
            qf = q_forecasts[q_idx][startid:]
            # quantile (pinball) loss, scaled by 2 and NaN-safe
            per_car_risk[car_idx, q_idx] = 2.0 * np.nansum(
                np.abs((qf - tgt) * ((tgt <= qf) - q)))
        # absolute target mass used as the normalizer
        per_car_target_sum[car_idx] = np.nansum(np.abs(tgt))
        if verbose==True and car==3:
            print('target:', tgt)
            print('forecast:', qf)
            print('target_sum:', per_car_target_sum[car_idx])
            print('quantile_forecasts:', q_forecasts[:,startid:])
    # aggregate: total loss / total absolute target mass
    risk_sum = np.nansum(per_car_risk, axis=0)
    if verbose==True:
        print('prisk:', per_car_risk)
        print('prisk_sum:', risk_sum)
        print('target_sum:', per_car_target_sum)
    denom = np.sum(per_car_target_sum)
    aggrisk = np.zeros(n_q)
    for q_idx in range(n_q):
        aggrisk[q_idx] = np.divide(risk_sum[q_idx], denom)
    agg_metrics = {f'wQuantileLoss[{q}]': aggrisk[q_idx]
                   for q_idx, q in enumerate(quantiles)}
    print(agg_metrics.values())
    return agg_metrics, aggrisk
def clear_samples(full_samples, full_tss, clearidx):
    """
    Return deep copies of <full_samples, full_tss> with the laps in
    ``clearidx`` blanked to NaN; the inputs are left untouched.
    """
    import copy
    masked_samples = copy.deepcopy(full_samples)
    masked_tss = copy.deepcopy(full_tss)
    for car in full_tss.keys():
        # blank the selected lap columns in both forecast and target
        masked_samples[car][:, clearidx] = np.nan
        masked_tss[car][clearidx] = np.nan
    return masked_samples, masked_tss
def do_rerank(dfout, short=True):
"""
carno','startlap','startrank','endrank','diff','sign','pred_endrank','pred_diff','pred_sign','endlap','pred_endlap
output of prediction of target can be float
resort the endrank globally
"""
cols=['carno','startlap','startrank','endrank','diff','sign','pred_endrank','pred_diff','pred_sign','endlap','pred_endlap']
colid={x:id for id,x in enumerate(cols)}
#df = dfout.sort_values(by=['startlap','carno'])
print('rerank...')
laps = set(dfout.startlap.values)
dfs = []
for lap in laps:
df = dfout[dfout['startlap']==lap].to_numpy()
#print('in',df)
idx = np.argsort(df[:,colid['pred_endrank']], axis=0)
true_rank = np.argsort(idx, axis=0)
df[:,colid['pred_endrank']] = true_rank
#reset preds
df[:,colid['pred_diff']] = df[:,colid['pred_endrank']] - df[:,colid['endrank']]
for rec in df:
if rec[colid['pred_diff']] == 0:
rec[colid['pred_sign']] = 0
elif rec[colid['pred_diff']] > 0:
rec[colid['pred_sign']] = 1
else:
rec[colid['pred_sign']] = -1
#print('out',df)
if len(dfs) == 0:
dfs = df
else:
dfs = np.vstack((dfs, df))
#dfs.append(df)
#np.vstack(df)
#dfret = pd.concat(dfs)
#data = np.array(dfs)
if short:
dfret = pd.DataFrame(dfs.astype(int), columns = cols[:-2])
else:
dfret = pd.DataFrame(dfs.astype(int), columns = cols)
return dfret
# In[ ]:
def long_predict_bymloutput_multirun(output, dfin, sampleCnt=100):
    """
    Build a long-horizon forecast overlay from the ML output frame
    ``dfin`` (multi-run: all runs' pred_endrank values per lap are kept
    as the sample dimension).

    input:
        test_ds ; global var
        _predictor ; global var
        output ; label for plotting (plot call commented out)
        dfin ; frame with 'startlap' and 'pred_endrank' columns

    Returns:
        (target, tss[0]) — a deep-copied forecast whose samples carry
        the ML predictions (+1 so ranks are 1-based for plotting).
    """
    def get_start(idx):
        # offset of forecast idx from the series start, in minutes
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=_predictor,  # predictor
        num_samples=sampleCnt,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # forecasts are ordered newest-first: [-1] is the earliest window
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        # copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
        # get prediction from ml output
        # pos = laps
        # 1 ... 10 | 11 <- start pos in forecasts
        # 0 ... 9 | 10 <- 9 is the startlap
        #
        startlap = start_pos - 2
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization; all runs' predictions
            # for this lap become the sample column
            pred_val = _rec.pred_endrank.values
            # NOTE(review): assumes len(pred_val) == nsample — confirm
            # the number of runs matches sampleCnt
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)
    target.samples = newsamples
    print('multirun target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bymloutput(output, dfin):
    """
    Build a long-horizon forecast overlay from the ML output frame
    ``dfin`` (single run: one pred_endrank per lap, broadcast over all
    sample rows).

    input:
        test_ds ; global var
        _predictor ; global var
        output ; label for plotting (plot call commented out)
        dfin ; frame with 'startlap' and 'pred_endrank' columns

    Returns:
        (target, tss[0]) — a deep-copied forecast whose samples carry
        the ML predictions (+1 so ranks are 1-based for plotting).
    """
    def get_start(idx):
        # offset of forecast idx from the series start, in minutes
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=_predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # forecasts are ordered newest-first: [-1] is the earliest window
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        # copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
        # get prediction from ml output
        # pos = laps
        # 1 ... 10 | 11 <- start pos in forecasts
        # 0 ... 9 | 10 <- 9 is the startlap
        #
        startlap = start_pos - 2
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization; scalar prediction is
            # broadcast across the whole sample column
            pred_val = _rec.pred_endrank.values[0]
            #pred_val = _rec.pred_endrank.values
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)
    target.samples = newsamples
    print('target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bysamples(output, samples, tss):
    """
    Build a long-horizon forecast overlay directly from a precomputed
    sample matrix (use the farthest samples only).

    input:
        test_ds ; global var
        _predictor ; global var
        output ; label for plotting (plot call commented out)
        samples ; (nsample, laps) matrix to graft onto the forecast
        tss ; NOTE(review): this parameter is immediately shadowed by the
            local reassignment below and never used

    Returns:
        (target, tss[0]) — a deep-copied forecast carrying the supplied
        samples (+1 so ranks are 1-based for plotting).
    """
    def get_start(idx):
        # offset of forecast idx from the series start, in minutes
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=_predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # forecasts are ordered newest-first: [-1] is the earliest window
    first_start = get_start(-1)
    last_start = get_start(0)
    print(first_start, last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # sample array size: last_start - first_start + npredict
    arraysize = last_start - first_start + npredict
    #error here
    #target.samples = samples[:,-len(forecasts)-1:] + 1
    #target.samples = samples[:, 10 + npredict:] + 1
    target.samples = samples[:, first_start:first_start + arraysize] + 1
    print('long_predict_bysamples==>target samples shape:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target, tss[0]
#
# different idx format to bymloutput
#
def long_predict_bydf(output, dfin):
    """
    Build a long-horizon forecast overlay from the ML output frame
    ``dfin``; identical to long_predict_bymloutput except for the
    start-lap offset (start_pos - 1 instead of - 2) — a different idx
    convention in the input frame.

    input:
        test_ds ; global var
        _predictor ; global var
        output ; label for plotting (plot call commented out)
        dfin ; frame with 'startlap' and 'pred_endrank' columns

    Returns:
        (target, tss[0]) — a deep-copied forecast whose samples carry
        the ML predictions (+1 so ranks are 1-based for plotting).
    """
    def get_start(idx):
        # offset of forecast idx from the series start, in minutes
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor= _predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # forecasts are ordered newest-first: [-1] is the earliest window
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        # copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
        # get prediction from ml output
        # pos = laps
        # 1 ... 10 | 11 <- start pos in forecasts
        # 0 ... 9 | 10 <- 9 is the startlap
        #
        startlap = start_pos - 1
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values[0]
            #pred_val = _rec.pred_endrank.values
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)
    target.samples = newsamples
    print('target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def get_ranknet_multirun(retdata, testcar, sampleCnt=100):
    """Collect multi-run RankNet results for one car and build a forecast.

    Concatenates the per-run result frames for ``testcar``, shifts the
    lap/rank columns from 1-based to 0-based (the ML-model output format),
    and delegates to ``long_predict_bymloutput_multirun``.
    """
    per_run = [run[0][run[0]['carno'] == testcar] for run in retdata.values()]
    dfin_ranknet = pd.concat(per_run)
    print('dfin_ranknet size:', len(dfin_ranknet))
    # shift to the 0-based indexing used by the ml-model output format
    for col in ('startlap', 'startrank', 'endrank'):
        dfin_ranknet[col] = dfin_ranknet[col] - 1
    return long_predict_bymloutput_multirun('ranknet-rank', dfin_ranknet,
                                            sampleCnt=sampleCnt)
# In[ ]:
def ploth(ts_entry, forecast_entry, pits,caution, pitstop,outputfile,
         colors = ['r','g','m'],
         plabels= ['observed','svr','arima','ranknet'],
         ylabel = 'RANK'):
    """Draw one stacked subplot per model, comparing forecasts to observations.

    Each subplot shows the observed series, a shifted 'CurRank' baseline,
    the model's fan chart, the race-status strip (caution/pit laps), and
    pit-stop markers; the figure is shown and saved as <outputfile>.pdf.

    NOTE(review): `colors`/`plabels` are mutable default arguments; they are
    only read here, so this is harmless but worth cleaning up.
    """
    #plot_length = int(forecast_entry[0].samples.shape[1] *1.2)
    #plot_length = forecast_entry[0].samples.shape[1] + 10
    #prediction_intervals = (50.0, 90.0)
    prediction_intervals = [90.0]
    #legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    figcnt = len(forecast_entry)
    #fig, axs = plt.subplots(figcnt,1, figsize=(8,6))
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    #colors = ['r','g','m']
    #plabels = ['observed','svr','arima','ranknet']
    for idx in range(figcnt):
        ax = plt.subplot(figcnt, 1, idx+1)
        #ax = plt.subplot(1, figcnt, idx+1)
        #ts_entry.iloc[-plot_length:,0].plot(ax=axs, linewidth=1)  # plot the time series
        #ts_entry.iloc[-plot_length:,0].plot(ax=axs[idx], linewidth=1)  # plot the time series
        #plot_length = int(forecast_entry[idx].samples.shape[1] *1.2)
        # observed series
        ts_entry[idx].iloc[:,0].plot(linewidth=1, color='b',
                    marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # currank
        # baseline: the same observed values re-indexed 2 steps later,
        # i.e. "current rank carried forward" as a naive reference
        sv = ts_entry[idx].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq='1min') + 2
        date_index = pd.date_range(start, periods = len(sv)-2, freq='1min')
        df2 = pd.DataFrame(sv[:-2], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
                    marker='+', alpha=0.7, zorder=-1, label='CurRank')
        #for idx in range(len(forecast_entry)):
        #    forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='g')
        forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals,
                    color=colors[idx],label=plabels[idx+1], zorder=10)
        #forecast_entry[1].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='b')
        #forecast_entry[2].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='r')
        #add mean line, compare with median
        #if forecast_entry[idx].samples.shape[0] > 1:
        if idx>3:
            mean_forecast = copy.deepcopy(forecast_entry[idx])
            mean_forecast.samples = np.mean(mean_forecast.samples, axis=0).reshape((1,-1))
            mean_forecast.copy_dim(0).plot(prediction_intervals=prediction_intervals,
                    color='g',label='use-mean', zorder=10)
        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        #if idx==0:
        ax.set_ylabel(ylabel)
        if idx==0:
            plt.title(outputfile)
        # relabel the x axis so ticks read as 1-based lap numbers
        locs, labels = plt.xticks()
        #plt.xticks(locs, range(len(locs)))
        start_loc = locs[0]
        offset = range(0, 200, 5)
        #new_locs = range(start_loc , start_loc+200, 10)
        new_locs = [start_loc + x for x in offset]
        #new_labels = [str(x-start_loc + 1) for x in new_locs]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[idx])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            # NOTE(review): this branch reuses `xl` from the previous loop
            # iteration (plt.xlim() is not re-queried here) — works only
            # because all subplots share the same left limit; verify.
            xlim_h = len(ts_entry[idx])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        #ax.set_xlim((80,110))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)
        #add racestatus
        # pit markers use the rank column for RANK plots, laptime otherwise
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcar(carno):
    """Plot rank forecasts of the first five models for one car.

    Reads the module-level ``alldata`` and ``rankdata``.
    """
    series_list, forecast_list = alldata[carno]
    status = get_racestatus(carno, rankdata)
    pits, cautions, caution, pitstop, ranks, laptimes = status
    print(np.where(pitstop == 1))
    ploth(series_list[:5], forecast_list[:5], pits, caution, pitstop,
          'ranknet-rf-rank-forecast-%d' % carno,
          colors=['y', 'c', 'g', 'm', 'r'],
          plabels=['observed', 'SVR', 'RF', 'Arima', 'RrankNet-Oracle', 'RrankNet-MLP'])
def plotcar_laptime(carno):
    """Plot lap-time forecasts for one car.

    Reads the module-level ``alldata`` and ``rankdata``.
    """
    series_list, forecast_list = alldata[carno]
    status = get_racestatus(carno, rankdata)
    pits, cautions, caution, pitstop, ranks, laptimes = status
    print(np.where(pitstop == 1))
    ploth(series_list, forecast_list, pits, caution, pitstop,
          'ranknet-oracle-laptime-forecast-%d' % carno,
          colors=['m', 'r'],
          plabels=['observed', 'RrankNet-Oracle', 'RrankNet-MLP'],
          ylabel='LapTime')
def plotrank(outputfile, mode='RANK' ):
    """Plot observed rank (or lap time) with race status for every car.

    One subplot per car in the module-level ``alldata``; each shows the raw
    series, pit-stop markers, and the caution/pit strip.  The figure is shown
    and saved as <outputfile>.pdf.

    Args:
        outputfile: output filename stem.
        mode: 'RANK' to plot ranks, anything else to plot lap times.
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        #target_svr, target_rf,target_arima, target_oracle, target_ranknet_1run = savedata[carno]
        #target_oracle(by longpredict), tss_oracle_multirun,tss_ranknet_multirun
        tsss, targets = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, rankdata)
        print(np.where(pitstop==1))
        ax = plt.subplot(figcnt, 1, idx+1)
        if mode == 'RANK':
            ax.plot(ranks, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='Rank')
            ax.set_ylim((-5,+35))
            # pits[:,1] is the rank column of the pit-lap records
            ax.plot(pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            #add racestatus
            add_status(ax,0, caution, pitstop)
        else:
            ax.plot(laptimes, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='LapTime')
            ax.set_ylim((30,140))
            # pits[:,2] is the last_laptime column of the pit-lap records
            ax.plot(pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            #add racestatus
            add_status(ax,0, caution, pitstop,y=32, height=5)
        ax.set_xlim((0,200))
        ax.set_ylabel('car-%d'%carno)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcarx(carno):
    """Plot rank forecasts for one car, splicing in an oracle variant.

    Like plotcar(), but replaces entry 2 of the per-car series/forecast
    lists with entry 1 from the module-level ``oracledata`` before plotting
    (labelled 'Weighted-Oracle' below).

    Reads the module-level ``alldata``, ``oracledata`` and ``rankdata``.
    """
    #target_svr, target_rf,target_arima, target_oracle, target_ranknet_1run = savedata[carno]
    #target_oracle(by longpredict), tss_oracle_multirun,tss_ranknet_multirun
    tsss, targets = alldata[carno]
    oracle_tss, oracle_targets = oracledata[carno]
    # NOTE: mutates the lists stored in alldata[carno] in place
    tsss[2] = oracle_tss[1]
    targets[2] = oracle_targets[1]
    pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, rankdata)
    print(np.where(pitstop==1))
    ploth(tsss[:5], targets[:5], pits, caution, pitstop,
             'ranknet-rf-rank-forecast-%d'%carno,
            colors = ['y','c','g','m','r'],
             plabels= ['observed','SVR','RF','Weighted-Oracle','RrankNet-Oracle','RrankNet-MLP'])
def plotoracle(alldata, carno, destdir):
    """Plot oracle-variant rank forecasts for one car.

    The figure is written under ``destdir``; reads the module-level
    ``rankdata`` for race status.
    """
    outputfile = destdir + 'ranknet-oracle-forecast-%d' % carno
    series_list, forecast_list = alldata[carno]
    status = get_racestatus(carno, rankdata)
    pits, cautions, caution, pitstop, ranks, laptimes = status
    print(np.where(pitstop == 1))
    ploth(series_list, forecast_list, pits, caution, pitstop,
          outputfile,
          colors=['y', 'c', 'g', 'm', 'r'],
          plabels=['observed', '1run-samples', '1run-df', 'multimean',
                   'norerank-multimean', 'mrun-samples'])
def plotallcars(alldata, outputfile, drawid = 0,
        colors = ['g','c','m','r','y'],
        plabels= ['observed','1run-samples','1run-df','multimean','norerank-multimean','mrun-samples'],
        ylabel='RANK'):
    """
    plot a single fig for all cars

    One subplot per car: observed series, a 'CurRank' baseline shifted by
    `prediction_length`, the forecast at index `drawid`, pit markers and the
    caution/pit status strip.  Saved as <outputfile>.pdf.

    NOTE(review): `colors`/`plabels` are mutable default arguments; they are
    only read here, so this is harmless but worth cleaning up.

    input:
        prediction_length,freq ; global var
        alldata, rankdata; global data
        drawid : long prediction result index in alldata[carno] to draw
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    prediction_intervals = [90.0]
    #legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    font = {'family': 'serif',
        'color':  'darkred',
        'weight': 'normal',
        'size': 12,
        }
    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        #target_svr, target_rf,target_arima, target_oracle, target_ranknet_1run = savedata[carno]
        #target_oracle(by longpredict), tss_oracle_multirun,tss_ranknet_multirun
        ts_entry, forecast_entry = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, rankdata)
        print(np.where(pitstop==1))
        ax = plt.subplot(figcnt, 1, idx+1)
        # observed
        ts_entry[drawid].iloc[:,0].plot(linewidth=1, color='b',
                    marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # currank
        # baseline: observed values re-indexed `prediction_length` steps
        # later, i.e. "current rank carried forward"
        sv = ts_entry[drawid].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq=freq) + prediction_length
        date_index = pd.date_range(start, periods = len(sv)-prediction_length, freq=freq)
        df2 = pd.DataFrame(sv[:-prediction_length], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
                    marker='+', alpha=0.7, zorder=-1, label='CurRank')
        #forecast
        forecast_entry[drawid].copy_dim(0).plot(prediction_intervals=prediction_intervals,
                    color=colors[drawid],label=plabels[drawid+1], zorder=10)
        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        ax.set_ylabel(ylabel)
        # relabel the x axis so ticks read as 1-based lap numbers
        locs, labels = plt.xticks()
        #plt.xticks(locs, range(len(locs)))
        start_loc = locs[0]
        offset = range(0, 200, 5)
        #new_locs = range(start_loc , start_loc+200, 10)
        new_locs = [start_loc + x for x in offset]
        #new_labels = [str(x-start_loc + 1) for x in new_locs]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[drawid])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            # NOTE(review): this branch reuses `xl` from the previous loop
            # iteration (plt.xlim() is not re-queried here) — works only
            # because all subplots share the same left limit; verify.
            xlim_h = len(ts_entry[drawid])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        #plt.title(outputfile)
        plt.text(xl + xlim_h - 15, 35, f'car-{carno}',fontdict=font)
        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        #ax.set_xlim((80,110))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)
        #add racestatus
        # pit markers use the rank column for RANK plots, laptime otherwise
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def get_racestatus(carno, rankdata):
    """Extract pit/caution flags and per-lap series for one car.

    Args:
        carno: car number to select.
        rankdata: race frame with car_number, completed_laps, rank,
            last_laptime, time_behind_leader, lap_status, track_status.

    Returns:
        (pits, cautions, caution[1:], pitstop[1:], ranks[1:], laptimes[1:]):
        pit-lap and caution-lap record arrays, the 0/1 status flags, and the
        rank/laptime columns; the [1:] slices drop completed_laps == 0 so
        array position matches the completed-lap index.
    """
    one_car = rankdata[rankdata['car_number'] == carno]
    # record rows: <completed_laps, rank, last_laptime, time_behind_leader>
    data = one_car[['completed_laps', 'rank', 'last_laptime', 'time_behind_leader']].values
    # per-lap binary flags: lap_status 'P' == pit stop, track_status 'Y' == caution
    pitstop = np.array([1 if s == 'P' else 0 for s in one_car['lap_status'].values])
    caution = np.array([1 if s == 'Y' else 0 for s in one_car['track_status'].values])
    pits = data[pitstop == 1]
    cautions = data[caution == 1]
    ranks = one_car[['rank']].values
    laptimes = one_car[['last_laptime']].values
    return pits, cautions, caution[1:], pitstop[1:], ranks[1:], laptimes[1:]
# Race-status colors used by add_status(): red for a pit-stop lap, yellow for
# a caution lap, green otherwise.  The commented hex values are softer
# alternatives to the named matplotlib colors.
#red = '#ff8080'
red = 'red'
#yellow = '#8080ff'
yellow = 'yellow'
#green = '#80ff80'
green = 'green'
def add_status(axs, xl, caution, pitstop, maxlap=200, y=-4, height=2):
    """Draw a per-lap race-status strip onto an axes.

    One unit-wide rectangle per lap: red for a pit-stop lap, yellow for a
    caution lap, green otherwise (module-level ``red``/``yellow``/``green``).

    Args:
        axs: matplotlib axes to draw on.
        xl: x offset (left axis limit) aligning laps with the plotted series.
        caution, pitstop: per-lap 0/1 status arrays.
        maxlap: upper bound on the number of laps drawn.
        y, height: vertical placement and size of the strip.
    """
    # Fix: the original unconditionally shadowed the `maxlap` parameter with
    # min(len(caution), len(pitstop)); honor the caller's bound while still
    # never indexing past either status array.
    maxlap = min(len(caution), len(pitstop), maxlap)
    for lap in range(maxlap):
        fc = green
        if caution[lap] == 1:
            fc = yellow
        if pitstop[lap] == 1:
            fc = red  # pit stop takes precedence over caution
        ec = fc
        rectangle = plt.Rectangle((lap + xl - 0.5, y), 1, height, fc=fc, ec=ec)
        axs.add_patch(rectangle)
# In[ ]:
#
# stint evaluation
#
def eval_bydf(testdf, bydf, forcematch=True, force2int=False):
    """Evaluate ``testdf`` restricted to the <carno, startlap> records of ``bydf``.

    Args:
        testdf: result frame to score.
        bydf: reference frame whose <carno, startlap> pairs select the records.
        forcematch: when True, drop records whose startrank/endrank (positional
            columns 2 and 3) disagree between the two frames.
        force2int: when True, cast all values to int and rebuild the frame
            with the canonical column names.

    Returns:
        (dfout, accret): the filtered, sorted frame and the accuracy result
        from stint.get_evalret().
    """
    #collect only records in bydf <carno and startlap>
    cars = set(bydf.carno.values)
    startlaps = {}
    for car in cars:
        startlaps[car] = set(bydf[bydf['carno']==car].startlap.values)
    retdf = []
    for car in cars:
        for startlap in startlaps[car]:
            dfrec = testdf[(testdf['carno']==car) & (testdf['startlap']==startlap)]
            #check match
            if forcematch:
                a = testdf[(testdf['carno']==car) & (testdf['startlap']==startlap)].to_numpy().astype(int)
                b = bydf[(bydf['carno']==car) & (bydf['startlap']==startlap)].to_numpy().astype(int)
                if len(a)!=0 and len(b)!=0:
                    #compare
                    #startrank, endrank
                    if not ((a[0][2] == b[0][2]) and (a[0][3] == b[0][3])):
                        #print('mismatch:', a, b)
                        continue
            retdf.append(dfrec)
    dfout = pd.concat(retdf)
    if force2int:
        dfdata = dfout.to_numpy().astype(int)
        dfout = pd.DataFrame(dfdata, columns =['carno', 'startlap', 'startrank',
               'endrank', 'diff', 'sign',
               'pred_endrank', 'pred_diff', 'pred_sign',
               'endlap','pred_endlap'
               ])
    dfout = dfout.sort_values(by=['carno','startlap'])
    print('df size:', len(dfout))
    #return acc
    accret = stint.get_evalret(dfout)[0]
    return dfout , accret
def eval_sync(testdf, errlist, force2int=False):
    """Evaluate a result frame after removing the records listed in errlist.

    ``errlist`` holds [carno, startlap] pairs (e.g. from cmp_df) so that two
    models can be scored on exactly the same set of records.

    Args:
        testdf: result frame to score.
        errlist: [carno, startlap] pairs to exclude.
        force2int: when True, cast all values to int and rebuild the frame
            with the canonical column names.

    Returns:
        (dfout, accret): the filtered, sorted frame and the accuracy result
        from stint.get_evalret().
    """
    kept = []
    for car in set(testdf.carno.values):
        car_rows = testdf[testdf['carno'] == car]
        for startlap in set(car_rows.startlap.values):
            if [car, startlap] in errlist:
                continue
            kept.append(car_rows[car_rows['startlap'] == startlap])
    dfout = pd.concat(kept)
    if force2int:
        dfout = pd.DataFrame(dfout.to_numpy().astype(int),
                             columns=['carno', 'startlap', 'startrank',
                                      'endrank', 'diff', 'sign',
                                      'pred_endrank', 'pred_diff', 'pred_sign',
                                      'endlap', 'pred_endlap'])
    dfout = dfout.sort_values(by=['carno', 'startlap'])
    print('df size:', len(dfout))
    accret = stint.get_evalret(dfout)[0]
    return dfout, accret
def cmp_df(testdf, bydf):
    """Compare two result frames record-by-record and collect mismatches.

    For every <carno, startlap> pair present in ``bydf``, checks that
    ``testdf`` holds a record with the same startrank/endrank (positional
    columns 2 and 3).  Minor differences are expected when RankNet removes
    short time series.

    Returns:
        (errcnt, err_list): mismatch count and the [carno, startlap] pairs
        that disagree or are missing on either side.
    """
    # Fix: dropped the unused `retdf = []` local from the original.
    cars = set(bydf.carno.values)
    startlaps = {}
    for car in cars:
        startlaps[car] = set(bydf[bydf['carno']==car].startlap.values)
    err_list = []
    errcnt = 0
    for car in cars:
        for startlap in startlaps[car]:
            a = testdf[(testdf['carno']==car) & (testdf['startlap']==startlap)].to_numpy().astype(int)
            b = bydf[(bydf['carno']==car) & (bydf['startlap']==startlap)].to_numpy().astype(int)
            if len(a)!=0 and len(b)!=0:
                # compare startrank (col 2) and endrank (col 3)
                if not ((a[0][2] == b[0][2]) and (a[0][3] == b[0][3])):
                    print('mismatch:', a, b)
                    errcnt += 1
                    err_list.append([car, startlap])
            else:
                # record missing on one side counts as a mismatch too
                errcnt += 1
                print('mismatch empty:', a, b)
                err_list.append([car, startlap])
    print('errcnt:', errcnt)
    return errcnt, err_list
def df2samples(dfall, prediction_len=2, samplecnt=1):
    """Convert an ML-model result frame into <samples, tss> arrays.

    Each car gets a (samplecnt, 200) sample array and a length-200 target
    array, NaN everywhere except at lap startlap + prediction_len, where the
    predicted / true end rank is written (the single prediction of the ML
    model is replicated across all sample rows).
    """
    full_samples, full_tss = {}, {}
    for carno in set(dfall.carno.values):
        car_rows = dfall[dfall['carno'] == carno]
        targets = np.full(200, np.nan)
        samples = np.full((samplecnt, 200), np.nan)
        for startlap in set(car_rows.startlap.values):
            rec = car_rows[car_rows['startlap'] == startlap]
            curlap = int(rec.startlap.values[0] + prediction_len)
            samples[:, curlap] = rec.pred_endrank.values[0]
            targets[curlap] = rec.endrank.values[0]
        full_samples[carno] = samples
        full_tss[carno] = targets
    return full_samples, full_tss
def df2samples_ex(dfall, samplecnt=100, errlist=None):
    """Convert multi-sample stint results into <samples, tss> arrays.

    For every <carno, startlap> record in ``dfall`` (skipping pairs listed in
    ``errlist``) the per-run predicted end ranks are written into column
    ``startlap`` of a (samplecnt, 200) sample array; the true end rank goes
    into a length-200 target array.  Unfilled laps stay NaN.

    Args:
        dfall: concatenated result frame
            <carno, startlap, startrank, endrank, ..., pred_endrank, ...>.
        samplecnt: number of sample rows; must match the runs in dfall.
        errlist: optional [carno, startlap] pairs to skip.

    Returns:
        (full_samples, full_tss) dicts keyed by car number.
    """
    # Fix: the original used the mutable default `errlist=[]`; use the
    # None sentinel instead (classic mutable-default pitfall).
    if errlist is None:
        errlist = []
    full_samples = {}
    full_tss = {}
    carlist = set(dfall.carno.values)
    startlaps = {}
    for car in carlist:
        startlaps[car] = set(dfall[dfall['carno']==car].startlap.values)
    # NaN-initialized per-car arrays
    for carid, carno in enumerate(carlist):
        full_tss[carno] = np.zeros((200))
        full_tss[carno][:] = np.nan
        full_samples[carno] = np.zeros((samplecnt,200))
        full_samples[carno][:] = np.nan
        for startlap in startlaps[carno]:
            if [carno, startlap] in errlist:
                continue
            dfrec = dfall[(dfall['carno']==carno) & (dfall['startlap']==startlap)]
            curlap = int(dfrec.startlap.values[0])
            target = dfrec.endrank.values[0]
            forecast = dfrec.pred_endrank.to_numpy()
            # one value per run; broadcasts across the sample rows
            full_samples[carno][:,curlap] = forecast
            full_tss[carno][curlap] = target
    return full_samples, full_tss
def runs2samples(runret, errlist):
    """Stack per-run stint results into <samples, tss> arrays.

    ``runret`` is a list of result frames (one per simulation run); the k-th
    run's predicted end rank for a record becomes sample row k at column
    ``startlap``.  Pairs listed in ``errlist`` ([carno, startlap]) are
    skipped; unfilled laps stay NaN.

    Returns:
        (full_samples, full_tss) dicts keyed by car number.
    """
    samplecnt = len(runret)
    dfall = pd.concat(runret)
    full_samples, full_tss = {}, {}
    for carno in set(runret[0].carno.values):
        car_rows = dfall[dfall['carno'] == carno]
        targets = np.full(200, np.nan)
        samples = np.full((samplecnt, 200), np.nan)
        for startlap in set(car_rows.startlap.values):
            if [carno, startlap] in errlist:
                continue
            rec = car_rows[car_rows['startlap'] == startlap]
            curlap = int(rec.startlap.values[0])
            # one predicted end rank per run, in run order
            samples[:, curlap] = rec.pred_endrank.to_numpy()
            targets[curlap] = rec.endrank.values[0]
        full_samples[carno] = samples
        full_tss[carno] = targets
    return full_samples, full_tss
# In[ ]:
def get_config():
    """Snapshot the module-level simulator configuration as a list.

    Order matters: downstream code unpacks these positionally.  Every item
    is a module-level global set elsewhere in this script.
    """
    config = [
        _savedata,
        _skip_overwrite,
        _inlap_status,
        _feature_mode,
        _featureCnt,
        freq ,
        _train_len,
        prediction_length,
        context_ratio,
        context_length,
        contextlen,
        dataset,
        epochs,
        gpuid,
        _use_weighted_model,
        trainmodel,
        _use_cate_feature,
        use_feat_static,
        distroutput,
        batch_size,
        loopcnt,
        _test_event,
        testmodel,
        pitmodel,
        year
    ]
    return config
| 95,655 | 32.539972 | 189 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepar_laptime-rank.py | #!/usr/bin/env python
# coding: utf-8
# # DeepAR on laptime&rank dataset
#
# laptime&rank dataset
# <eventid, carids, laptime (totalcars x totallaps), rank (totalcars x totallaps)>; filled with NaN
# In[1]:
# Third-party imports
import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import os
from pathlib import Path
# Usage banner: this script expects exactly three positional CLI arguments.
print("deepar.py <ts_type> <epochs> <gpuid>")
import sys
if len(sys.argv)!=4:
    exit(-1)
# ts_type selects which series to model (2 = laptime, 3 = rank; see TS_* below)
ts_type = int(sys.argv[1])
epochs = int(sys.argv[2])
gpudevice = int(sys.argv[3])
# run identifier used for the model output directory and the plot filename
runid='deepar_indy_e%d_ts%d'%(epochs, ts_type)
# In[2]:
import pickle
# Load the preprocessed dataset: global_carids maps car number -> global id;
# laptime_data is a list of per-event tuples
# <eventid, carids, laptime (cars x laps), rank (cars x laps)> (see the
# header comment of this script), with NaN padding.
with open('laptime_rank-2018.pickle', 'rb') as f:
    # The protocol version used is detected automatically, so we do not
    # have to specify it.
    global_carids, laptime_data = pickle.load(f, encoding='latin1')
# In[3]:
# The six 2018 events in the dataset; events_id maps event name -> list index.
events = ['Phoenix','Indy500','Texas','Iowa','Pocono','Gateway']
events_id={key:idx for idx, key in enumerate(events)}
# In[4]:
print(f"events: {events}")
# To download one of the built-in datasets, simply call get_dataset with one of the above names. GluonTS can re-use the saved dataset so that it does not need to be downloaded again: simply set `regenerate=False`.
# In[5]:
# NOTE(review): leftover notebook cell — the result of this expression is
# discarded, so the line has no effect.
laptime_data[2][2].astype(np.float32)
# In[6]:
# global configuration
prediction_length = 50
freq = "1H"
cardinality = [len(global_carids)]  # one static categorical: the global car id
# indices into the per-event tuples of laptime_data
TS_LAPTIME=2
TS_RANK=3
# which series to model this run (set from argv: 2 = laptime, 3 = rank)
run_ts = ts_type
from gluonts.dataset.common import ListDataset
start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
train_set = []
test_set = []
#_data: eventid, carids, laptime array
# Build one GluonTS entry per car per event: the series selected by run_ts,
# the fixed start timestamp, and the global car id as a static categorical
# feature.  Training entries hold out the last `prediction_length` points;
# test entries keep the full series.
for _data in laptime_data:
    #_train = [{'target': x.astype(np.float32), 'start': start}
    #        for x in _data[2][:, :-prediction_length]]
    #_test = [{'target': x.astype(np.float32), 'start': start}
    #        for x in _data[2]]
    #map rowid -> carno -> global_carid
    #carids = list(_data[1].values())
    #global_carid = global_carids[_data[1][rowid]]
    _train = [{'target': _data[run_ts][rowid, :-prediction_length].astype(np.float32), 'start': start,
               'feat_static_cat': global_carids[_data[1][rowid]]}
              for rowid in range(_data[run_ts].shape[0]) ]
    _test = [{'target': _data[run_ts][rowid, :].astype(np.float32), 'start': start,
              'feat_static_cat': global_carids[_data[1][rowid]]}
             for rowid in range(_data[run_ts].shape[0]) ]
    train_set.extend(_train)
    test_set.extend(_test)
# In[8]:
# train dataset: cut the last window of length "prediction_length", add "target" and "start" fields
train_ds = ListDataset(train_set, freq=freq)
# test dataset: use the whole dataset, add "target" and "start" fields
test_ds = ListDataset(test_set, freq=freq)
# In general, the datasets provided by GluonTS are objects that consists of three main members:
#
# - `dataset.train` is an iterable collection of data entries used for training. Each entry corresponds to one time series
# - `dataset.test` is an iterable collection of data entries used for inference. The test dataset is an extended version of the train dataset that contains a window in the end of each time series that was not seen during training. This window has length equal to the recommended prediction length.
# - `dataset.metadata` contains metadata of the dataset such as the frequency of the time series, a recommended prediction horizon, associated features, etc.
# In[9]:
from gluonts.dataset.util import to_pandas
# ## Training an existing model (`Estimator`)
#
# GluonTS comes with a number of pre-built models. All the user needs to do is configure some hyperparameters. The existing models focus on (but are not limited to) probabilistic forecasting. Probabilistic forecasts are predictions in the form of a probability distribution, rather than simply a single point estimate.
#
# We will begin with GulonTS's pre-built feedforward neural network estimator, a simple but powerful forecasting model. We will use this model to demonstrate the process of training a model, producing forecasts, and evaluating the results.
#
# GluonTS's built-in feedforward neural network (`SimpleFeedForwardEstimator`) accepts an input window of length `context_length` and predicts the distribution of the values of the subsequent `prediction_length` values. In GluonTS parlance, the feedforward neural network model is an example of `Estimator`. In GluonTS, `Estimator` objects represent a forecasting model as well as details such as its coefficients, weights, etc.
#
# In general, each estimator (pre-built or custom) is configured by a number of hyperparameters that can be either common (but not binding) among all estimators (e.g., the `prediction_length`) or specific for the particular estimator (e.g., number of layers for a neural network or the stride in a CNN).
#
# Finally, each estimator is configured by a `Trainer`, which defines how the model will be trained i.e., the number of epochs, the learning rate, etc.
# In[12]:
from gluonts.model.deepar import DeepAREstimator
from gluonts.trainer import Trainer
# In[13]:
# Configure the DeepAR estimator; the trainer runs on the GPU selected on the
# command line and conditions on the global car id (use_feat_static_cat).
estimator = DeepAREstimator(
    prediction_length=prediction_length,
    context_length=2*prediction_length,
    use_feat_static_cat=True,
    cardinality=cardinality,
    freq=freq,
    trainer=Trainer(ctx="gpu(%d)"%gpudevice,
                    # fix: pass the epoch count as an int — the original
                    # passed the formatted string "%d"%epochs
                    epochs=epochs,
                    learning_rate=1e-3,
                    num_batches_per_epoch=64
                   )
)
# After specifying our estimator with all the necessary hyperparameters we can train it using our training dataset `dataset.train` by invoking the `train` method of the estimator. The training algorithm returns a fitted model (or a `Predictor` in GluonTS parlance) that can be used to construct forecasts.
# In[14]:
predictor = estimator.train(train_ds)
# persist the fitted model under a run-specific directory named by runid
outputfile=runid
if not os.path.exists(outputfile):
    os.mkdir(outputfile)
predictor.serialize(Path(outputfile))
# With a predictor in hand, we can now predict the last window of the `dataset.test` and evaluate our model's performance.
#
# GluonTS comes with the `make_evaluation_predictions` function that automates the process of prediction and model evaluation. Roughly, this function performs the following steps:
#
# - Removes the final window of length `prediction_length` of the `dataset.test` that we want to predict
# - The estimator uses the remaining data to predict (in the form of sample paths) the "future" window that was just removed
# - The module outputs the forecast sample paths and the `dataset.test` (as python generator objects)
# In[15]:
from gluonts.evaluation.backtest import make_evaluation_predictions
# In[16]:
# Predict the final `prediction_length` window of every test series; returns
# generators over sample-path forecasts and the ground-truth series.
forecast_it, ts_it = make_evaluation_predictions(
    dataset=test_ds,  # test dataset
    predictor=predictor,  # predictor
    num_samples=100,  # number of sample paths we want for evaluation
)
# First, we can convert these generators to lists to ease the subsequent computations.
# In[17]:
forecasts = list(forecast_it)
tss = list(ts_it)
# Indy500 Car 12 WillPower
# NOTE(review): index 52 is a hard-coded offset into the concatenated
# multi-event test set — presumably the Indy500 / car 12 series; verify
# against the dataset ordering before relying on it.
ts_entry = tss[52]
# first entry of the forecast list
forecast_entry = forecasts[52]
# In[28]:
print(f"Number of sample paths: {forecast_entry.num_samples}")
print(f"Dimension of samples: {forecast_entry.samples.shape}")
print(f"Start date of the forecast window: {forecast_entry.start_date}")
print(f"Frequency of the time series: {forecast_entry.freq}")
# We can also do calculations to summarize the sample paths, such computing the mean or a quantile for each of the 48 time steps in the forecast window.
# In[29]:
print(f"Mean of the future window:\n {forecast_entry.mean}")
print(f"0.5-quantile (median) of the future window:\n {forecast_entry.quantile(0.5)}")
# `Forecast` objects have a `plot` method that can summarize the forecast paths as the mean, prediction intervals, etc. The prediction intervals are shaded in different colors as a "fan chart".
# In[30]:
def plot_prob_forecasts(ts_entry, forecast_entry):
    """Plot the last 150 observations with the forecast fan chart and save
    the figure as '<runid>.pdf' (reads the module-level ``runid``)."""
    window = 150
    intervals = (50.0, 90.0)
    interval_labels = [f"{k}% prediction interval" for k in intervals]
    legend = ["observations", "median prediction"] + interval_labels[::-1]
    fig, ax = plt.subplots(1, 1, figsize=(10, 7))
    # observed tail first, then the probabilistic forecast on top
    ts_entry[-window:].plot(ax=ax)
    forecast_entry.plot(prediction_intervals=intervals, color='g')
    plt.grid(which="both")
    plt.legend(legend, loc="upper left")
    plt.savefig(runid + '.pdf')
# In[31]:
plot_prob_forecasts(ts_entry, forecast_entry)
# We can also evaluate the quality of our forecasts numerically. In GluonTS, the `Evaluator` class can compute aggregate performance metrics, as well as metrics per time series (which can be useful for analyzing performance across heterogeneous time series).
# In[32]:
from gluonts.evaluation import Evaluator
# In[33]:
# Compute accuracy metrics (losses at the 10/50/90% quantiles, plus the
# standard aggregate metrics) over every test series.
evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
# Aggregate metrics aggregate both across time-steps and across time series.
# In[34]:
print(json.dumps(agg_metrics, indent=4))
# Individual metrics are aggregated only across time-steps.
# In[35]:
# NOTE(review): notebook leftover — the .head() return value is discarded.
item_metrics.head()
| 9,323 | 30.714286 | 428 | py |
rankpredictor | rankpredictor-master/src/indycar/model/stint_simulator_shortterm_pitmodel_beforeclean.py | #!/usr/bin/env python
# coding: utf-8
# ### stint simulator
# based on: Stint-Predictor-Fastrun
#
#
# long term predictor by continuously regressive forecasting at each pitstop
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random, math
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from indycar.model.pitmodel import PitModelSimple, PitModelMLP
# In[2]:
import os
# Seed from system entropy — runs are intentionally stochastic (no fixed seed).
random.seed()
# NOTE(review): notebook leftover — the return value of getcwd() is discarded.
os.getcwd()
#GPUID = 1
# ### global constants
# In[3]:
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# Row indices of the per-car feature matrix rec[feature, lap].
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8
# share the memory
#COL_LAPSTATUS_PRED = 8  # for dynamic lapstatus predictions
#LAPSTATUS SAVED in forecast_et
COL_LAPSTATUS_SAVE = 0 #laptime no use
COL_CAUTION_LAPS_INSTINT_SAVE=7
COL_LAPS_INSTINT_SAVE= 8
# feature-set selector used by make_dataset_byevent (via _feature_mode)
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
# oracle mode -- bit flags combined into oracle_mode and tested with test_flag()
MODE_ORACLE = 1024  # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2
# oracle mode for training
# NOTE(review): these reuse the bit values 1 and 2 above on purpose --
# MODE_NOLAP aliases MODE_ORACLE_TRACKONLY and MODE_NOTRACK aliases
# MODE_ORACLE_LAPONLY (both pairs zero the same feature row).
MODE_NOLAP = 1
MODE_NOTRACK = 2
# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32
# disturbe analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256
# flag value -> human-readable name, used by get_modestr() for logging
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
        MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
        MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
        MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
        MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
        MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
# In[ ]:
# In[4]:
def load_data(event, year=0):
    """Load one race's lap-log CSV and derive ranked per-lap views.

    Args:
        event: event name, e.g. 'Indy500'.
        year: optional int year; when > 0 the file
            '../data/final/C_<event>-<year>.csv' is read, otherwise
            '../data/final/C_<event>.csv'.

    Returns:
        (alldata, rankdata, acldata):
        alldata  -- raw CSV as a DataFrame;
        rankdata -- alldata ordered by elapsed time, deduplicated per
                    (car_number, completed_laps);
        acldata  -- completed-laps table from make_cl_data over all cars.
    """
    if year > 0:
        # BUG FIX: year is an int; the original concatenated it directly
        # to a str ('-' + year), which raised TypeError.
        inputfile = '../data/final/C_' + event + '-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'
    dataset = pd.read_csv(inputfile)

    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1

    # cars that recorded the final lap (i.e. finished the race)
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)

    # keep a full copy before filtering down to finishers
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]

    # order records by race time; keep the first record per (car, lap)
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # NOTE: the original also computed make_cl_data(dataset) into an unused
    # local (cldata); it was dead work and has been removed.
    acldata = make_cl_data(alldata)

    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-(car, lap) completed-laps table.

    Records are ordered by elapsed time, deduplicated per
    (car_number, completed_laps), then re-sorted per car so that per-car
    rank/time deltas can be computed with diff().

    Args:
        dataset: raw lap-log DataFrame; must contain the columns dropped
            and selected below.

    Returns:
        DataFrame with columns [car_number, completed_laps, rank,
        elapsed_time, rank_diff, time_diff, current_status, track_status,
        lap_status]; the first lap of each car has rank_diff == time_diff == 0.
    """
    # pick up data with valid rank: stable order by elapsed time, then
    # keep the first record per (car, lap)
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade", "laps_behind_prec", "overall_rank", "pit_stop_count",
                          "last_pitted_lap", "start_position", "laps_led"], axis=1)

    carnumber = set(uni_ds['car_number'])

    # per-car deltas: diff() across the whole frame, then zero the first
    # row of each car (where the car_number changes).
    # BUG FIX: the original used chained assignment
    # (uni_ds['rank_diff'][mask] = 0), which raises SettingWithCopyWarning
    # and is a silent no-op under pandas copy-on-write; .loc is the
    # reliable form.
    first_of_car = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[first_of_car, 'rank_diff'] = 0

    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[first_of_car, 'time_diff'] = 0

    df = uni_ds[['car_number', 'completed_laps', 'rank', 'elapsed_time',
                 'rank_diff', 'time_diff', "current_status", "track_status", "lap_status"]]
    return df
# In[5]:
def nan_helper(y):
    """Locate NaNs in a 1-d array and provide an index converter.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, boolean mask marking the NaN positions
        - index, callable turning a logical mask into integer indices

    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    nan_mask = np.isnan(y)

    def to_indices(logical):
        return logical.nonzero()[0]

    return nan_mask, to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
def get_modestr(a):
    """Render the flag bits set in `a` as a comma-terminated name list."""
    names = []
    for flag, label in _mode_map.items():
        if test_flag(a, flag):
            names.append(label + ',')
    return ''.join(names)
# endpos -> vector of prediction_length
# per-endpos cache: endpos -> predicted / true track-status vector
_track_pred = {}
_track_true = {}

def init_track_model():
    """Reset the cached track-status predictions and ground truths."""
    global _track_pred, _track_true
    _track_pred, _track_true = {}, {}
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """
    return the predicted track status

    Predictions are cached per endpos in _track_pred; the matching ground
    truth slice is stored in _track_true. A caution length is sampled from
    the empirical model and the remaining yellow-flag laps are written into
    the head of the forecast window.
    """
    global _track_pred, _track_true

    # this is the perfect track model for Indy500 2018
    track_model = [6, 4, 4, 5, 6, 6, 4]

    if endpos not in _track_pred:
        # count consecutive yellow-flag laps immediately before the
        # forecast start
        yflaplen = 0
        for back in range(1, context_len):
            if track_rec[-prediction_length - back] != 1:
                break
            yflaplen += 1

        # laps remain, fill into the future
        trackpred = np.zeros(prediction_length, dtype=int)
        yflap_pred = random.choice(track_model)
        if yflaplen > 0 and yflap_pred > yflaplen:
            trackpred[:yflap_pred - yflaplen] = 1

        _track_pred[endpos] = trackpred
        _track_true[endpos] = track_rec[-prediction_length:].copy()

    return _track_pred[endpos]
# endpos -> vector of prediction_length
# per-endpos cache: endpos -> adjusted track-status vector
_track_adjust = {}

def init_adjust_track_model():
    """Reset the cached track-status adjustments."""
    global _track_adjust
    _track_adjust = dict()
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """
    input:
        tailpos ; <0 end pos of 1
    return the predicted track status

    Randomly shifts the end of the caution-lap run by -1/0/+1 laps;
    results are cached per endpos in _track_adjust.
    """
    global _track_adjust

    # this is the perfect track model for Indy500 2018
    track_model = [-1, 0, 1]

    if endpos not in _track_adjust:
        shift = random.choice(track_model)

        # laps remain, fill into the future
        trackadjust = track_rec[-prediction_length:].copy()
        if shift == -1:
            # shorten the caution by one lap
            trackadjust[tailpos] = 0
        elif shift == 1:
            # extend the caution by one lap (when still inside the window)
            trackadjust[tailpos] = 0
            if tailpos + 1 <= -1:
                trackadjust[tailpos + 1] = 1

        _track_adjust[endpos] = trackadjust

    return _track_adjust[endpos]
# carno -> lap_status
# carno -> adjusted lap-status vector (one random adjustment per car)
_lap_adjust = {}
# shift value -> how many times that shift was applied (statistics)
_empirical_model = {}

def init_adjust_pitmodel():
    """Reset the per-car lap-status adjustment cache and shift statistics.

    BUG FIX: the original declared only `global _lap_adjust`, so the
    assignment `_empirical_model = {}` created a dead local and the
    module-level statistics dict was never actually cleared.
    """
    global _lap_adjust, _empirical_model
    _lap_adjust = {}
    _empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """
    init the lapstatus for each car, save it for future reference
    input:
        carno;
        lapstatus ; the trueth

    Each pit lap (value 1) is moved by a random shift drawn from
    _adjust_model; with force=True the draw is retried until the shifted
    position is valid. The adjusted vector is cached per car in
    _lap_adjust and applied shifts are counted in _empirical_model.
    """
    if carno not in _lap_adjust:
        lapadjust = lapstatus.copy()
        for pos in range(len(lapstatus)):
            if lapadjust[pos] != 1:
                continue
            done = False
            while not done:
                # adjust this pit lap position
                shift = get_random_choice(_adjust_model)
                new_pos = pos + shift
                if 0 <= new_pos < len(lapstatus):
                    # valid target: move the pit lap
                    lapadjust[pos] = 0
                    lapadjust[new_pos] = 1
                    done = True
                    # add statistics
                    if shift not in _empirical_model:
                        _empirical_model[shift] = 1
                    else:
                        _empirical_model[shift] += 1
                if force == False:
                    break
        _lap_adjust[carno] = lapadjust

    return _lap_adjust[carno]
def build_random_model(modeldict):
    """
    input:
        modeldict ; {val: probability}
    return:
        model ; [val, cdf]  (cdf normalized so the last entry is 1.0)
    """
    vals = sorted(modeldict)
    model = np.zeros((len(vals), 2))
    running = 0.0
    for row, val in enumerate(vals):
        running += modeldict[val]
        model[row, 0] = val
        model[row, 1] = running
    # normalize the cumulative column
    model[:, 1] = model[:, 1] / running
    return model
def print_model(model, iscdf=True):
    """
    input:
        model ; [val, cdf] when iscdf=True, otherwise [val, weight]
    Prints each value with its (normalized) probability.
    """
    ordered = model[np.argsort(model[:, 0])]
    prev_cdf = 0
    total = 1. if iscdf else np.sum(ordered[:, 1])

    pairs = []
    for val, prob in ordered:
        pairs.append((val, (prob - prev_cdf) / total))
        if iscdf:
            prev_cdf = prob

    # output
    print(['%d:%.3f' % (x[0], x[1]) for x in pairs])
def get_random_choice(model):
    """
    input:
        model ; [val, cdf]
    return:
        val according to its probability
    """
    draw = np.random.rand()
    # number of cdf entries strictly below the draw == selected row
    row = int(np.sum(model[:, 1] < draw))
    return int(model[row, 0])
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
# Pit-lap shift distribution: probability of moving a pit stop by -2..+2
# laps (weights are normalized by build_random_model; the mass at 0 is
# deliberately small so most pits get shifted).
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
_adjust_model = build_random_model(_modeldict)
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """
    input:
        lap_rec ; lap-status vector, the last prediction_length entries
                  are randomly perturbed
        force   ; retry until the shifted pit position is valid
    return the predicted lap status (adjusted copy of the window)
    """
    # laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(prediction_length):
        if lapadjust[pos] != 1:
            continue
        while True:
            # adjust this pit lap position
            shift = get_random_choice(_adjust_model)
            target = pos + shift
            if 0 <= target < prediction_length:
                # valid: move the pit stop
                lapadjust[pos] = 0
                lapadjust[target] = 1
                break
            if not force:
                break
    return lapadjust
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
    """Randomly shift each pit lap in the forecast window by -1/0/+1.

    Args:
        lap_rec: lap-status vector; only the trailing prediction_length
            entries are copied and perturbed.
        endpos: unused; kept for signature compatibility with callers.
        prediction_length: length of the forecast window.

    Returns:
        Adjusted copy of the last prediction_length entries; shifts that
        would fall outside the window leave the pit lap in place.
    """
    adjust_model = [-1, 0, 1]
    # NOTE: the original drew `lap_adjust = random.choice(adjust_model)`
    # here and never used it; the dead draw has been removed.

    # laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            # adjust this pit lap position
            pos_adjust = random.choice(adjust_model)
            if pos_adjust == -1:
                if pos - 1 >= 0:
                    lapadjust[pos] = 0
                    lapadjust[pos - 1] = 1
            elif pos_adjust == 1:
                if pos + 1 < prediction_length:
                    lapadjust[pos] = 0
                    lapadjust[pos + 1] = 1
    return lapadjust
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
"""
return the predicted pit status
"""
# this is the perfect empirical pit model for Indy500 2018
pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
[45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
[46, 45, 43, 48, 46, 45, 45, 43]]
pit_model = pit_model_all
if cuation_laps_instint>10:
#use low model
pred_pit_laps = random.choice(pit_model[0])
else:
pred_pit_laps = random.choice(pit_model[1])
#laps remain, fill into the future
pitpred = np.array([0 for x in range(prediction_length)])
if (pred_pit_laps > laps_instint) and (pred_pit_laps <= laps_instint + prediction_length):
pitpred[pred_pit_laps - laps_instint - 1] = 1
return pitpred
def make_dataset_byevent(runs, prediction_length, freq,
                       useeid = False,
                       run_ts= COL_LAPTIME,
                       test_event = 'Indy500-2018',
                       test_cars = [],
                       use_global_dict = True,
                       oracle_mode = MODE_ORACLE,
                       half_moving_win = 0,
                       train_ratio=0.8,
                       log_transform = False,
                       context_ratio = 0.,
                       verbose = False
                ):
    """
    split the ts to train and test part by the ratio

    input:
        oracle_mode: false to simulate prediction in real by
            set the covariates of track and lap status as nan in the testset
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
        runs        ; event index into laptime_data, or <0 for all events
        freq        ; pandas frequency string for the ListDataset
    returns:
        train_ds, test_ds (gluonts ListDataset) and the raw
        train_set/test_set record lists.

    NOTE(review): run_ts and test_event are immediately overridden below by
    the module globals _run_ts/_test_event, so the corresponding parameters
    have no effect; test_cars is unused (and a mutable default).
    """
    # parameters replaced by module-level experiment configuration
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode

    # reset the per-endpos caches of the track status models
    init_track_model()
    init_adjust_track_model()

    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []

    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()

    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []

        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out
            # NOTE(review): non-test events are skipped entirely, so only
            # the test event contributes records here.
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)

        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            # pristine copy kept to measure the MAE of the adjusted covariates
            rec_raw = _data[2][rowid].copy()

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)

            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)

            # selection of features: zero out a covariate row when the
            # corresponding mode flag is set
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                        'start': start,
                        'feat_static_cat': static_cat,
                        'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                               rec[COL_LAPSTATUS,:]]
                         }
                      )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len

                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1

                #bug fix, fixed the split point for all cars/ts
                for endpos in range(max_len, context_len+prediction_length,
                                    step):

                    #check if enough for this ts
                    if endpos > totallen:
                        continue

                    # working copies of the covariates up to the split point
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    # stint state at the last observed lap before the forecast
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0

                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                                                     laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0

                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec

                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec

                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]

                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))

                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                              )
                    test_rec_cnt += 1

            #add one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

        train_set.extend(_train)
        test_set.extend(_test)

    print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')

    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)

    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile, freq, prediction_length, cardinality, train_ds, test_ds):
    """Pickle the dataset bundle [freq, prediction_length, cardinality,
    train_ds, test_ds] to `datafile` using the highest protocol."""
    payload = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as f:
        pickle.dump(payload, f, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[6]:
def predict(test_ds, predictor):
    """Run the GluonTS backtest for `predictor` over `test_ds`.

    Returns (tss, forecasts): the ground-truth series and the forecast
    objects, one pair per test record (100 sample paths each).
    """
    fc_iter, ts_iter = make_evaluation_predictions(
        dataset=test_ds,      # test dataset
        predictor=predictor,  # predictor
        num_samples=100,      # number of sample paths we want for evaluation
    )
    # materialize the forecasts first, then the truth series
    forecasts = list(fc_iter)
    tss = list(ts_iter)

    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name, trainid):
    """Load the named predictor and forecast `test_ds`.

    Args:
        test_ds: gluonts test dataset.
        prediction_length: forecast horizon (also part of the model path).
        model_name: one of the serialized deepAR variants ('curtrack',
            'zerotrack', 'oracle', 'oracle-laponly', 'oracle-trackonly',
            'deepAR') or a baseline ('naive', 'zero', 'arima').
        trainid: training-run identifier used in the model directory.

    Returns:
        [tss, forecasts] on success, [] for an unknown model_name.

    Refactored: the six serialized deepAR branches were byte-for-byte
    duplicates except for the model directory name; they are now driven
    by a single name -> directory table.
    """
    with mx.Context(mx.gpu(7)):
        pred_ret = []
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'

        # serialized deepAR variants: model name -> model directory
        deepar_models = {
            'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
            'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
            'oracle': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
            'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
            'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
            'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
        }

        if model_name in deepar_models:
            modeldir = rootdir + deepar_models[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        # naive
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq=freq, prediction_length=prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq=freq, prediction_length=prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        # arima
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq=freq,
                                           prediction_length=prediction_length, trunc_length=60)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        else:
            print(f'error: model {model_name} not support yet!')
    return pred_ret
def load_model(prediction_length, model_name, trainid, exproot='remote'):
    """Deserialize (or construct) the predictor for `model_name`.

    Args:
        prediction_length: forecast horizon (also part of the model path).
        model_name: serialized deepAR variant or baseline (see table below).
        trainid: training-run identifier used in the model directory.
        exproot: experiment root directory under '../models/'.

    Returns:
        The predictor, or None for an unknown model_name.
        BUG FIX: previously an unknown model_name left `predictor` unbound
        and the final `return predictor` raised NameError.

    Refactored like run_prediction_ex: the duplicated deepAR branches are
    table-driven.
    """
    with mx.Context(mx.gpu(7)):
        predictor = None
        rootdir = f'../models/{exproot}/{_dataset_id}/{_task_id}-{trainid}/'

        # serialized deepAR variants: model name -> model directory
        deepar_models = {
            'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
            'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
            'oracle': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
            'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
            'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
            'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
        }

        if model_name in deepar_models:
            modeldir = rootdir + deepar_models[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # naive
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq=freq, prediction_length=prediction_length)
        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq=freq, prediction_length=prediction_length)
        # arima
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq=freq,
                                           prediction_length=prediction_length, trunc_length=60)
        else:
            print(f'error: model {model_name} not support yet!')
    return predictor
# In[7]:
#calc rank
#calc rank
def eval_rank_bytimediff(test_ds, tss, forecasts, prediction_length):
    """Evaluate rank forecasts for timediff models.

    Works for one event only. Groups forecasts by the lap at which the
    forecast starts, then derives true/predicted ranks by argsorting the
    per-car time differences.

    Args:
        test_ds: test dataset for a single event (its feat_static_cat
            carries the global car id, decoded via decode_carids).
        tss, forecasts: output of make_evaluation_predictions.
        prediction_length: expected forecast horizon; mismatches abort.

    Returns:
        (rank_ret, forecasts_et) where rank_ret is a list of
        [lap, time_diff, true_rank, pred_rank] and forecasts_et maps
        completed_laps -> carno -> [true, predicted] windows.
    """
    carlist = []

    # carno-lap# -> elapsed_time[] array
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # global carid -> car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        # calc elapsed time
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # BUG FIX: the message was missing the f-prefix, so the
            # placeholders were printed literally.
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []

        # median over the sample paths as the point forecast
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))
        timediff_array = tss[idx].values.copy()

        # save the prediction, keyed by the lap where the forecast starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                                               forecast_laptime_mean.copy()]

    # calc rank
    rank_ret = []
    for lap in forecasts_et.keys():
        # get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}

        # fill in data: [0]=truth, [1]=prediction
        time_diff = np.zeros((2, len(carlist), prediction_len))

        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        # calculate rank: double argsort turns values into rank positions
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)

        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, time_diff, true_rank, pred_rank])

    return rank_ret, forecasts_et
#calc rank
def eval_laptime(test_ds,tss,forecasts,prediction_length, start_offset):
"""
evaluate rank by laptime forecasting
input:
test_ds ; must be test set for a single event, because test_ds itself does not
contain features to identify the eventid
start_offset[]; elapsed time for lap0, for one specific event
tss,forecasts ; forecast result
prediction_length ;
return:
rank_ret ; [lap, elapsed_time, true_rank, pred_rank]
forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
"""
carlist = []
# carno-lap# -> elapsed_time[] array
forecasts_et = dict()
ds_iter = iter(test_ds)
for idx in range(len(test_ds)):
test_rec = next(ds_iter)
#global carid
carno = decode_carids[test_rec['feat_static_cat'][0]]
#print('car no:', carno)
if carno not in carlist:
carlist.append(carno)
#start_offset is global var
#offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
#print('start_offset:', offset)
# calc elapsed time
prediction_len = forecasts[idx].samples.shape[1]
if prediction_length != prediction_len:
print('error: prediction_len does not match, {prediction_length}:{prediction_len}')
return []
forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
#forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
laptime_array = tss[idx].values.copy()
#elapsed_time = np.cumsum(laptime_array) + offset
laptime_array_hat = tss[idx].values.copy()
laptime_array_hat[-prediction_len:] = forecast_laptime_mean
#elapsed_time_hat = np.cumsum(laptime_array) + offset
#save the prediction
completed_laps = len(tss[idx]) - prediction_len + 1
#print('car no:', carno, 'completed_laps:', completed_laps)
#key = '%s-%s'%(carno, completed_laps)
#forecasts_et[key] = elapsed_time[-prediction_len:].copy()
if completed_laps not in forecasts_et:
forecasts_et[completed_laps] = {}
#forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len-1:].copy(),
# elapsed_time_hat[-prediction_len-1:].copy()]
forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
laptime_array_hat[-prediction_len:].copy()]
# calc rank
rank_ret = []
for lap in forecasts_et.keys():
#get car list for this lap
carlist = list(forecasts_et[lap].keys())
#print('carlist:', carlist)
caridmap={key:idx for idx, key in enumerate(carlist)}
#fill in data
#elapsed_time = np.zeros((2, len(carlist), prediction_len+1))
lap_time = np.zeros((2, len(carlist), prediction_len))
for carno in carlist:
carid = caridmap[carno]
lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
#calculate rank
#idx = np.argsort(elapsed_time[0], axis=0)
#true_rank = np.argsort(idx, axis=0)
true_laptime = lap_time[0]
#idx = np.argsort(elapsed_time[1], axis=0)
#pred_rank = np.argsort(idx, axis=0)
pred_laptime = lap_time[1]
rank_ret.append([lap, lap_time, true_laptime, pred_laptime])
return rank_ret,forecasts_et
#calc rank
def eval_rank(test_ds, tss, forecasts, prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                  contain features to identify the eventid
        start_offset ; elapsed time for lap0, for one specific event.
                  A DataFrame enables laptime -> elapsed-time conversion;
                  any other value makes the target be ranked directly.
        tss, forecasts ; forecast result
        prediction_length ; expected forecast horizon (sanity-checked per record)
    return:
        rank_ret ; list of [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; forecasts_et[completed_laps][carno] ->
                       [elapsed_time, elapsed_time_pred]
    """
    carlist = []

    # carno-lap# -> elapsed_time[] array
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map global carid back to car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        # start_offset is the elapsed time before lap 0 for this car
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number'] == carno)].elapsed_time.values[0]

        # calc elapsed time
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: this message was a plain string literal, so the
            # placeholders were never interpolated (missing f-prefix)
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []

        if _use_mean:
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))
        else:
            forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))

        if isinstance(start_offset, pd.core.frame.DataFrame):
            # laptime2rank: accumulate lap times into elapsed time, then rank
            laptime_array = tss[idx].values.copy()
            elapsed_time = np.cumsum(laptime_array) + offset

            laptime_array = tss[idx].values.copy()
            laptime_array[-prediction_len:] = forecast_laptime_mean
            elapsed_time_hat = np.cumsum(laptime_array) + offset
        else:
            # rank-direct: the target series itself is rankable
            elapsed_time = tss[idx].values.copy()
            elapsed_time_hat = tss[idx].values.copy()
            elapsed_time_hat[-prediction_len:] = forecast_laptime_mean

        # save the prediction, keyed by (completed_laps, carno)
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]

    # calc rank
    rank_ret = []
    for lap in forecasts_et.keys():
        # get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}

        # fill in data
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        # calculate rank: double argsort turns times into 0-based ranks
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])

    return rank_ret, forecasts_et
def get_acc(rank_ret, prediction_length, verbose=False):
    """
    Aggregate rank-prediction accuracy metrics over per-lap rank records.

    input:
        rank_ret ; list of [lap, elapsed_time, true_rank, pred_rank]
                   (only the rank columns are used)
        prediction_length ; forecast horizon of each record
    return:
        ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
         (matching record counts for each metric))
        the counts let the caller compute micro/macro averages
    """
    hit_top1 = 0
    hit_top1_far = 0
    hit_top5 = 0
    hit_top5_far = 0
    tau_sum = 0
    rmse_sum = 0.
    mae_sum = 0.

    for _lap, _et, true_rank, pred_rank in rank_ret:
        # rank 0 == leader; the last column is the farthest-ahead prediction
        hit_top1 += np.sum((true_rank == 0) & (pred_rank == 0))
        hit_top1_far += np.sum((true_rank[:, -1] == 0) & (pred_rank[:, -1] == 0))
        hit_top5 += np.sum((true_rank < 5) & (pred_rank < 5))
        hit_top5_far += np.sum((true_rank[:, -1] < 5) & (pred_rank[:, -1] < 5))

        ktau, _ = stats.kendalltau(true_rank, pred_rank)
        tau_sum += ktau
        rmse_sum += mean_squared_error(pred_rank, true_rank)
        mae_sum += np.sum(np.abs(pred_rank - true_rank))

    recnt = len(rank_ret)
    if recnt > 0:
        top1acc = hit_top1 * 1.0 / (recnt * prediction_length)
        top1acc_farmost = hit_top1_far * 1.0 / recnt
        top5acc = hit_top5 * 1.0 / (5 * recnt * prediction_length)
        top5acc_farmost = hit_top5_far * 1.0 / (5 * recnt)
        tau = tau_sum / recnt
        rmse = rmse_sum / recnt
        mae = mae_sum / recnt
    else:
        # no records: return the raw (zero) accumulators, as before
        top1acc, top1acc_farmost = hit_top1, hit_top1_far
        top5acc, top5acc_farmost = hit_top5, hit_top5_far
        tau, rmse, mae = tau_sum, rmse_sum, mae_sum

    # debug only: report mae in the tau slot when forecasting lap status
    if _run_ts == COL_LAPSTATUS:
        tau = mae

    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', top1acc,
              'top1acc_farmost=', top1acc_farmost,
              'top5acc=', top5acc,
              'top5acc_farmost=', top5acc_farmost,
              )
        print('tau = ', tau,
              'rmse = ', rmse,
              'mae = ', mae)

    return ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
            (recnt * prediction_length, recnt, 5 * recnt * prediction_length, 5 * recnt, recnt, recnt))
# In[ ]:
# In[8]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc, datamode='', models=[]):
    """
    Repeat the experiment ``runs`` times over all (halfmode, plen, trainid)
    combinations and average the resulting metrics.

    input:
        plens=[2,5,10]
        half=[False]
        #trainids = ["indy500-r0.2","indy500-r0.4","indy500"]
        trainids = ["r0.5"]
        runs = 5
        train_ratio=0.5
        testfunc ; run_exp_predpit, run_exp_predtrack, run_exp ... (a callable),
                   or a string id, in which case run_exp(datamode, models) is used
    return:
        dfret ; average result of multiple runs
            dataframe['model' , 'prediction_length', 'halfmode','datamode','trainid',
            'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse',
            'top1acc_std','top1acc_farmost_std','top5acc_std','top5acc_farmost_std','tau_std','rmse_std']
        alldata_ret ; for debug
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
            pred_ret[model] -> [tss, forecasts]
            test_ds[model] -> test_ds
            rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
            forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    # NOTE(review): models=[] is a mutable default argument; harmless here since
    # it is only compared and forwarded, never mutated.
    if plens == [] or half == [] or trainids == []:
        print("error with empty settings")
        return
    #testfunc or (datamode & models)
    if isinstance(testfunc,str) and (datamode == '' or models == []):
        print("error with testfunc")
        return

    allret = []
    alldata_ret = []
    for runid in range(runs):
        exp_data = []
        exp_result = []
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('='*10)
                    if not isinstance(testfunc,str):
                        # custom experiment callable
                        pred_ret, test_ds, rank_ret, metric_ret = testfunc(plen, halfmode,
                                                train_ratio=train_ratio,
                                                trainid=trainid)
                    else:
                        # default experiment driven by datamode/models
                        pred_ret, test_ds, rank_ret, metric_ret = run_exp(plen, halfmode,
                                                train_ratio=train_ratio,
                                                trainid=trainid,
                                                datamode=datamode,
                                                models=models)
                    #save
                    exp_data.append((pred_ret, test_ds, rank_ret))
                    exp_result.extend(metric_ret)

        #save result
        result = pd.DataFrame(exp_result, columns = ['model' , 'prediction_length', 'halfmode',
                                                     'datamode','trainid',
                                                     'top1acc','top1acc_farmost','top5acc',
                                                     'top5acc_farmost','tau','rmse'])
        #result['runid'] = [runid for x in range(len(result))]
        allret.append(result)
        alldata_ret.append(exp_data)

    #final: stack the 6 numeric metric columns of every run into one array
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :,:] = ret[['top1acc','top1acc_farmost','top5acc',
                                   'top5acc_farmost','tau','rmse']].values

    #average over runs (axis 0)
    averagemat = np.mean(metrics[:,:,:], axis=0)
    stdmat = np.std(metrics[:,:,:], axis=0)

    dfhead = allret[0][['model' , 'prediction_length', 'halfmode', 'datamode','trainid']]
    dfaverage = pd.DataFrame(averagemat, columns = ['top1acc','top1acc_farmost','top5acc',
                                                    'top5acc_farmost','tau','rmse'])
    dfstd = pd.DataFrame(stdmat, columns = ['top1acc_std','top1acc_farmost_std','top5acc_std',
                                            'top5acc_farmost_std','tau_std','rmse_std'])
    dfret = pd.concat([dfhead, dfaverage, dfstd], axis=1)

    #if exp_id != '':
    #    dfret.to_csv(f'laptime2rank-evaluate-indy500-{exp_id}-result.csv', float_format='%.3f')
    return dfret, alldata_ret
def checkret_status(dataret, runid = 0, idx = 0,model='oracle'):
    """
    Count yellow-flag and pit laps inside the prediction window of one test set.

    alldata_ret layout (see run_test):
        [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
        pred_ret[model] -> [tss, forecasts]
        test_ds[model] -> test_ds
    Prints the totals; returns nothing.
    """
    # prediction length is taken from the stored forecast samples
    _, plen = dataret[runid][idx][0][model][1][0].samples.shape
    test_ds = dataret[runid][idx][1][model]

    yellow_total = 0
    pit_total = 0
    for test_rec in test_ds:
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        track_feat, lap_feat = test_rec['feat_dynamic_real']
        # only the forecast window (last plen laps) is inspected
        yellow_total += np.sum(track_feat[-plen:])
        pit_total += np.sum(lap_feat[-plen:])

    print('yfcnt:', yellow_total, 'pitcnt:', pit_total)
def get_ref_oracle_testds(plens, halfs, train_ratio=0.8, test_cars=None):
    """
    Build oracle-mode reference test datasets for every
    (prediction_length, half_moving_win) combination of the test event.

    input:
        plens ; list of prediction lengths
        halfs ; list of half_moving_win settings
        train_ratio ; split ratio forwarded to make_dataset_byevent
        test_cars ; optional list of car numbers to restrict the test set
    return:
        testset ; dict '<plen>-<halfwin>' -> test_ds
    """
    # bugfix: replace the shared mutable default argument (test_cars=[])
    if test_cars is None:
        test_cars = []
    testset = {}
    for prediction_length in plens:
        for half_moving_win in halfs:
            train_ds, test_ds, _, _ = make_dataset_byevent(events_id[_test_event], prediction_length, freq,
                                                           oracle_mode=MODE_ORACLE,
                                                           run_ts=_run_ts,
                                                           test_cars=test_cars,
                                                           half_moving_win=half_moving_win,
                                                           train_ratio=train_ratio)
            # key encodes the configuration of this dataset
            key = '%d-%d' % (prediction_length, half_moving_win)
            testset[key] = test_ds
    return testset
def checkret_confusionmat(dataret, ref_testset, runid= 0, testid = '', model='oracle'):
    """
    output the 4x4 confusion matrix split by track and lap status

    input:
        dataret ; alldata_ret from run_test (indexed by runid)
        ref_testset ; oracle test datasets keyed '<plen>-0' (see get_ref_oracle_testds)
        testid ; label written into the output frame
    return:
        dfacc ; dataframe with one accuracy row per (plen, laptype) plus an
                'aa' row aggregating all records of that plen
    """
    plen_length = len(dataret[runid])
    dflist = []
    for idx in range(plen_length):
        # recover plen from the stored forecast samples of this experiment
        _, plen = dataret[runid][idx][0][model][1][0].samples.shape
        test_ds = dataret[runid][idx][1][model]
        rank_ret = dataret[runid][idx][2][model][0]

        key = '%d-%d'%(plen,0)
        if key not in ref_testset:
            print(f'error, {key} not found in ref_testset')
            continue
        ref_oracle_testds = ref_testset[key]
        if len(ref_oracle_testds) != len(test_ds):
            print('error, size of testds mismatch', len(ref_oracle_testds), len(test_ds))
            continue

        # confusion matrix for <trackstatus, lapstatus> type: 00,01,10,11
        # lap(start lap of prediction) -> (yellow-flag count, pit count)
        lapmap = {}
        ds_iter = iter(ref_oracle_testds)
        for recid in range(len(ref_oracle_testds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            track_rec,lap_rec = test_rec['feat_dynamic_real']
            yfcnt = np.sum(track_rec[-plen:])
            pitcnt = np.sum(lap_rec[-plen:])
            #laptype = ('0' if yfcnt==0 else '1') + ('0' if pitcnt==0 else '1')
            lap = len(track_rec) - plen + 1
            if lap not in lapmap:
                #lapmap[lap] = laptype
                lapmap[lap] = (yfcnt, pitcnt)
            else:
                # accumulate counts across cars starting at the same lap
                oldtype = lapmap[lap]
                lapmap[lap] = (yfcnt + oldtype[0], pitcnt + oldtype[1])

        #split the rank_ret by laptype
        types=['00','10','01','11']
        acc_ret = []
        for laptype in types:
            check_ret = []
            for item in rank_ret:
                typecnt = lapmap[item[0]]
                # encode the lap as '<has-yellow><has-pit>'
                thetype = ('0' if typecnt[0]==0 else '1') + ('0' if typecnt[1]==0 else '1')
                if thetype == laptype:
                    check_ret.append(item)
            # get acc for this subset
            metrics = get_acc(check_ret,plen)
            recret = [testid, plen, laptype, len(check_ret)]
            recret.extend(metrics[0])
            acc_ret.append(recret)

        #add all test ('aa' aggregates every record of this plen)
        metrics = get_acc(rank_ret,plen)
        recret = [testid, plen, 'aa', len(rank_ret)]
        recret.extend(metrics[0])
        acc_ret.append(recret)

        _dfacc = pd.DataFrame(acc_ret, columns = ['testid','plen',
                                                  'type','reccnt','top1acc','top1acc_farmost','top5acc',
                                                  'top5acc_farmost','tau','rmse'])
        dflist.append(_dfacc)

    dfacc = pd.concat(dflist, axis=0)
    return dfacc
# In[9]:
def check_testds(datamode, test_cars=None):
    """
    Exercise make_dataset_byevent for every (prediction_length, half_moving_win)
    setting of the test event under the given datamode.

    NOTE(review): the constructed datasets are discarded — any output comes
    from make_dataset_byevent itself. Relies on module-level plens, half,
    events_id, _test_event, freq, _run_ts and train_ratio.
    """
    # bugfix: replace the shared mutable default argument (test_cars=[])
    if test_cars is None:
        test_cars = []
    for prediction_length in plens:
        for half_moving_win in half:
            train_ds, test_ds, _, _ = make_dataset_byevent(events_id[_test_event], prediction_length, freq,
                                                           oracle_mode=datamode,
                                                           run_ts=_run_ts,
                                                           test_cars=test_cars,
                                                           half_moving_win=half_moving_win,
                                                           train_ratio=train_ratio)
def dotest(config):
    """
    Run every configured (model, testfunc/datamode) combination and collect
    both the averaged metrics and the per-lap-type confusion breakdown.

    input:
        config ; {model: {testfunc_name: datamode}}
    return:
        dfret ; concatenated metric frames from run_test
        dfacc ; concatenated confusion frames from checkret_confusionmat
    Relies on module-level runs, plens, half, trainids, train_ratio, ref_testset.
    """
    metric_frames = []
    acc_frames = []
    for model, conf in config.items():
        for teststr, datamode in conf.items():
            df, dataret = run_test(runs, plens, half, trainids,
                                   train_ratio, teststr,
                                   datamode=datamode, models=[model])
            acc = checkret_confusionmat(dataret, ref_testset,
                                        testid=teststr, model=model)
            metric_frames.append(df)
            acc_frames.append(acc)
    dfret = pd.concat(metric_frames, axis=0)
    dfacc = pd.concat(acc_frames, axis=0)
    return dfret, dfacc
#
# simulator
#
def get_pitlaps(verbose = True, prediction_length=2):
    """
    collect pitlaps info from COL_LAPSTATUS

    input:
        laptime_data ;
        _test_event ;
        events
        _train_len ; minimum laps for a ts(otherwise, discard)
        global_carids ; carno-> carid mapping
    return:
        ret_pitlaps ; sorted unique laps at which any car pits
        all_pitlaps ; dict carno -> list of that car's pit laps
        max_lap ; longest ts length in the event
    """
    run_ts = _run_ts
    #all_pitlaps = [] # carno -> pitstops
    all_pitlaps = {} # carno -> pitstops
    max_lap = 0

    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < _train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            #print(f'carno:{carno}, totallen={totallen}')

            #first, get target a copy
            # target can be COL_XXSTATUS
            lap_status = rec[COL_LAPSTATUS, :]
            pitstops = np.where(lap_status == 1)[0]

            # filter out inlaps (when _inlap_status > 0)
            # NOTE(review): this assumes inlap/pitlap marks strictly alternate
            # in pairs — confirm against how COL_LAPSTATUS is built
            if _inlap_status !=0:
                if _inlap_status == 1:
                    #remove inlaps in previous of pit stops
                    pitstops_tmp = [pitstops[x] for x in range(1, len(pitstops), 2)]
                    pitstops = pitstops_tmp
                elif _inlap_status == 2:
                    #remove inlaps in next lap of pit stops
                    pitstops_tmp = [pitstops[x] for x in range(0, len(pitstops), 2)]
                    pitstops = pitstops_tmp

            #all_pitlaps.append(list(pitstops))
            all_pitlaps[carno] = list(pitstops)

            # append the end lap
            if _include_endpit:
                all_pitlaps[carno].append(totallen-1)

    #return: dedupe and sort the union of all cars' pit laps
    allset = []
    for l in all_pitlaps.keys():
        allset.extend(all_pitlaps[l])
    ret_pitlaps = sorted(list(set(allset)))

    return ret_pitlaps, all_pitlaps, max_lap
def get_nextpit_raw(pitlaps, startlap):
    """
    Find, for every car, the first pit lap strictly after ``startlap``.

    input:
        pitlaps ; dict carno -> list of pit laps for that car
        startlap ; reference lap
    return:
        nextpit_map ; carno -> next pit lap (cars with no later pit are omitted)
        maxpit ; max over the per-car search results (-1 marks "no later pit"),
                 or -1 when pitlaps is empty
    """
    nextpit = []
    nextpit_map = {}
    for carno, laps in pitlaps.items():
        # first pit lap after startlap, in list order
        for lap in laps:
            if lap > startlap:
                nextpit.append(lap)
                nextpit_map[carno] = lap
                break
        else:
            # no later pit for this car; keep -1 as a sentinel
            nextpit.append(-1)
    # bugfix: max() on an empty list raised ValueError when pitlaps was empty
    return nextpit_map, (max(nextpit) if nextpit else -1)
def get_nextpit(pitlaps, startlap):
    """
    Find each car's first pit lap strictly after ``startlap``, plus the
    latest such lap among the cars that pit exactly at ``startlap``.

    input:
        pitlaps ; dict carno -> list of pit laps for that car
        startlap ; reference lap
    return:
        nextpit_map ; carno -> next pit lap (cars with no later pit omitted)
        maxpit ; max next-pit lap over cars pitting at startlap, -1 if none
    """
    # cars that pit exactly at startlap
    pit_now = [carno for carno, laps in pitlaps.items() if startlap in laps]

    # first pit strictly after startlap, per car (list order preserved)
    nextpit_map = {}
    for carno, laps in pitlaps.items():
        later = [lap for lap in laps if lap > startlap]
        if later:
            nextpit_map[carno] = later[0]

    maxpit = max((nextpit_map[c] for c in pit_now if c in nextpit_map),
                 default=-1)
    return nextpit_map, maxpit
def sim_init():
    """
    Snapshot the pit-related feature rows of every car in the test event into
    their *_SAVE rows, so a simulation can later restore the ground truth.
    Mutates the global laptime_data in place.
    """
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        # statistics on the ts length (kept for parity; value unused)
        max_lap = int(np.max([_entry.shape[1] for _entry in _data[2]]))
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber]
            car_rec = _data[2][rowid]
            car_rec[COL_LAPSTATUS_SAVE, :] = car_rec[COL_LAPSTATUS, :]
            car_rec[COL_CAUTION_LAPS_INSTINT_SAVE, :] = car_rec[COL_CAUTION_LAPS_INSTINT, :]
            car_rec[COL_LAPS_INSTINT_SAVE, :] = car_rec[COL_LAPS_INSTINT, :]
def update_lapstatus(startlap):
    """
    Refresh the simulated lap status of every car in the test event from
    ``startlap`` onward by delegating to update_onets (mutates laptime_data).
    """
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        # statistics on the ts length (kept for parity; value unused)
        max_lap = int(np.max([_entry.shape[1] for _entry in _data[2]]))
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber]; carno is _data[1][rowid]
            update_onets(_data[2][rowid], startlap, _data[1][rowid])
# Global pit-stop prediction model used by update_onets()/debug_pitmodel();
# must be assigned an object with .predict(caution_laps_instint, laps_instint)
# before running a simulation.
_pitmodel = None
def update_onets(rec, startlap, carno):
    """
    update lapstatus after startlap based on the pit prediction model

    input:
        rec ; a ts with multiple features COL_XXX (mutated in place)
        startlap ; laps up to and including startlap are restored from the
                   *_SAVE rows; laps after it are re-simulated
        carno ; car number (for debug reporting only)
    return:
        None ; rec is updated for COL_LAPSTATUS, COL_CAUTION_LAPS_INSTINT,
               COL_LAPS_INSTINT
    """
    # loop from startlap
    nans, x= nan_helper(rec[_run_ts,:])
    nan_count = np.sum(nans)
    recx = rec[:, ~np.isnan(rec[_run_ts,:])]

    # remove short ts
    totallen = recx.shape[1]
    if startlap >= totallen:
        return

    #totallen = tsrec.shape[1]
    #ipdb.set_trace()

    #reset status :startlap + 1 (restore ground truth up to startlap)
    endpos = startlap + 1
    rec[COL_LAPSTATUS,:] = 0
    rec[COL_LAPSTATUS,:endpos] = rec[COL_LAPSTATUS_SAVE, :endpos]
    rec[COL_CAUTION_LAPS_INSTINT,:endpos] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :endpos]
    rec[COL_LAPS_INSTINT, :endpos] = rec[COL_LAPS_INSTINT_SAVE, :endpos]
    #rec[COL_LAPSTATUS,:] = rec[COL_LAPSTATUS_SAVE, :]
    #rec[COL_CAUTION_LAPS_INSTINT,:] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :]
    #rec[COL_LAPS_INSTINT, :] = rec[COL_LAPS_INSTINT_SAVE, :]

    debug_report('start update_onets', rec[COL_LAPSTATUS], startlap, carno)

    #loop on predict nextpit pos
    curpos = startlap
    while True:
        caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, curpos])
        laps_instint = int(rec[COL_LAPS_INSTINT, curpos])

        # model predicts the stint length; convert to an absolute lap position
        pred_pit_laps = _pitmodel.predict(caution_laps_instint, laps_instint)
        nextpos = curpos + pred_pit_laps - laps_instint

        #debug
        #if carno == 12:
        #    print('pitmodel: startlap={}, laps_instint={}, cuation_laps={}, \
        #            nextpos={}'.format(curpos, laps_instint, caution_laps_instint, nextpos))

        if nextpos >= totallen:
            # predicted pit falls beyond the race: fill the tail and stop
            nextpos = totallen - 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos+1] = caution_laps_instint
            for _pos in range(curpos+1, nextpos+1):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            break
        else:
            #if (pred_pit_laps > laps_instint) and (nextpos < totallen):
            # a valid pit
            rec[COL_LAPSTATUS, nextpos] = 1
            if _inlap_status != 0:
                #inlap is 'P'
                if _inlap_status == 1 :
                    #rec[COL_LAPSTATUS, nextpos-1] = _inlap_status
                    rec[COL_LAPSTATUS, nextpos-1] = 1
                else:
                    #todo: no boudary check
                    #rec[COL_LAPSTATUS, nextpos+1] = _inlap_status
                    rec[COL_LAPSTATUS, nextpos+1] = 1

            # carry the caution count up to the pit, then reset the stint
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos] = caution_laps_instint
            rec[COL_CAUTION_LAPS_INSTINT, nextpos] = 0
            for _pos in range(curpos+1, nextpos):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            rec[COL_LAPS_INSTINT, nextpos] = 0

            #go forward
            curpos = nextpos

    debug_report('after update_onets', rec[COL_LAPSTATUS], startlap, carno)
    return
def debug_pitmodel(startlap, carno, laps_instint, caution_laps_instint, samplecnt=1000):
    """
    Sample the pit model repeatedly to inspect the distribution of the
    predicted next-pit lap.

    return:
        list of predicted next-pit lap positions (length samplecnt)
    """
    base = startlap - laps_instint
    return [base + _pitmodel.predict(caution_laps_instint, laps_instint)
            for _ in range(samplecnt)]
# debug tracking status
# status matrix : laps x (endCol x 5 features)
# features: target, lapstatus, lap_instint, caution_instint, trackstatus
_status_mat = {} # stepid -> status matrix (filled by debug helpers)
def debug_report_mat(startlap, maxnext):
    """
    output the status of the simulation

    NOTE(review): unfinished — the function walks every car of the test event
    but never prints or stores anything yet; fixedWidth/endCol/maxnext are
    placeholders for the intended fixed-width table output.
    """
    fixedWidth = 5
    endCol = 4
    run_ts = _run_ts
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))

        #header carno | lap#...
        #fixed width

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
# Car numbers to trace in the debug_* helpers; empty list disables all output.
_debug_carlist = []
#_debug_carlist = [12]
def debug_report_ts(msg, rec, startlap, carno, col= COL_LAPSTATUS):
    """Print one feature row of ``rec`` split at ``startlap`` (watched cars only)."""
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[col, : startlap + 1])
        print('='*10)
        print(rec[col, startlap + 1:])
def debug_report(msg, rec, startlap, carno):
    """Print a 1-D array split at ``startlap`` (watched cars only)."""
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[: startlap + 1])
        print('='*10)
        print(rec[startlap + 1:])
def debug_print(msg):
    """Print ``msg`` only when at least one car is being watched."""
    if _debug_carlist:
        print(msg)
# works on predicted lap status
def sim_onestep_pred(predictor, prediction_length, freq,
                startlap, endlap,
                oracle_mode = MODE_ORACLE,
                sample_cnt = 100,
                verbose = False
               ):
    """
    Roll the forecaster forward from startlap to endlap in steps of
    prediction_length, feeding each window's predictions back into the next
    window's input (autoregressive long-term forecasting on the *predicted*
    lap status in COL_LAPSTATUS).

    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
        forecasts_samples; save the samples, the farest samples
            {}, carno -> samplecnt of the target
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len

    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    forecasts_samples = {}
    _laptime_data = laptime_data.copy()

    # endpos is the exclusive end of the model input for the current window
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + prediction_length + 1):
    while(endpos <= endlap + prediction_length):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue

            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            #ipdb.set_trace()

            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()

                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]

                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                if endpos > totallen:
                    continue

                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]

                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()

                # <3, totallen> — lazy init of the per-car result matrix
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS_SAVE, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                    # for p-risk
                    forecasts_samples[carno] = np.zeros((sample_cnt))

                # forecasts_et will be updated by forecasts (this is a view)
                target_val = forecasts_et[carno][2,:]

                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0

                test_rec_cnt = 0

                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                # test mode
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #                        laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage

                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                                )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                                )
                test_rec_cnt += 1

                #debug
                #debug_report('simu_onestep', rec, startlap, carno, col= _run_ts)
                #debug_report(f'simu_onestep: {startlap}-{endlap}, endpos={endpos}', target_val[:endpos], startlap, carno)

        # end of for each ts
        # RUN Prediction here
        test_ds = ListDataset(_test, freq=freq)

        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=sample_cnt,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)

        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]

            if _use_mean:
                forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            else:
                forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_length))

            #update the forecasts , ready to use in the next prediction(regresive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
            #save the samples, the farest samples
            forecasts_samples[carno][:] = forecasts[idx].samples[:,-1].reshape(-1)

        #go forward
        endpos += prediction_length

    return forecasts_et, forecasts_samples
# works on lapstatus ground truth
def sim_onestep_ex(predictor, prediction_length, freq,
                startlap, endlap,
                oracle_mode = MODE_ORACLE,
                verbose = False
               ):
    """
    Autoregressive window-by-window forecast from startlap to endlap, working
    on the ground-truth lap status in COL_LAPSTATUS (unlike sim_onestep_pred,
    which works on the predicted status and also returns samples).

    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len

    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()

    # endpos is the exclusive end of the model input for the current window
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + 1):
    while(endpos <= endlap + prediction_length + 1):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue

            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))

            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()

                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]

                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                if endpos > totallen:
                    continue

                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]

                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()

                # <3, totallen> — lazy init of the per-car result matrix
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                    #forecasts_et[carno][2,:endpos] = rec[run_ts,:endpos].copy().astype(np.float32)

                # forecasts_et will be updated by forecasts (this is a view)
                target_val = forecasts_et[carno][2,:]

                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0

                test_rec_cnt = 0

                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                # test mode
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #                        laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage

                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                                )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                                )
                test_rec_cnt += 1

        # end of for each ts
        # RUN Prediction here
        test_ds = ListDataset(_test, freq=freq)

        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=100,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)

        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            #update the forecasts , ready to use in the next prediction(regresive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()

        #go forward
        endpos += prediction_length

    #clear the unpred part
    for carno in forecasts_et.keys():
        forecasts_et[carno][2, endlap+1:] = np.nan

    return forecasts_et
def sim_onestep(predictor, prediction_length, freq,
                startlap, endlap,
                oracle_mode = MODE_ORACLE,
               verbose = False
        ):
    """
    Autoregressive simulation of the test event from `startlap` to `endlap`.

    For every car in the test event the target series is forecast one
    `prediction_length` window at a time; each window's mean forecast is
    written back into the series (and into the covariate rows) so the next
    window is predicted on top of the model's own output.

    input:
        predictor ; trained GluonTS predictor
        prediction_length ; forecast horizon per step, in laps
        freq      ; pandas frequency string for the dummy timestamps
        startlap  ; first lap of the simulated range
        endlap    ; simulation stops before this lap
        oracle_mode ; MODE_* bit flags selecting how the track/lap/pit-age
                      covariates of each forecast window are filled
        verbose   ; print per-car statistics
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target (true values overwritten inside the simulated range)
            3, -> placeholder
            4, -> placeholder
    """
    # module-level experiment configuration overrides
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _test = []
        # only the configured test event is simulated
        if events[_data[0]] != test_event:
            #jump out
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()
            # <3, totallen>
            # output matrix: rows 0-2 filled here, rows 3-4 left NaN for the
            # later rank-evaluation stage
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if True:
                #bug fix, fixed the split point for all cars/ts
                #for endpos in range(max_len, context_len+prediction_length,step):
                #step = prediction_length
                #for endpos in range(startlap + prediction_length, endlap, step):
                endpos = startlap + prediction_length
                while(endpos < endlap and endpos < totallen):
                    # RUN Prediction for single record
                    _test = []
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    # stint state at the last observed lap before this window
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        # NOTE(review): unlike longterm_predict, lap_rec itself is
                        # NOT overwritten here (the get_pit_model call is commented
                        # out); only the pit age is rebuilt from the existing lap_rec.
                        #todo
                        #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                        #            laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                              )
                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)
                    #get prediction result
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
                    #update target_val
                    # feed the mean forecast (and the window's covariates) back
                    # into the series so the next window is autoregressive
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
                    #save forecast
                    #save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    #print('car no:', carno, 'completed_laps:', completed_laps)
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
                    test_rec_cnt += 1
                    #go forward
                    endpos += prediction_length
            #one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
    return forecasts_et
# pred pit differs to true pit
def get_acc_onestint_pred(forecasts, startlap, nextpit, nextpit_pred, trim=2, currank = False):
    """
    Score one stint per car when the predicted pit lap may differ from the
    true pit lap: rank change from `startlap` to the true/predicted next pit.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; eval for the stint start from startlap only
        nextpit ; dict carno -> true next pit lap
        nextpit_pred ; dict carno -> predicted next pit lap
        currank ; True scores the "rank stays the same" baseline instead of
                  the model's predicted end rank
    output:
        list of rows [carno, startlap, startrank, endrank, diff, sign,
                      pred_endrank, pred_diff, pred_sign, pitpos, pitpos_pred]
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]
        #lap status condition
        # _inlap_status (module global) selects how strictly "startlap is a pit
        # lap" is checked; NOTE(review): any other value leaves lapstatus_cont
        # unbound and raises NameError below -- presumably only 0/1/2 occur.
        if _inlap_status == 0:
            lapstatus_cont = (forecasts[carno][0, startlap] == 1)
        elif _inlap_status == 1:
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap-1] == 1))
        elif _inlap_status == 2:
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap+1] == 1))
        if carno in _debug_carlist:
            _debug_msg = 'startlap=%d, total=%d, pitstop status = %s, nextpit=%s, nextpit_pred=%s'%(startlap, lapnum, lapstatus_cont,
                    'none' if (carno not in nextpit) else nextpit[carno],
                    'none' if (carno not in nextpit_pred) else nextpit_pred[carno],
                    )
            debug_print(_debug_msg)
        # check the lap status
        #if ((startlap < lapnum) and (forecasts[carno][0, startlap] == 1)):
        if ((startlap < lapnum) and (lapstatus_cont == True)):
            startrank = true_rank[startlap-trim]
            if not carno in nextpit:
                continue
            pitpos = nextpit[carno]
            if np.isnan(pitpos):
                continue
            #todo, use the true prediction that longer than maxlap
            if _force_endpit_align:
                # fall back to the true pit lap when no usable prediction exists
                if not carno in nextpit_pred:
                    #continue
                    pitpos_pred = pitpos
                else:
                    pitpos_pred = nextpit_pred[carno]
                    if np.isnan(pitpos_pred):
                        pitpos_pred = pitpos
            else:
                if not carno in nextpit_pred:
                    continue
                pitpos_pred = nextpit_pred[carno]
                if np.isnan(pitpos_pred):
                    #set prediction to the end
                    continue
            endrank = true_rank[pitpos-trim]
            #endrank_pred = true_rank[pitpos_pred-trim]
            diff = endrank - startrank
            sign = get_sign(diff)
            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                pred_endrank = pred_rank[pitpos_pred-trim]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            rankret.append([carno, startlap, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign,
                            pitpos, pitpos_pred
                           ])
    return rankret
# pred pit differs to true pit
def get_acc_onestep_shortterm(forecasts, startlap, endlap, trim=0, currank = False):
    """
    Score the short-term rank change between startlap and endlap for every car.

    input:
        forecasts ; carno -> [5, totallen] matrix with rows
                    0: lap_status, 3: true_rank, 4: pred_rank
        startlap  ; lap the evaluation window starts at
        endlap    ; lap the evaluation window ends at (car skipped when NaN)
        trim      ; offset subtracted from both lap indices before reading ranks
        currank   ; True scores the "rank stays the same" baseline instead of
                    the model prediction
    output:
        list of rows [carno, startlap, startrank, endrank, diff, sign,
                      pred_endrank, pred_diff, pred_sign]
    """
    rows = []
    for carno, mat in forecasts.items():
        total_laps = mat.shape[1]
        if startlap >= total_laps:
            continue
        truth = mat[3, :]
        model = mat[4, :]
        rank0 = truth[startlap - trim]
        if np.isnan(endlap):
            continue
        rank1 = truth[endlap - trim]
        delta = rank1 - rank0
        # baseline predicts "no change"; otherwise read the model's rank row
        rank1_hat = rank0 if currank else model[endlap - trim]
        delta_hat = rank1_hat - rank0
        rows.append([carno, startlap, rank0,
                     rank1, delta, get_sign(delta),
                     rank1_hat, delta_hat, get_sign(delta_hat)])
    return rows
# works when pred pitstop == true pitstop
def get_acc_onestint(forecasts, startlap, nextpit, trim=2, currank = False):
    """
    Score one stint per car, assuming the predicted pit lap equals the true one.

    input:
        forecasts ; carno -> [5, totallen] matrix with rows
                    0: lap_status, 3: true_rank, 4: pred_rank
        startlap  ; evaluate only stints that start (pit) at this lap
        nextpit   ; carno -> next pit lap; NaN/missing cars are skipped
        trim      ; laps backed off from the pit lap to read a steady rank
        currank   ; True scores the "rank stays the same" baseline
    output:
        list of rows [carno, startlap, startrank, endrank, diff, sign,
                      pred_endrank, pred_diff, pred_sign]
    """
    rows = []
    for carno, mat in forecasts.items():
        total_laps = mat.shape[1]
        # only cars that actually pit at startlap enter the evaluation
        if not (startlap < total_laps and mat[0, startlap] == 1):
            continue
        truth = mat[3, :]
        model = mat[4, :]
        rank0 = truth[startlap - trim]
        if carno not in nextpit:
            continue
        pitpos = nextpit[carno]
        if np.isnan(pitpos):
            continue
        rank1 = truth[pitpos - trim]
        delta = rank1 - rank0
        # baseline predicts "no change"; otherwise read the model's rank row
        rank1_hat = rank0 if currank else model[pitpos - trim]
        delta_hat = rank1_hat - rank0
        rows.append([carno, startlap, rank0,
                     rank1, delta, get_sign(delta),
                     rank1_hat, delta_hat, get_sign(delta_hat)])
    return rows
#
# simulation
#
def run_simulation_stint(predictor, prediction_length, freq,
        carno, stintid, loopcnt,
        datamode = MODE_ORACLE):
    """
    Repeated (Monte-Carlo) simulation of one stint for one car.

    Runs the stint simulation `loopcnt` times from the same start lap so the
    sampled pit model produces a distribution of outcomes.

    input:
        carno   ; car to evaluate
        stintid ; index into the car's pit-lap list (selects the start lap)
        loopcnt ; number of repeated simulation runs
        datamode ; oracle-mode flags forwarded to sim_onestep_pred
    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation
            2. eval stint performance
    return:
        (df, full_samples, full_tss, maxnext_pred)
        NOTE(review): full_samples/full_tss are returned empty -- the
        eval_full_samples call below is commented out.
    """
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    #init samples array
    full_samples = {}
    full_tss = {}
    #here, test only one stint for carno and stintid
    pitlap = pitmat[carno][stintid]
    for runid in range(loopcnt):
    #for pitlap in allpits:
        #1. update lap status
        debug_print(f'start pitlap: {pitlap}')
        # _pitmodel == 'oracle' keeps the ground-truth lap status; otherwise
        # the pit model re-samples future lap status up to pitlap
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        #2. get maxnext
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        #only for one car
        maxnext = nextpit[carno]
        maxnext_pred = nextpit_pred[carno]
        #debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                #print('nextpit:', nextpit[12], nextpit_pred[12], 'maxnext:', maxnext, maxnext_pred)
                #debugstr = f'nextpit: {nextpit[]}, {nextpit_pred[12]}, maxnext: {maxnext}, {maxnext_pred}'
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)
        #run one step sim from pitlap to maxnext
        #to get the forecast_sample, set max = mexnext_pred only,
        #rather than max(maxnext,maxnext_pred)
        #
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, maxnext_pred,
                oracle_mode = datamode,
                sample_cnt = 100
                )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            return
        ## evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        #add endlap
        #_ = [x.append(maxnext_pred) for x in ret]
        rankret.extend(ret)
        ## add to full_samples
        #eval_full_samples(maxnext_pred,
        #        forecast_samples, forecast,
        #        full_samples, full_tss)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                            'endrank', 'diff', 'sign',
                            'pred_endrank', 'pred_diff', 'pred_sign',
                            'endlap','pred_endlap'
                            ])
    return df, full_samples, full_tss, maxnext_pred
def run_simulation_pred(predictor, prediction_length, freq,
        datamode = MODE_ORACLE):
    """
    Simulation driver with a *predicted* pit model: walk every true pit lap,
    re-sample future lap status, forecast forward, and score each stint
    against both the true and the predicted next pit lap.

    input:
        predictor ; trained GluonTS predictor
        prediction_length, freq ; forecasting window and series frequency
        datamode ; oracle-mode flags forwarded to sim_onestep_pred
    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation
            2. eval stint performance
    return:
        DataFrame with one scored stint per row (incl. endlap/pred_endlap)
    """
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    for pitlap in allpits:
        #1. update lap status
        debug_print(f'start pitlap: {pitlap}')
        # 'oracle' pit model keeps the ground-truth lap status
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        #2. get maxnext
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        #debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                #print('nextpit:', nextpit[12], nextpit_pred[12], 'maxnext:', maxnext, maxnext_pred)
                #debugstr = f'nextpit: {nextpit[]}, {nextpit_pred[12]}, maxnext: {maxnext}, {maxnext_pred}'
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)
        #run one step sim from pitlap to maxnext
        # simulate far enough to cover both the true and predicted next pit
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, max(maxnext, maxnext_pred),
                oracle_mode = datamode,
                sample_cnt = 100
                )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        rankret.extend(ret)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                            'endrank', 'diff', 'sign',
                            'pred_endrank', 'pred_diff', 'pred_sign',
                            'endlap','pred_endlap'
                            ])
    return df
#prediction of shorterm + pred pit model
def run_simulation_shortterm(predictor, prediction_length, freq,
        datamode = MODE_ORACLE,
        sample_cnt = 100):
    """
    Short-term simulation driver: slide a fixed `prediction_length` window
    over the race (from lap 10) with the predicted pit model, score each
    window, and accumulate per-lap rank samples.

    input:
        predictor ; trained GluonTS predictor
        prediction_length, freq ; forecasting window and series frequency
        datamode  ; oracle-mode flags forwarded to sim_onestep_pred
        sample_cnt; number of sample paths per forecast
    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation
            2. eval stint performance
    return:
        (df, full_samples, full_tss) ; scored windows plus the per-lap rank
        sample/truth arrays built by eval_full_samples
    """
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    #init samples array
    full_samples = {}
    full_tss = {}
    for pitlap in range(10, maxlap-prediction_length):
        #1. update lap status
        debug_print(f'start pitlap: {pitlap}')
        # 'oracle' pit model keeps the ground-truth lap status
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        #run one step sim from pitlap to maxnext
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, pitlap + prediction_length,
                oracle_mode = datamode,
                sample_cnt = sample_cnt
                )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this stint
        #ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred)
        ret = get_acc_onestep_shortterm(forecasts_et, pitlap, pitlap+prediction_length)
        rankret.extend(ret)
        # add to full_samples
        eval_full_samples(pitlap + prediction_length,
                forecast_samples, forecast,
                full_samples, full_tss)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                            'endrank', 'diff', 'sign',
                            'pred_endrank', 'pred_diff', 'pred_sign',
                            ])
    return df, full_samples, full_tss
# oracle sim
def run_simulation(predictor, prediction_length, freq,
        datamode = MODE_ORACLE):
    """
    Oracle-mode simulation driver: walk every true pit lap, forecast forward
    to the farthest next pit among all cars, convert forecasts to ranks and
    score each stint.

    input:
        predictor ; trained GluonTS predictor
        prediction_length, freq ; forecasting window and series frequency
        datamode ; oracle-mode flags forwarded to sim_onestep_ex
    return:
        DataFrame with one scored stint per row
    """
    scored = []
    allpits, pitmat, maxlap = get_pitlaps()
    for pitlap in allpits:
        print(f'start pitlap: {pitlap}')
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        # one simulation step forward from this pit lap to the farthest next pit
        forecast = sim_onestep_ex(predictor, prediction_length, freq,
                pitlap, maxnext,
                oracle_mode = datamode
                )
        print(f'simulation done: {len(forecast)}')
        # turn the target forecasts into per-lap ranks
        if _exp_id == 'rank' or _exp_id == 'timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id == 'laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        scored.extend(get_acc_onestint(forecasts_et, pitlap, nextpit))
    return pd.DataFrame(scored, columns =['carno', 'startlap', 'startrank',
                            'endrank', 'diff', 'sign',
                            'pred_endrank', 'pred_diff', 'pred_sign',
                            ])
#
# ------------
#
def longterm_predict(predictor, runs, prediction_length, freq,
        useeid = False,
        run_ts= COL_LAPTIME,
        test_event = 'Indy500-2018',
        test_cars = [],
        use_global_dict = True,
        oracle_mode = MODE_ORACLE,
        half_moving_win = 0,
        train_ratio=0.8,
        log_transform = False,
        verbose = False
        ):
    """
    Rolling long-term prediction over the test event.

    Splits each car's series at a fixed context point, then slides a window of
    `prediction_length` across the race; each window's mean forecast is fed
    back into the series (autoregressive). When a pit stop falls inside the
    previous window the split point is re-aligned just after the pit and the
    ground-truth series is restored first.

    input:
        predictor   ; trained GluonTS predictor
        runs        ; index into laptime_data for a single event, or <0 for all
        useeid      ; include the event id in feat_static_cat
        run_ts, test_event ; overridden below by module globals _run_ts/_test_event
        test_cars   ; NOTE(review): never read in this body (mutable default)
        use_global_dict ; map car numbers through global_carids
        oracle_mode ; false to simulate prediction in real by
            set the covariates of track and lap status as nan in the testset
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen (window step size)
        train_ratio ; fraction of max series length used as the train split
        log_transform ; predict log(target + 1) instead of the raw target
        verbose     ; print per-event/per-car statistics
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
    """
    # module globals override the matching keyword arguments
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    init_track_model()
    init_adjust_track_model()
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []
    forecasts_et = {}
    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        # lower bound on the observed context
        if context_len < 10:
            context_len = 10
        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()
            # <3, totallen>
            # output matrix: rows 0-2 filled here, rows 3-4 left NaN for the
            # later rank-evaluation stage
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                        'start': start,
                        'feat_static_cat': static_cat,
                        'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                            rec[COL_LAPSTATUS,:]]
                         }
                      )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = int(prediction_length/2)
                elif half_moving_win == 2:
                    step = prediction_length
                else:
                    step = 1
                #bug fix, fixed the split point for all cars/ts
                #for endpos in range(max_len, context_len+prediction_length,step):
                for endpos in range(context_len+prediction_length, max_len, step):
                    #check if enough for this ts
                    if endpos > totallen:
                        break
                    # RUN Prediction for single record
                    _test = []
                    # check pitstop(stint) in the last prediction
                    # use ground truth of target before the last pitstop
                    if np.sum(lap_status[endpos-2*prediction_length:endpos-prediction_length]) > 0:
                        # pit found
                        # adjust endpos
                        pitpos = np.where(lap_status[endpos-2*prediction_length:endpos-prediction_length] == 1)
                        endpos = endpos-2*prediction_length + pitpos[0][0] + prediction_length + 1
                        #print('endpos:',endpos,pitpos)
                        #check if enough for this ts
                        if endpos > totallen:
                            break
                    #reset target, status
                    # restore ground truth before forecasting the new window
                    target_val = rec[run_ts,:].copy().astype(np.float32)
                    rec[COL_LAPSTATUS, :] = lap_status
                    rec[COL_TRACKSTATUS, :] = track_status
                    rec[COL_LAPS_INSTINT, :] = pitage_status
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    # stint state at the last observed lap before this window
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec
                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec
                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    # accumulate covariate error of the adjusted window vs ground truth
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                              )
                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)
                    #get prediction result
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
                    #update target_val
                    # feed the mean forecast and window covariates back into
                    # the series (autoregressive continuation)
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
                    #save forecast
                    #save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    #print('car no:', carno, 'completed_laps:', completed_laps)
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
                    test_rec_cnt += 1
            #one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
        #train_set.extend(_train)
        #test_set.extend(_test)
    #print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    #train_ds = ListDataset(train_set, freq=freq)
    #test_ds = ListDataset(test_set, freq=freq)
    return forecasts_et
# In[12]:
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset):
    """
    Derive true/predicted ranks from laptime forecasts.

    Cumulative-sums each car's laptimes (plus its lap-0 elapsed-time offset)
    into elapsed time, ranks all cars per lap by elapsed time, and writes the
    ranks back into rows 3 (true) and 4 (pred) of each car's matrix.

    input:
        forecasts_et ; carno -> 5 x totallen matrix
            1,: -> true laptime, 2,: -> predicted laptime
        prediction_length ; unused here, kept for interface compatibility
        start_offset ; DataFrame with 'car_number'/'elapsed_time' columns
    return:
        the same dict, mutated in place (rows 3/4 filled)
    """
    carlist = list(forecasts_et.keys())
    car_idx = {carno: pos for pos, carno in enumerate(carlist)}
    # Indy500 upper bound on the number of laps
    maxlap = 200
    elapsed = np.full((2, len(carlist), maxlap), np.nan)
    for carno, mat in forecasts_et.items():
        # per-car elapsed time at lap 0
        if isinstance(start_offset, pd.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        nlaps = mat.shape[1]
        row = car_idx[carno]
        elapsed[0, row, :nlaps] = np.cumsum(mat[1,:]) + offset
        elapsed[1, row, :nlaps] = np.cumsum(mat[2,:]) + offset
    # double argsort turns elapsed time into a per-lap rank (NaNs sort last)
    true_rank = np.argsort(np.argsort(elapsed[0], axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(elapsed[1], axis=0), axis=0)
    # write the ranks back into each car's matrix
    for carno, mat in forecasts_et.items():
        nlaps = mat.shape[1]
        row = car_idx[carno]
        mat[3,:] = true_rank[row, :nlaps]
        mat[4,:] = pred_rank[row, :nlaps]
    return forecasts_et
#
def eval_full_samples(lap, forecast_samples, forecast, full_samples, full_tss, maxlap=200):
    """
    Fold one lap's rank forecast into the running per-car sample/truth arrays.

    Ranks all cars against each other (per lap for the truth, per sample path
    for the prediction) and stores the result at column `lap` of the output
    arrays, creating them lazily on first use.

    input:
        lap              ; lap number being evaluated
        forecast_samples ; carno -> 1-D array of sampled predicted targets
        forecast         ; carno -> 5 x totallen matrix (row 1 = true target)
        full_samples     ; carno -> (samplecnt, maxlap) rank samples (mutated)
        full_tss         ; carno -> (maxlap,) true ranks (mutated)
        maxlap           ; width of the output arrays
    return:
        None (results accumulated into full_samples / full_tss)
    """
    carlist = list(forecast.keys())
    car_idx = {carno: pos for pos, carno in enumerate(carlist)}
    n_samples = len(forecast_samples[carlist[0]])
    truth = np.full((len(carlist), maxlap), np.nan)
    sampled = np.full((len(carlist), n_samples), np.nan)
    for carno in carlist:
        row = car_idx[carno]
        ncols = len(forecast[carno][1, :])
        truth[row, :ncols] = forecast[carno][1, :]
        sampled[row, :] = forecast_samples[carno]
    # double argsort turns values into per-column ranks (NaNs rank last)
    true_rank = np.argsort(np.argsort(truth, axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(sampled, axis=0), axis=0)
    for carno in carlist:
        row = car_idx[carno]
        if carno not in full_tss:
            # lazy init, NaN-filled
            full_tss[carno] = np.full(maxlap, np.nan)
            full_samples[carno] = np.full((n_samples, maxlap), np.nan)
        full_tss[carno][:lap] = true_rank[row][:lap]
        full_tss[carno][lap] = true_rank[row][lap]
        full_samples[carno][:, lap] = pred_rank[row, :]
    return
def eval_stint_direct(forecasts_et, prediction_length):
    """
    Derive true/predicted ranks directly from a timediff-style target.

    Ranks all cars per lap by the raw target value (row 1 = true, row 2 =
    predicted) and writes the ranks back into rows 3 and 4 of each car's
    matrix.

    input:
        forecasts_et ; carno -> 5 x totallen matrix
            1,: -> true target, 2,: -> predicted target
        prediction_length ; unused here, kept for interface compatibility
    return:
        the same dict, mutated in place (rows 3/4 filled)
    """
    carlist = list(forecasts_et.keys())
    car_idx = {carno: pos for pos, carno in enumerate(carlist)}
    # Indy500 upper bound on the number of laps
    maxlap = 200
    timediff = np.full((2, len(carlist), maxlap), np.nan)
    for carno, mat in forecasts_et.items():
        nlaps = mat.shape[1]
        row = car_idx[carno]
        timediff[0, row, :nlaps] = mat[1,:]
        timediff[1, row, :nlaps] = mat[2,:]
    # double argsort turns values into per-lap ranks (NaNs rank last)
    true_rank = np.argsort(np.argsort(timediff[0], axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(timediff[1], axis=0), axis=0)
    # write the ranks back into each car's matrix
    for carno, mat in forecasts_et.items():
        nlaps = mat.shape[1]
        row = car_idx[carno]
        mat[3,:] = true_rank[row, :nlaps]
        mat[4,:] = pred_rank[row, :nlaps]
    return forecasts_et
#calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    Converts per-lap laptimes (row 1 = true, row 2 = pred) into elapsed
    times, ranks the cars per lap, and writes the ranks back in place.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
        prediction_length ; unused here, kept for interface compatibility
        start_offset ; DataFrame with 'car_number'/'elapsed_time' columns
            holding the lap-0 elapsed time; any other value means no offset
    return:
        forecasts_et ; rows 3 (true rank) and 4 (pred rank) filled in place
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    caridmap = {key: idx for idx, key in enumerate(carlist)}

    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:, :] = np.nan

    for carno in forecasts_et.keys():
        # bug fix: `offset` used to be left unbound (NameError) when
        # start_offset was not a DataFrame; default to no offset instead
        offset = 0.0
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number'] == carno)].elapsed_time.values[0]

        lapnum = len(forecasts_et[carno][1, :])
        laptime_array = forecasts_et[carno][1, :]
        elapsed_time[0, caridmap[carno], :lapnum] = np.cumsum(laptime_array) + offset

        laptime_array = forecasts_et[carno][2, :]
        elapsed_time[1, caridmap[carno], :lapnum] = np.cumsum(laptime_array) + offset

    #calculate rank, support nan (nans sort last)
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)

    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1, :])
        forecasts_et[carno][3, :] = true_rank[caridmap[carno], :lapnum]
        forecasts_et[carno][4, :] = pred_rank[caridmap[carno], :lapnum]

    return forecasts_et
# In[13]:
def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
               test_event='Indy500-2018', test_cars=None,
               datamode=MODE_ORACLE, model='oracle'):
    """
    Load a trained model and run long-term forecasting on the test event.

    dependency: test_event, test on one event only
    NOTE(review): the test_event parameter is currently shadowed by the
    module-level _test_event/_run_ts globals passed to longterm_predict.

    input:
        prediction_length ; forecast horizon in laps
        half_moving_win ; rolling-window mode forwarded to the predictor
        train_ratio, trainid ; identify the trained model to load
        test_cars ; optional list of car numbers restricting the test set
        datamode ; MODE_* bit flags controlling oracle/predicted covariates
        model ; predictor name, e.g. 'oracle', 'deepAR', 'zero'
    return:
        forecasts dict produced by longterm_predict
    """
    # avoid a shared mutable default argument
    if test_cars is None:
        test_cars = []

    predictor = {}
    print('exp:',inspect.stack()[0][3],'model:', model,
          'datamode:', get_modestr(datamode),'eval:', _exp_id )

    predictor[model] = load_model(prediction_length, model,
                                  trainid=trainid)

    ### create test dataset
    forecasts = longterm_predict(predictor[model],
                                 events_id[_test_event], prediction_length, freq,
                                 oracle_mode=datamode,
                                 run_ts=_run_ts,
                                 test_cars=test_cars,
                                 half_moving_win=half_moving_win,
                                 train_ratio=train_ratio
                                 )
    return forecasts
# In[14]:
def get_sign(diff):
    """Return the sign of *diff*: 1 positive, -1 negative, 0 otherwise."""
    if diff > 0:
        return 1
    if diff < 0:
        return -1
    return 0
def get_stint_acc(forecasts, trim=2, currank=False):
    """
    Extract per-stint rank changes (true vs predicted).

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts ; carno -> [5, totallen]
            0; lap_status (1 marks a pit lap)
            3; true_rank
            4; pred_rank
        currank ; True forces the naive "no change" prediction
    output:
        DataFrame with columns carno, stintid, startrank, endrank, diff,
        sign, pred_endrank, pred_diff, pred_sign
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1, :])
        true_rank = forecasts[carno][3, :]
        pred_rank = forecasts[carno][4, :]

        pitpos_list = np.where(forecasts[carno][0, :] == 1)[0]

        stintid = 0
        startrank = true_rank[0]
        for pitpos in pitpos_list:
            # NOTE(review): pitpos - trim wraps to the tail when pitpos < trim;
            # assumes the first pit happens after `trim` laps -- TODO confirm
            endrank = true_rank[pitpos - trim]
            diff = endrank - startrank
            sign = get_sign(diff)

            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
            else:
                pred_endrank = pred_rank[pitpos - trim]
            pred_diff = pred_endrank - startrank
            pred_sign = get_sign(pred_diff)

            rankret.append([carno, stintid, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                            ])

            stintid += 1
            startrank = true_rank[pitpos - trim]

        # final stint after the last pit; bug fix: a car without any pit
        # stop used to crash with IndexError on pitpos_list[-1]
        if len(pitpos_list) == 0 or pitpos_list[-1] < lapnum - 1:
            endrank = true_rank[-1]
            diff = endrank - startrank
            sign = get_sign(diff)

            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
            else:
                pred_endrank = pred_rank[-1]
            pred_diff = pred_endrank - startrank
            pred_sign = get_sign(pred_diff)

            rankret.append([carno, stintid, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                            ])

    #add to df
    df = pd.DataFrame(rankret, columns=['carno', 'stintid', 'startrank',
                                        'endrank', 'diff', 'sign',
                                        'pred_endrank', 'pred_diff', 'pred_sign',
                                        ])
    return df
#
# configuration
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'
_test_event = 'Indy500-2018'
#_test_event = 'Indy500-2019'
_train_len = 40
_feature_mode = FEATURE_STATUS
_context_ratio = 0.
#_task_id = 'timediff' # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
#
#_task_id = 'lapstatus' # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus' #rank, laptime, laptim2rank, timediff2rank...
_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
_inlap_status = 1
_force_endpit_align = False
_include_endpit = False
_use_mean = False # mean or median to get prediction from samples
# In[16]:
# module-level state filled in by init()
global_start_offset = {}   # event -> DataFrame of lap-0 elapsed time per car
global_carids = {}         # carno -> encoded car id (inverse of decode_carids)
laptime_data = None        # per-event dataset loaded from the pickle
freq = "1min"
decode_carids = {}         # encoded car id -> carno
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
def init(pitmodel = ''):
    """
    Load all event csvs and the cached laptime pickle, and populate the
    module-level globals used by the simulator.

    input:
        pitmodel ; '' / non-str -> PitModelSimple,
                   'oracle' -> keep the string (use true pit laps),
                   any other str -> model file path for PitModelMLP
    side effects:
        sets global_carids, laptime_data, global_start_offset,
        decode_carids, _pitmodel, dbid
    """
    global global_carids, laptime_data, global_start_offset, decode_carids,_pitmodel
    global dbid, _inlap_status
    dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
    stagedata = {}
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata = stagedata[event]
        #offset: elapsed time at lap 0 per car, consumed by the rank evaluators
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]
    # start from here
    import pickle
    #with open('laptime_rank_timediff_fulltest-oracle-%s.pickle'%year, 'rb') as f:
    laptimefile = f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
    with open(laptimefile, 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    decode_carids={carid:carno for carno, carid in global_carids.items()}
    print(f'init: load dataset {laptimefile} with {len(laptime_data)} races, {len(global_carids)} cars')
    if not isinstance(pitmodel, str):
        # NOTE(review): only pitmodel==0 selects top8=True here -- confirm intent
        _pitmodel = PitModelSimple(top8=(True if pitmodel==0 else False))
        print(f'init pitmodel as PitModelSimple')
    elif pitmodel=='oracle':
        _pitmodel = pitmodel
    else:
        _pitmodel = PitModelMLP(modelfile = pitmodel)
        print(f'init pitmodel as PitModelMLP(pitmodel)')
def runtest(modelname, model, datamode, naivemode, trainid="2018"):
    """
    Run one forecasting experiment and score stint-level rank changes.

    input:
        modelname ; label of the configuration (not used in computation)
        model ; predictor name passed to run_exp
        datamode ; MODE_* bit flags for covariate handling
        naivemode ; True evaluates the "current rank" baseline instead
        trainid ; identifier of the trained model
    return:
        (acc, mae, rmse, r2) of predicted rank change vs the truth;
        (0, 0, 0, 0) when _exp_id is unsupported
    """
    forecast = run_exp(2, 2, train_ratio=0.1, trainid=trainid,
                       datamode=datamode, model=model)

    if _exp_id == 'rank' or _exp_id == 'timediff2rank':
        forecasts_et = eval_stint_direct(forecast, 2)
    elif _exp_id == 'laptime2rank':
        forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
    else:
        print(f'Error, {_exp_id} evaluation not support yet')
        return 0, 0, 0, 0

    df = get_stint_acc(forecasts_et, currank=naivemode, trim=_trim)

    correct = df[df['sign'] == df['pred_sign']]
    acc = len(correct) / len(df)

    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values)) / len(df)
    # bug fix: report the root of the MSE (was raw MSE), consistent with get_evalret
    rmse = mean_squared_error(df['pred_diff'].values, df['diff'].values) ** 0.5
    mae = mean_absolute_error(df['pred_diff'].values, df['diff'].values)
    r2 = r2_score(df['pred_diff'].values, df['diff'].values)
    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}')

    return acc, mae, rmse, r2
def get_evalret(df):
    """
    Score stint-level rank-change predictions against the truth.

    input:
        df ; DataFrame from get_stint_acc with columns startrank, endrank,
             diff, sign, pred_endrank, pred_diff, pred_sign
    return:
        2x4 array: [[acc, mae, rmse, r2],                          # model
                    [acc_naive, mae_naive, rmse_naive, r2_naive]]  # no-change baseline
    """
    correct = df[df['sign'] == df['pred_sign']]
    acc = len(correct) / len(df)

    # (removed a dead `mae1` computation that was only used by commented prints)
    rmse = math.sqrt(mean_squared_error(df['pred_diff'].values, df['diff'].values))
    mae = mean_absolute_error(df['pred_diff'].values, df['diff'].values)
    r2 = r2_score(df['pred_diff'].values, df['diff'].values)

    #naive result: predict "rank does not change over the stint"
    n_correct = df[df['startrank'] == df['endrank']]
    acc_naive = len(n_correct) / len(df)
    mae_naive = np.mean(np.abs(df['diff'].values))
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values, df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values, df['endrank'].values)

    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, {%d}\n \
          naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2, len(df),
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
    )

    return np.array([[acc, mae, rmse, r2], [acc_naive, mae_naive, rmse_naive, r2_naive]])
def get_evalret_shortterm(df):
    """
    Score short-term (per-lap) rank predictions, focused on the leader.

    input:
        df ; DataFrame with columns startlap, startrank, endrank, diff,
             pred_endrank
    return:
        2x4 array: [[acc, mae, rmse, r2],                          # model
                    [acc_naive, mae_naive, rmse_naive, r2_naive]]  # baseline
        acc is the precision of predicting the leader (rank 0)
    """
    maxlap = np.max(df['startlap'].values)
    minlap = np.min(df['startlap'].values)

    top1_pred = df[df['pred_endrank'] == 0]
    correct = top1_pred[top1_pred['pred_endrank'] == top1_pred['endrank']]
    # epsilon guards against an empty prediction set
    acc = len(correct) / (len(top1_pred) + 1e-10)

    rmse = math.sqrt(mean_squared_error(df['pred_endrank'].values, df['endrank'].values))
    mae = mean_absolute_error(df['pred_endrank'].values, df['endrank'].values)
    r2 = r2_score(df['pred_endrank'].values, df['endrank'].values)

    mae1 = np.sum(np.abs(df['pred_endrank'].values - df['endrank'].values))
    mae1 = mae1 / (maxlap - minlap + 1)

    #naive result: predict "rank does not change"
    top1_naive = df[df['startrank'] == 0]
    n_correct = top1_naive[top1_naive['startrank'] == top1_naive['endrank']]
    # bug fix: apply the same zero-division guard used for acc above
    acc_naive = len(n_correct) / (len(top1_naive) + 1e-10)
    mae_naive = np.mean(np.abs(df['diff'].values))
    mae_naive1 = np.sum(np.abs(df['diff'].values))
    mae_naive1 = mae_naive1 / (maxlap - minlap + 1)
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values, df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values, df['endrank'].values)

    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, {%d}\n \
          naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2, len(top1_pred),
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
    )

    return np.array([[acc, mae, rmse, r2], [acc_naive, mae_naive, rmse_naive, r2_naive]])
#
# In[20]:
def mytest():
    """
    Evaluate all model/datamode configurations and cache the result csv.

    Returns the cached DataFrame when the result file already exists,
    otherwise runs every configuration and saves a fresh csv.
    """
    savefile = f'stint-evluate-{_exp_id}-d{_dataset_id}-t{_test_event}-c{_context_ratio}_trim{_trim}.csv'
    if os.path.exists(savefile):
        print(f'{savefile} already exists, bye')
        # bug fix: return the cached result instead of reading and discarding it
        return pd.read_csv(savefile)

    config = {'fulloracle': ['oracle', MODE_ORACLE, False],
              'laponly': ['oracle', MODE_ORACLE_LAPONLY, False],
              'notracklap': ['oracle', MODE_NOTRACK + MODE_NOLAP, False],
              'fullpred': ['oracle', MODE_PREDTRACK + MODE_PREDPIT, False],
              'curtrack': ['oracle', MODE_TESTCURTRACK, False],
              'zerotrack': ['oracle', MODE_TESTZERO, False],
              'predtrack': ['oracle', MODE_PREDTRACK + MODE_ORACLE_TRACKONLY, False],
              'predpit': ['oracle', MODE_PREDPIT + MODE_ORACLE_LAPONLY, False],
              'deepAR': ['deepAR', MODE_ORACLE, False],
              'naive': ['zero', MODE_ORACLE, True],
              }

    cols = ['runid', 'acc', 'mae', 'rmse', 'r2']
    result = []
    for modelname in config.keys():
        acc, mae, rmse, r2 = runtest(modelname, config[modelname][0],
                                     config[modelname][1], config[modelname][2])
        result.append([modelname, acc, mae, rmse, r2])

    retd = pd.DataFrame(result, columns=cols)
    # bug fix: write to the same file the cache check above looks for
    # (previously saved without the _trim suffix, so the cache never hit)
    retd.to_csv(savefile, float_format='%.3f')
    return retd
# In[ ]:
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)

    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))

    # cmd argument parser
    usage = 'stint_predictor_fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    parser.add_option("--contextratio", dest="contextratio", default=0.)
    parser.add_option("--trim", dest="trim", type=int, default=0)
    opt, args = parser.parse_args()

    #set global parameters
    _dataset_id = opt.datasetid
    _test_event = opt.testevent
    _trim = opt.trim
    # bug fix: --contextratio was parsed but never applied
    _context_ratio = float(opt.contextratio)

    if opt.taskid == 'laptime':
        _task_id = 'laptime' # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
        _exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff' # rank,laptime, the trained model's task
        _run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
        _exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'rank':
        _task_id = 'rank' # rank,laptime, the trained model's task
        _run_ts = COL_RANK #COL_LAPTIME,COL_RANK
        _exp_id='rank' #rank, laptime, laptim2rank, timediff2rank...
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)

    if _dataset_id == '' or _test_event == '':
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)

    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE

    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
                _task_id)

    init()
    mytest()
| 154,599 | 36.30695 | 310 | py |
rankpredictor | rankpredictor-master/src/indycar/model/stint_simulator_shortterm.py | #!/usr/bin/env python
# coding: utf-8
# ### stint simulator
# based on: Stint-Predictor-Fastrun
#
#
# long term predictor by continuously regressive forecasting at each pitstop
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# In[2]:
import os
random.seed()
os.getcwd()
#GPUID = 1
# ### global constants
# In[3]:
# feature-row indices inside each car's data matrix
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8
# share the memory
#COL_LAPSTATUS_PRED = 8 # for dynamic lapstatus predictions
#LAPSTATUS SAVED in forecast_et
COL_LAPSTATUS_SAVE = 0 #laptime no use
COL_CAUTION_LAPS_INSTINT_SAVE=7
COL_LAPS_INSTINT_SAVE= 8
# feature-set selectors
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
# oracle mode (bit flags; combined with + and tested with test_flag)
MODE_ORACLE = 1024 # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2
# oracle mode for training
# NOTE(review): these reuse bits 1 and 2 of the ORACLE_* flags above, so
# test_flag cannot distinguish the two groups -- confirm this is intended
MODE_NOLAP = 1
MODE_NOTRACK = 2
# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32
# disturbe analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256
# flag -> printable name, used by get_modestr
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
            MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
            MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
            MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
            MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
            MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
# In[ ]:
# In[4]:
def load_data(event, year=0):
    """
    Load the per-lap csv for one event.

    input:
        event ; event name, e.g. 'Indy500-2018'
        year ; optional; when >0 the file name carries a year suffix
    return:
        (alldata, rankdata, acldata)
        alldata ; raw records
        rankdata ; records deduplicated per (car, lap), ordered by elapsed time
        acldata ; per-lap summary over all cars built by make_cl_data
    """
    #inputfile = '../data/final/C_'+ event +'-' + year + '-final.csv'
    if year > 0:
        # bug fix: year is an int; concatenating it to a str raised TypeError
        inputfile = '../data/final/C_' + event + '-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'

    dataset = pd.read_csv(inputfile)

    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1

    # get records for the cars that finish the race
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)

    #make a copy
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # NOTE(review): cldata is computed but never returned -- kept for parity
    cldata = make_cl_data(dataset)
    acldata = make_cl_data(alldata)

    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """
    Build the completed-laps view of an event:
    car_number, completed_laps, rank, elapsed_time, rank_diff, time_diff,
    current_status, track_status, lap_status.

    rank_diff/time_diff are per-car first differences, reset to 0 on the
    first lap of each car.
    """
    # pick up data with valid rank
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
                          "last_pitted_lap","start_position","laps_led"], axis=1)

    # faster solution, uni_ds already sorted by car_number and lap;
    # bug fix: use .loc instead of chained indexing, which raises
    # SettingWithCopyWarning and silently stops writing under pandas
    # copy-on-write
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)

    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[mask, 'rank_diff'] = 0

    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[mask, 'time_diff'] = 0

    df = uni_ds[['car_number','completed_laps','rank','elapsed_time',
                 'rank_diff','time_diff',"current_status", "track_status", "lap_status"]]
    return df
# In[5]:
def nan_helper(y):
    """Return a boolean NaN mask for *y* plus a converter from logical
    indices to positional indices.

    Example:
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)
    return mask, lambda flags: np.nonzero(flags)[0]
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
def get_modestr(a):
    """Render the MODE_* bit flags set in *a* as a comma-terminated string."""
    return ''.join('%s,' % _mode_map[key] for key in _mode_map if test_flag(a, key))
# caches keyed by endpos -> vector of length prediction_length
_track_pred = {}
_track_true = {}

def init_track_model():
    """Reset the cached track-status predictions and their ground truth."""
    global _track_pred, _track_true
    _track_true = {}
    _track_pred = {}
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """
    return the predicted track status

    Samples a caution length from a fixed empirical model and, when a
    yellow-flag run is still open at the forecast start, extends it into
    the prediction window.  Results are memoized per endpos in the
    module-level _track_pred cache; the true tail is kept in _track_true.

    input:
        track_rec ; track status vector up to endpos (1 = yellow flag)
        endpos ; absolute end position, used only as the cache key
        prediction_length ; number of future laps to predict
        context_len ; how far back to look for an open yellow-flag run
    return:
        int vector of length prediction_length (1 = predicted yellow)
    """
    global _track_pred,_track_true
    # this is the perfect track model for Indy500 2018
    # NOTE(review): hard-coded empirical caution lengths for one event
    track_model = [6,4,4,5,6,6,4]
    if endpos in _track_pred:
        return _track_pred[endpos]
    else:
        #get yflag lap count from the start pred point
        yflaplen = 0
        for i in range(1, context_len):
            if track_rec[- prediction_length - i] == 1:
                yflaplen += 1
            else:
                break
        #laps remain, fill into the future
        trackpred = np.array([0 for x in range(prediction_length)])
        yflap_pred = random.choice(track_model)
        # only extend a caution that is already running and expected to last
        if yflaplen > 0 and yflap_pred > yflaplen:
            trackpred[:(yflap_pred - yflaplen)] = 1
        _track_pred[endpos] = trackpred
        _track_true[endpos] = track_rec[- prediction_length:].copy()
        return trackpred
# cache keyed by endpos -> adjusted track-status vector
_track_adjust = {}

def init_adjust_track_model():
    """Reset the cached track-status adjustments."""
    global _track_adjust
    _track_adjust = dict()
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """
    Randomly perturb the end of a caution run by -1/0/+1 laps
    (disturbance analysis), memoized per endpos in _track_adjust.

    input:
        track_rec ; track status vector (1 = yellow flag)
        endpos ; absolute end position, used only as the cache key
        prediction_length ; length of the window (tail of track_rec)
        tailpos ; <0 end pos of 1 (negative index of the last yellow lap)
    return:
        adjusted copy of the last prediction_length entries of track_rec
    """
    global _track_adjust
    # this is the perfect track model for Indy500 2018
    track_model = [-1,0,1]
    if endpos in _track_adjust:
        return _track_adjust[endpos]
    else:
        yflap_adjust = random.choice(track_model)
        #laps remain, fill into the future
        trackadjust = track_rec[-prediction_length:].copy()
        if yflap_adjust == -1:
            # shorten the caution by one lap
            trackadjust[tailpos] = 0
        elif yflap_adjust == 1:
            # NOTE(review): the +1 branch clears tailpos before setting
            # tailpos+1, which shifts rather than extends the run -- confirm
            trackadjust[tailpos] = 0
            if (tailpos + 1) <= -1:
                trackadjust[tailpos+1] = 1
        _track_adjust[endpos] = trackadjust
        return trackadjust
# carno -> adjusted lap_status vector
_lap_adjust = {}
# pit-shift offset -> occurrence count (statistics of applied adjustments)
_empirical_model = {}

def init_adjust_pitmodel():
    """Reset the per-car lap-status cache and the adjustment statistics."""
    # bug fix: _empirical_model was missing from the global declaration, so
    # the assignment below only created a local and never reset the stats
    global _lap_adjust, _empirical_model
    _lap_adjust = {}
    _empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """
    init the lapstatus for each car, save it for future reference

    Randomly shifts every pit lap by an offset drawn from _adjust_model,
    caching the result per car in _lap_adjust and counting the applied
    offsets in _empirical_model.

    input:
        carno ; car number, used as the cache key
        lapstatus ; the truth lap status vector (1 marks a pit lap)
        force ; True retries until the shifted position is in range;
                False gives up after one draw (the pit may stay in place)
    return:
        the (cached) adjusted lap status vector
    """
    if carno not in _lap_adjust:
        #adjust it
        lapadjust = lapstatus.copy()
        for pos in range(0, len(lapstatus)):
            if lapadjust[pos] == 1:
                success = False
                while(not success):
                    # adjust this pit lap position
                    pos_adjust = get_random_choice(_adjust_model)
                    new_pos = pos + pos_adjust
                    if new_pos >= 0 and new_pos < len(lapstatus):
                        #valid
                        lapadjust[pos] = 0
                        lapadjust[new_pos] = 1
                        success = True
                        #add statistics
                        if pos_adjust not in _empirical_model:
                            _empirical_model[pos_adjust] = 1
                        else:
                            _empirical_model[pos_adjust] += 1
                    if force==False:
                        break
        _lap_adjust[carno] = lapadjust
    return _lap_adjust[carno]
def build_random_model(modeldict):
    """
    Compile a discrete distribution into a cdf lookup table.

    input:
        modeldict ; {value: probability weight}
    return:
        model ; array of [value, normalized cdf] rows, sorted by value
    """
    values = sorted(modeldict.keys())
    model = np.zeros((len(values), 2))
    running = 0.0
    for row, value in enumerate(values):
        running += modeldict[value]
        model[row, 0] = value
        model[row, 1] = running
    # normalize the cdf column so it ends at 1
    model[:, 1] = model[:, 1] / running
    return model
def print_model(model, iscdf=True):
    """
    Print a [value, probability] table as 'val:prob' strings.

    input:
        model ; [value, cdf] rows (or [value, weight] rows when iscdf=False)
        iscdf ; True treats column 1 as a cdf and prints the differences;
                False normalizes the raw weights by their sum
    """
    ordered = model[np.argsort(model[:, 0])]
    denom = 1. if iscdf else np.sum(ordered[:, 1])
    prev = 0
    pairs = []
    for val, mass in ordered:
        pairs.append((val, (mass - prev) / denom))
        if iscdf:
            prev = mass
    #output
    print(['%d:%.3f'%(x[0],x[1]) for x in pairs])
def get_random_choice(model):
    """
    Draw a value from a cdf table built by build_random_model.

    input:
        model ; [value, cdf] rows with the cdf column sorted ascending
    return:
        the sampled value, as an int, with probability given by the cdf
    """
    draw = np.random.rand()
    # number of cdf entries strictly below the draw == the sampled bin
    pos = np.searchsorted(model[:, 1], draw, side='left')
    return int(model[pos, 0])
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
# pit-shift offset -> relative weight; compiled by build_random_model into
# the cdf table consumed by get_random_choice
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
_adjust_model = build_random_model(_modeldict)
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """
    Randomly shift each pit lap inside the prediction window (disturbance
    analysis), drawing offsets from the module-level _adjust_model.

    input:
        lap_rec ; lap status vector, 1 marks a pit lap
        prediction_length ; length of the window (tail of lap_rec)
        force ; True retries until the shifted position is in range;
                False gives up after one draw (the pit may stay in place)
    return:
        adjusted copy of the last prediction_length entries of lap_rec
    """
    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            success = False
            while(not success):
                # adjust this pit lap position
                pos_adjust = get_random_choice(_adjust_model)
                new_pos = pos + pos_adjust
                if new_pos >= 0 and new_pos < prediction_length:
                    #valid
                    lapadjust[pos] = 0
                    lapadjust[new_pos] = 1
                    success = True
                if force==False:
                    break
    return lapadjust
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
    """
    Randomly shift each pit lap in the forecast window by -1/0/+1 laps.

    input:
        lap_rec ; lap status vector, 1 marks a pit lap
        endpos ; unused, kept for interface compatibility
        prediction_length ; length of the window (tail of lap_rec) to adjust
    return:
        adjusted copy of the last prediction_length entries of lap_rec
    """
    adjust_model = [-1, 0, 1]
    # bug fix: removed an unused leading random.choice draw whose result
    # was never read

    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            # shift this pit lap by a random offset when it stays in range
            pos_adjust = random.choice(adjust_model)
            if pos_adjust == -1:
                if (pos - 1 >= 0):
                    lapadjust[pos] = 0
                    lapadjust[pos - 1] = 1
            elif pos_adjust == 1:
                if (pos + 1 < prediction_length):
                    lapadjust[pos] = 0
                    lapadjust[pos + 1] = 1
    return lapadjust
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
    """
    Predict the pit-stop position inside the forecast window.

    Draws a stint length from an empirical distribution (heavy-caution
    stints draw from the shorter-stint samples) and marks the matching
    lap with a 1 when it falls inside the window.
    """
    # this is the perfect empirical pit model for Indy500 2018
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
                     [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
                      [46, 45, 43, 48, 46, 45, 45, 43]]
    pit_model = pit_model_all

    # many caution laps in this stint -> sample from the low (short) model
    source = pit_model[0] if cuation_laps_instint > 10 else pit_model[1]
    pred_pit_laps = random.choice(source)

    # mark the pit lap only when it lands inside the forecast window
    pitpred = np.zeros(prediction_length, dtype=int)
    if laps_instint < pred_pit_laps <= laps_instint + prediction_length:
        pitpred[pred_pit_laps - laps_instint - 1] = 1
    return pitpred
def make_dataset_byevent(runs, prediction_length, freq,
useeid = False,
run_ts= COL_LAPTIME,
test_event = 'Indy500-2018',
test_cars = [],
use_global_dict = True,
oracle_mode = MODE_ORACLE,
half_moving_win = 0,
train_ratio=0.8,
log_transform = False,
context_ratio = 0.,
verbose = False
):
"""
split the ts to train and test part by the ratio
input:
oracle_mode: false to simulate prediction in real by
set the covariates of track and lap status as nan in the testset
half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
"""
run_ts= _run_ts
test_event = _test_event
feature_mode = _feature_mode
init_track_model()
init_adjust_track_model()
start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
train_set = []
test_set = []
#select run
if runs>=0:
_laptime_data = [laptime_data[runs].copy()]
else:
_laptime_data = laptime_data.copy()
#add statistics for adjust test
# trackstatus, lapstatus
mae = [0,0]
#_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
for _data in _laptime_data:
_train = []
_test = []
if events[_data[0]] == test_event:
test_mode = True
else:
test_mode = False
#jump out
continue
#statistics on the ts length
ts_len = [ _entry.shape[1] for _entry in _data[2]]
max_len = int(np.max(ts_len))
train_len = int(np.max(ts_len) * train_ratio)
if context_ratio != 0.:
# add this part to train set
context_len = int(np.max(ts_len) * context_ratio)
else:
context_len = prediction_length*2
if context_len < 10:
context_len = 10
if verbose:
#print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
# process for each ts
for rowid in range(_data[2].shape[0]):
# rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
rec = _data[2][rowid].copy()
rec_raw = _data[2][rowid].copy()
#remove nan(only tails)
nans, x= nan_helper(rec[run_ts,:])
nan_count = np.sum(nans)
rec = rec[:, ~np.isnan(rec[run_ts,:])]
# remove short ts
totallen = rec.shape[1]
if ( totallen < train_len + prediction_length):
if verbose:
print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
continue
if use_global_dict:
carno = _data[1][rowid]
carid = global_carids[_data[1][rowid]]
else:
#simulation dataset, todo, fix the carids as decoder
carno = rowid
carid = rowid
if useeid:
static_cat = [carid, _data[0]]
else:
static_cat = [carid]
#first, get target a copy
# target can be COL_XXSTATUS
target_val = rec[run_ts,:].copy().astype(np.float32)
if log_transform:
target_val = np.log(target_val + 1.0)
# adjust for disturbance analysis
if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
lap_status = rec[COL_LAPSTATUS, :].copy()
rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)
# selection of features
if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
rec[COL_TRACKSTATUS, :] = 0
if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
rec[COL_LAPSTATUS, :] = 0
test_rec_cnt = 0
if not test_mode:
# all go to train set
_train.append({'target': target_val,
'start': start,
'feat_static_cat': static_cat,
'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
rec[COL_LAPSTATUS,:]]
}
)
else:
# reset train_len
#context_len = prediction_length*2
#if context_len < 10:
# context_len = 10
#context_len = train_len
# multiple test ts(rolling window as half of the prediction_length)
#step = -int(prediction_length/2) if half_moving_win else -prediction_length
if half_moving_win == 1:
step = -int(prediction_length/2)
elif half_moving_win == 2:
step = -prediction_length
else:
step = -1
#bug fix, fixed the split point for all cars/ts
for endpos in range(max_len, context_len+prediction_length,
step):
#check if enough for this ts
if endpos > totallen:
continue
track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
# test mode
if test_flag(oracle_mode, MODE_TESTCURTRACK):
# since nan does not work, use cur-val instead
track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
#track_rec[-prediction_length:] = random.randint(0,1)
#lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
lap_rec[-prediction_length:] = 0
#for pitage, just assume there is no pit
start_pitage = pitage_rec[-prediction_length - 1]
pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
elif test_flag(oracle_mode, MODE_TESTZERO):
#set prediction part as nan
#track_rec[-prediction_length:] = np.nan
#lap_rec[-prediction_length:] = np.nan
track_rec[-prediction_length:] = 0
lap_rec[-prediction_length:] = 0
#for pitage, just assume there is no pit
start_pitage = pitage_rec[-prediction_length - 1]
pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
# predicting with status model
if test_flag(oracle_mode, MODE_PREDTRACK):
predrec = get_track_model(track_rec, endpos, prediction_length)
track_rec[-prediction_length:] = predrec
#lap_rec[-prediction_length:] = 0
if test_flag(oracle_mode, MODE_PREDPIT):
#predrec = get_track_model(track_rec, endpos, prediction_length)
#track_rec[-prediction_length:] = predrec
lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
laps_instint,prediction_length)
#for pitage, use the predicted lap info to update pitage
start_pitage = pitage_rec[-prediction_length - 1]
for pos in range(prediction_length):
if lap_rec[-prediction_length + pos]==0:
pitage_rec[-prediction_length + pos] = start_pitage+1
else:
#new pit
start_pitage = 0
pitage_rec[-prediction_length + pos] = start_pitage
# disturbe analysis
if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
# clear the oracle track status
# future 1s in trackstatus
# pattern like 0 1 xx
for _pos in range(-prediction_length + 1, -1):
if track_rec[_pos - 1] == 0:
track_rec[_pos] = 0
if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
# adjust the end position of track, or caution lap length
# find the end of caution laps
_tail = 0
for _pos in range(-1,-prediction_length + 1,-1):
if track_rec[_pos] == 1:
#find the tail
_tail = _pos
break
if _tail != 0:
#found
adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
track_rec[-prediction_length:] = adjustrec
#if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
# # adjust the position of pit
# if np.sum(lap_rec[-prediction_length:]) > 0:
# adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
# lap_rec[-prediction_length:] = adjustrec
#okay, end of adjustments, test difference here
# rec_raw .vs. track_rec, lap_rec
track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))
if feature_mode == FEATURE_STATUS:
_test.append({'target': target_val[:endpos].astype(np.float32),
'start': start,
'feat_static_cat': static_cat,
'feat_dynamic_real': [track_rec,lap_rec]
}
)
elif feature_mode == FEATURE_PITAGE:
_test.append({'target': target_val[:endpos].astype(np.float32),
'start': start,
'feat_static_cat': static_cat,
'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
}
)
test_rec_cnt += 1
#add one ts
if verbose:
print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
train_set.extend(_train)
test_set.extend(_test)
print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
train_ds = ListDataset(train_set, freq=freq)
test_ds = ListDataset(test_set, freq=freq)
return train_ds, test_ds, train_set, test_set
def save_dataset(datafile, freq, prediction_length, cardinality, train_ds, test_ds):
    """Serialize a dataset bundle to *datafile* with pickle.

    The payload written is the list
    ``[freq, prediction_length, cardinality, train_ds, test_ds]``.
    """
    payload = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as outfile:
        # Highest protocol: most compact and fastest representation.
        pickle.dump(payload, outfile, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[6]:
def predict(test_ds, predictor):
    """Run rolling evaluation predictions of *predictor* over *test_ds*.

    Returns (tss, forecasts): ground-truth series and forecast objects,
    both materialized as lists of equal length.
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,      # test dataset
        predictor=predictor,  # predictor
        num_samples=100,      # number of sample paths drawn per forecast
    )
    # materialize both generators so callers can index them repeatedly
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name, trainid):
    """Run the predictor named *model_name* over *test_ds*.

    Deserialized deepAR variants are looked up under
    ``../models/remote/{_dataset_id}/{_task_id}-{trainid}/``; the baseline
    models (naive/zero/arima) are constructed on the fly.

    Returns [tss, forecasts] on success, [] for an unknown model name.
    """
    with mx.Context(mx.gpu(7)):
        pred_ret = []
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'
        # Serialized model directory names, keyed by model alias.
        # (Previously six copy-pasted branches; consolidated into one table.)
        serialized_models = {
            'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
            'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
            'oracle': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
            'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
            'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
            'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
        }
        if model_name in serialized_models:
            modeldir = rootdir + serialized_models[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        # naive: repeat the last observed value
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq=freq, prediction_length=prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        # zero: predicts zeros, which keeps the rank unchanged
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq=freq, prediction_length=prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        # arima baseline via the R forecast package
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq=freq,
                                           prediction_length=prediction_length, trunc_length=60)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        else:
            print(f'error: model {model_name} not support yet!')
        return pred_ret
def load_model(prediction_length, model_name, trainid):
    """Load (or build) the predictor named *model_name*.

    Mirrors run_prediction_ex but only returns the predictor without
    running inference.

    Returns the predictor, or None for an unknown model name.
    (Bug fix: previously `predictor` was unbound on the unknown-model
    path, so `return predictor` raised UnboundLocalError.)
    """
    with mx.Context(mx.gpu(7)):
        predictor = None
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'
        # Serialized model directory names, keyed by model alias
        # (same table as run_prediction_ex).
        serialized_models = {
            'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
            'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
            'oracle': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
            'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
            'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
            'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
        }
        if model_name in serialized_models:
            modeldir = rootdir + serialized_models[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # naive: repeat the last observed value
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq=freq, prediction_length=prediction_length)
        # zero: predicts zeros, which keeps the rank unchanged
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq=freq, prediction_length=prediction_length)
        # arima baseline via the R forecast package
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq=freq,
                                           prediction_length=prediction_length, trunc_length=60)
        else:
            print(f'error: model {model_name} not support yet!')
        return predictor
# In[7]:
#calc rank
def eval_rank_bytimediff(test_ds,tss,forecasts,prediction_length):
    """
    Evaluate rank via time-difference forecasts (timediff models).

    Works for one event only.

    input:
        test_ds ; test set for a single event
        tss, forecasts ; results from predict()
        prediction_length ; expected forecast horizon
    return:
        rank_ret ; list of [lap, time_diff, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [true_timediff, pred_timediff]}}
    """
    carlist = []
    # completed_laps -> carno -> [true, predicted] timediff arrays
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: this message was a plain string, so the {braces}
            # were printed literally instead of the values
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []
        # median over the sample paths as the point forecast
        #forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        timediff_array = tss[idx].values.copy()
        #save the prediction, keyed by the first predicted lap
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                                               forecast_laptime_mean.copy()]
    # calc rank per start lap
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}
        #fill in data: [true/pred, car, step]
        time_diff = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        # double argsort converts values into rank positions per step
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, time_diff, true_rank, pred_rank])
    return rank_ret,forecasts_et
#calc rank
def eval_laptime(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    evaluate lap time forecasting
    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                  contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event
                  (unused here, kept for interface symmetry with eval_rank)
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, lap_time, true_laptime, pred_laptime]
        forecasts_et ; {[completed_laps][carno]} ->(laptime, laptime_pred)
    """
    carlist = []
    # completed_laps -> carno -> [true, predicted] laptime arrays
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: this message was a plain string, so the {braces}
            # were printed literally instead of the values
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []
        # mean over the sample paths as the point forecast
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        #forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        laptime_array = tss[idx].values.copy()
        # splice the forecast into a copy of the ground truth
        laptime_array_hat = tss[idx].values.copy()
        laptime_array_hat[-prediction_len:] = forecast_laptime_mean
        #save the prediction, keyed by the first predicted lap
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
                                               laptime_array_hat[-prediction_len:].copy()]
    # assemble per-lap matrices
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}
        #fill in data: [true/pred, car, step]
        lap_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        # no ranking here: return laptimes directly
        true_laptime = lap_time[0]
        pred_laptime = lap_time[1]
        rank_ret.append([lap, lap_time, true_laptime, pred_laptime])
    return rank_ret,forecasts_et
#calc rank
def eval_rank(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting
    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                  contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event.
                  When a DataFrame is passed, the target is lap time and gets
                  converted to elapsed time; otherwise the target is ranked
                  directly.
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
    """
    carlist = []
    # completed_laps -> carno -> [true, predicted] elapsed-time arrays
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        # laptime mode: look up this car's elapsed time at lap 0
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: this message was a plain string, so the {braces}
            # were printed literally instead of the values
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []
        # mean over the sample paths as the point forecast
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        #forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        if isinstance(start_offset, pd.core.frame.DataFrame):
            # laptime2rank: accumulate lap times into elapsed time
            laptime_array = tss[idx].values.copy()
            elapsed_time = np.cumsum(laptime_array) + offset
            laptime_array = tss[idx].values.copy()
            laptime_array[-prediction_len:] = forecast_laptime_mean
            elapsed_time_hat = np.cumsum(laptime_array) + offset
        else:
            # rank-direct: the target itself is rankable
            elapsed_time = tss[idx].values.copy()
            elapsed_time_hat = tss[idx].values.copy()
            elapsed_time_hat[-prediction_len:] = forecast_laptime_mean
        #save the prediction, keyed by the first predicted lap
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]
    # calc rank per start lap
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}
        #fill in data: [true/pred, car, step]
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        # double argsort converts values into rank positions per step
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])
    return rank_ret,forecasts_et
def get_acc(rank_ret, prediction_length, verbose = False):
    """
    input:
        rank_ret: [lap, elapsed_time, true_rank, pred_rank], use [2][3] columns
    return:
        ((metrics...)
        (record count...))
        the result can be used to calculate micro/macro metrics
    """
    hits_top1 = 0
    hits_top1_farmost = 0
    hits_top5 = 0
    hits_top5_farmost = 0
    tau_sum = 0
    rmse_sum = 0.
    mae_sum = 0.
    for rec in rank_ret:
        gt_rank = rec[2]
        pd_rank = rec[3]
        # top-1: rank value 0 marks the leader; count agreement per step
        hits_top1 += np.sum((gt_rank == 0) & (pd_rank == 0))
        # farmost: only the last predicted step
        hits_top1_farmost += np.sum((gt_rank[:, -1] == 0) & (pd_rank[:, -1] == 0))
        # top-5 membership agreement
        hits_top5 += np.sum((gt_rank < 5) & (pd_rank < 5))
        hits_top5_farmost += np.sum((gt_rank[:, -1] < 5) & (pd_rank[:, -1] < 5))
        # Kendall rank correlation
        kt, _ = stats.kendalltau(gt_rank, pd_rank)
        tau_sum += kt
        rmse_sum += mean_squared_error(pd_rank, gt_rank)
        mae_sum += np.sum(np.abs(pd_rank - gt_rank))
    recnt = len(rank_ret)
    if recnt > 0:
        # normalize: per-step metrics by record*horizon, farmost by record
        hits_top1 = hits_top1 * 1.0 / (recnt * prediction_length)
        hits_top1_farmost = hits_top1_farmost * 1.0 / recnt
        hits_top5 = hits_top5 * 1.0 / (5 * recnt * prediction_length)
        hits_top5_farmost = hits_top5_farmost * 1.0 / (5 * recnt)
        tau_sum = tau_sum / recnt
        rmse_sum = rmse_sum / recnt
        mae_sum = mae_sum / recnt
    # debug only: report mae in the tau slot when targeting lap status
    if _run_ts == COL_LAPSTATUS:
        tau_sum = mae_sum
    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', hits_top1,
              'top1acc_farmost=', hits_top1_farmost,
              'top5acc=', hits_top5,
              'top5acc_farmost=', hits_top5_farmost,
             )
        print('tau = ', tau_sum,
              'rmse = ', rmse_sum,
              'mae = ', mae_sum)
    return ((hits_top1, hits_top1_farmost, hits_top5, hits_top5_farmost, tau_sum, rmse_sum),
            (recnt * prediction_length, recnt, 5 * recnt * prediction_length, 5 * recnt, recnt, recnt))
# In[ ]:
# In[8]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc, datamode='', models=[]):
    """
    Run `runs` repetitions over every (halfmode, plen, trainid) combination
    and average the resulting metrics.

    input:
        plens=[2,5,10]
        half=[False]
        #trainids = ["indy500-r0.2","indy500-r0.4","indy500"]
        trainids = ["r0.5"]
        #half=[True,False]
        #plens=[2]
        runs = 5
        train_ratio=0.5
        exp_id='mean-splitbystage-predpit'
        testfunc ; either a callable such as run_exp_predpit / run_exp_predtrack,
                   or a string label, in which case run_exp is called with
                   `datamode` and `models` (both then required)
    return:
        dfret ; average result of multiple runs
            dataframe['model' , 'prediction_length', 'halfmode','datamode','trainid',
            'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse',
            'top1acc_std','top1acc_farmost_std','top5acc_std','top5acc_farmost_std','tau_std','rmse_std']
        alldata_ret ; for debug
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
            pred_ret[model] -> [tss, forecasts]
            test_ds[model]  -> test_ds
            rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
                forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)

    NOTE(review): the error paths below `return` None rather than the normal
    (dfret, alldata_ret) tuple — callers that unpack will fail; confirm intent.
    """
    if plens == [] or half == [] or trainids == []:
        print("error with empty settings")
        return
    #testfunc or (datamode & models): a string testfunc delegates to run_exp,
    #which needs both datamode and models to be set
    if isinstance(testfunc,str) and (datamode == '' or models == []):
        print("error with testfunc")
        return
    allret = []        # per-run metric dataframes
    alldata_ret = []   # per-run raw experiment data (debug)
    for runid in range(runs):
        exp_data = []
        exp_result = []
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('='*10)
                    # callable testfunc runs directly; a string label delegates to run_exp
                    if not isinstance(testfunc,str):
                        pred_ret, test_ds, rank_ret, metric_ret = testfunc(plen, halfmode,
                                train_ratio=train_ratio,
                                trainid=trainid)
                    else:
                        pred_ret, test_ds, rank_ret, metric_ret = run_exp(plen, halfmode,
                                train_ratio=train_ratio,
                                trainid=trainid,
                                datamode=datamode,
                                models=models)
                    #save
                    exp_data.append((pred_ret, test_ds, rank_ret))
                    exp_result.extend(metric_ret)
        #save result of this run as one dataframe row-block
        result = pd.DataFrame(exp_result, columns = ['model' , 'prediction_length', 'halfmode',
                                                     'datamode','trainid',
                                                    'top1acc','top1acc_farmost','top5acc',
                                                     'top5acc_farmost','tau','rmse'])
        #result['runid'] = [runid for x in range(len(result))]
        allret.append(result)
        alldata_ret.append(exp_data)
    #final: stack the numeric metrics of all runs into [run, row, metric]
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :,:] = ret[['top1acc','top1acc_farmost','top5acc',
                                                     'top5acc_farmost','tau','rmse']].values
    #average and spread across runs
    averagemat = np.mean(metrics[:,:,:], axis=0)
    stdmat = np.std(metrics[:,:,:], axis=0)
    # identifying columns are identical in every run; take them from run 0
    dfhead = allret[0][['model' , 'prediction_length', 'halfmode', 'datamode','trainid']]
    dfaverage = pd.DataFrame(averagemat, columns = ['top1acc','top1acc_farmost','top5acc',
                                                     'top5acc_farmost','tau','rmse'])
    dfstd = pd.DataFrame(stdmat, columns = ['top1acc_std','top1acc_farmost_std','top5acc_std',
                                                     'top5acc_farmost_std','tau_std','rmse_std'])
    dfret = pd.concat([dfhead, dfaverage, dfstd], axis=1)
    #if exp_id != '':
    #    dfret.to_csv(f'laptime2rank-evaluate-indy500-{exp_id}-result.csv', float_format='%.3f')
    return dfret, alldata_ret
def checkret_status(dataret, runid = 0, idx = 0, model='oracle'):
    """
    check the test_ds track and lap status

    Sums the yellow-flag (track) and pit (lap) indicators over the
    prediction window of every record and prints the totals.

    alldata_ret ; for debug
        [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
        pred_ret[model] -> [tss, forecasts]
        test_ds[model]  -> test_ds
        rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
            forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    # horizon length taken from the first forecast's sample matrix
    _, plen = dataret[runid][idx][0][model][1][0].samples.shape
    dataset = dataret[runid][idx][1][model]
    yellowflag_total = 0
    pit_total = 0
    for sample in dataset:
        # decode kept for parity with the original (validates the car id)
        carno = decode_carids[sample['feat_static_cat'][0]]
        track_feat, lap_feat = sample['feat_dynamic_real']
        yellowflag_total += np.sum(track_feat[-plen:])
        pit_total += np.sum(lap_feat[-plen:])
    print('yfcnt:', yellowflag_total, 'pitcnt:', pit_total)
def get_ref_oracle_testds(plens, halfs, train_ratio=0.8, test_cars = []):
    """Build oracle-mode reference test datasets.

    One dataset per (prediction_length, half_moving_win) pair, keyed
    as '<plen>-<halfwin>'.
    """
    testset = {}
    for plen in plens:
        for halfwin in halfs:
            _, ds, _, _ = make_dataset_byevent(events_id[_test_event], plen, freq,
                                               oracle_mode=MODE_ORACLE,
                                               run_ts=_run_ts,
                                               test_cars=test_cars,
                                               half_moving_win=halfwin,
                                               train_ratio=train_ratio)
            testset['%d-%d' % (plen, halfwin)] = ds
    return testset
def checkret_confusionmat(dataret, ref_testset, runid= 0, testid = '', model='oracle'):
    """
    output the 4x4 confusion matrix split by track and lap status

    For every prediction-length entry of run `runid`, classify each start lap
    by whether the oracle reference window contains a yellow flag and/or a
    pit stop ('00','10','01','11'), then compute get_acc metrics per class
    plus an all-records row ('aa').

    input:
        dataret     ; alldata_ret from run_test
        ref_testset ; oracle test ds, keyed '<plen>-0' (see get_ref_oracle_testds)
    return:
        dataframe ['testid','plen','type','reccnt', get_acc metrics...]
    """
    plen_length = len(dataret[runid])
    dflist = []
    for idx in range(plen_length):
        # horizon length taken from the first forecast's sample matrix
        _, plen = dataret[runid][idx][0][model][1][0].samples.shape
        test_ds = dataret[runid][idx][1][model]
        rank_ret = dataret[runid][idx][2][model][0]
        key = '%d-%d'%(plen,0)
        if key not in ref_testset:
            print(f'error, {key} not found in ref_testset')
            continue
        ref_oracle_testds = ref_testset[key]
        # both datasets must describe the same records, else skip
        if len(ref_oracle_testds) != len(test_ds):
            print('error, size of testds mismatch', len(ref_oracle_testds), len(test_ds))
            continue
        # confusion matrix for <trackstatus, lapstatus> type: 00,01,10,11
        # lap(start lap of prediction) -> accumulated (yellow-flag count, pit count)
        lapmap = {}
        ds_iter = iter(ref_oracle_testds)
        for recid in range(len(ref_oracle_testds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            track_rec,lap_rec = test_rec['feat_dynamic_real']
            # counts inside the prediction window only
            yfcnt = np.sum(track_rec[-plen:])
            pitcnt = np.sum(lap_rec[-plen:])
            #laptype = ('0' if yfcnt==0 else '1') + ('0' if pitcnt==0 else '1')
            lap = len(track_rec) - plen + 1
            if lap not in lapmap:
                #lapmap[lap] = laptype
                lapmap[lap] = (yfcnt, pitcnt)
            else:
                # same start lap seen for several cars: accumulate counts
                oldtype = lapmap[lap]
                lapmap[lap] = (yfcnt + oldtype[0], pitcnt + oldtype[1])
        #split the rank_ret by laptype
        types=['00','10','01','11']
        acc_ret = []
        for laptype in types:
            check_ret = []
            for item in rank_ret:
                typecnt = lapmap[item[0]]
                # nonzero count -> '1' flag for that status dimension
                thetype = ('0' if typecnt[0]==0 else '1') + ('0' if typecnt[1]==0 else '1')
                if thetype == laptype:
                    check_ret.append(item)
            # get acc for this status class
            metrics = get_acc(check_ret,plen)
            recret = [testid, plen, laptype, len(check_ret)]
            recret.extend(metrics[0])
            acc_ret.append(recret)
        #add all test ('aa' = every record regardless of class)
        metrics = get_acc(rank_ret,plen)
        recret = [testid, plen, 'aa', len(rank_ret)]
        recret.extend(metrics[0])
        acc_ret.append(recret)
        _dfacc = pd.DataFrame(acc_ret, columns = ['testid','plen',
                'type','reccnt','top1acc','top1acc_farmost','top5acc',
                'top5acc_farmost','tau','rmse'])
        dflist.append(_dfacc)
    dfacc = pd.concat(dflist, axis=0)
    return dfacc
# In[9]:
def check_testds(datamode, test_cars=[]):
    """
    report mae, etc

    Builds the test dataset for every (prediction_length, half_moving_win)
    combination; the construction itself prints the statistics.
    """
    for plen in plens:
        for halfwin in half:
            # results are discarded on purpose: only the side-effect
            # reporting from make_dataset_byevent matters here
            make_dataset_byevent(events_id[_test_event], plen, freq,
                                 oracle_mode=datamode,
                                 run_ts=_run_ts,
                                 test_cars=test_cars,
                                 half_moving_win=halfwin,
                                 train_ratio=train_ratio)
def dotest(config):
    """Run every (model, testfunc) combination described by *config*.

    config: {model: {testfunc_label: datamode}}
    Returns (dfret, dfacc): concatenated run_test metrics and
    confusion-matrix accuracy tables.
    """
    acc_frames = []
    metric_frames = []
    for model, conf in config.items():
        for testfunc, datamode in conf.items():
            df, dataret = run_test(runs, plens, half, trainids,
                                   train_ratio, testfunc, datamode=datamode, models=[model])
            # per-status-class accuracy breakdown for this run
            acc = checkret_confusionmat(dataret, ref_testset,
                                        testid=testfunc, model=model)
            metric_frames.append(df)
            acc_frames.append(acc)
    dfret = pd.concat(metric_frames, axis=0)
    dfacc = pd.concat(acc_frames, axis=0)
    return dfret, dfacc
#
# simulator
#
def get_pitlaps(verbose = True, prediction_length=2):
    """
    collect pitlaps info from COL_LAPSTATUS
    input:
        laptime_data ;
        _test_event ;
        events
        _train_len ; minimum laps for a ts(otherwise, discard)
        global_carids ; carno-> carid mapping
    return:
        ret_pitlaps ; sorted, de-duplicated list of laps where any car pits
        all_pitlaps ; dict carno -> list of pit-stop laps
        max_lap ; longest ts length in the test event
    """
    run_ts = _run_ts
    all_pitlaps = {}  # carno -> pitstops
    max_lap = 0
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            #remove nan(only tails)
            # (dead nan_helper/nan_count computations removed; the mask
            # below is what actually strips the NaNs)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < _train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            # laps where the pit indicator is set
            pitstops = np.where(rec[COL_LAPSTATUS, :] == 1)[0]
            all_pitlaps[carno] = list(pitstops)
    #flatten into a sorted, de-duplicated lap list
    allset = []
    for laps in all_pitlaps.values():
        allset.extend(laps)
    ret_pitlaps = sorted(set(allset))
    return ret_pitlaps, all_pitlaps, max_lap
def get_nextpit(pitlaps, startlap):
    """
    Find, for each car, the first pit stop strictly after *startlap*.

    input:
        pitlaps ; dict carno -> list of pit-stop laps (ascending)
        startlap ;
    return:
        nextpit_map ; dict carno -> next pit lap (cars without a further
                      pit stop are omitted)
        maxlap ; latest next-pit lap over all cars, np.nan if none exists
    """
    nextpit = []
    nextpit_map = {}
    for carno, stops in pitlaps.items():
        #search for the first pit stop after startlap
        for lap in stops:
            if lap > startlap:
                nextpit.append(lap)
                nextpit_map[carno] = lap
                break
    # bug fix: the old code appended np.nan for cars without a further pit
    # stop and then called max() on the mixed list; NaN comparisons are
    # always False, so the result was order-dependent and could be NaN even
    # when valid laps existed (and ValueError on an empty list).
    maxlap = max(nextpit) if nextpit else np.nan
    return nextpit_map, maxlap
def sim_init():
    """
    save the lapstatus in laptime_data

    Snapshots the pit-model related feature rows of every car in the test
    event into their *_SAVE rows, so update_onets can restore the prefix
    before re-simulating. (Dead locals run_ts/ts_len/max_lap removed.)
    """
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
            #save pit model related features
            rec[COL_LAPSTATUS_SAVE,:] = rec[COL_LAPSTATUS, :]
            rec[COL_CAUTION_LAPS_INSTINT_SAVE,:] = rec[COL_CAUTION_LAPS_INSTINT, :]
            rec[COL_LAPS_INSTINT_SAVE, :] = rec[COL_LAPS_INSTINT, :]
def update_lapstatus(startlap):
    """
    update the whole lapstatus data

    Re-simulates lap status from *startlap* onward for every car in the
    test event via update_onets. (Dead locals run_ts/ts_len/max_lap
    removed.)
    """
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
            carno = _data[1][rowid]
            update_onets(rec, startlap, carno)
# Pit-model selector used by update_onets: True -> sample pit intervals from
# the empirical model fit on the top-8 cars only; False -> model fit on all cars.
_pitmodel_top8 = True
def update_onets(rec, startlap, carno):
    """
    update lapstatus after startlap basedon tsrec by pit prediction model

    Restores the saved lap-status prefix up to *startlap*, then repeatedly
    samples the next pit interval from an empirical pit model and writes
    COL_LAPSTATUS / COL_CAUTION_LAPS_INSTINT / COL_LAPS_INSTINT forward
    until the end of the series. Uses `random.choice`, so results are
    stochastic unless the RNG is seeded by the caller.

    input:
        rec  ; a ts with multiple features COL_XXX (named tsrec below)
        startlap ; first lap to re-simulate from
        carno ; car number, used only for debug reporting
    return:
        None; rec is updated in place for COL_LAPSTATUS,
        COL_CAUTION_LAPS_INSTINT, COL_LAPS_INSTINT
    """
    # this is the perfect empirical pit model for Indy500 2018:
    # [0] = pit intervals under mostly-green stints (few caution laps),
    # [1] = pit intervals for caution-heavy stints (see the <=10 test below)
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
            [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
            [46, 45, 43, 48, 46, 45, 45, 43]]
    if _pitmodel_top8:
        pit_model = pit_model_top8
    else:
        pit_model = pit_model_all
    # valid (non-NaN) length of the target channel
    nans, x= nan_helper(rec[_run_ts,:])
    nan_count = np.sum(nans)
    recx = rec[:, ~np.isnan(rec[_run_ts,:])]
    # nothing to simulate if the start lap is beyond the series
    totallen = recx.shape[1]
    if startlap >= totallen:
        return
    #totallen = tsrec.shape[1]
    #ipdb.set_trace()
    #reset status: keep the real (saved) history through startlap,
    #clear everything after it
    endpos = startlap + 1
    rec[COL_LAPSTATUS,:] = 0
    rec[COL_LAPSTATUS,:endpos] = rec[COL_LAPSTATUS_SAVE, :endpos]
    rec[COL_CAUTION_LAPS_INSTINT,:endpos] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :endpos]
    rec[COL_LAPS_INSTINT, :endpos] = rec[COL_LAPS_INSTINT_SAVE, :endpos]
    #rec[COL_LAPSTATUS,:] = rec[COL_LAPSTATUS_SAVE, :]
    #rec[COL_CAUTION_LAPS_INSTINT,:] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :]
    #rec[COL_LAPS_INSTINT, :] = rec[COL_LAPS_INSTINT_SAVE, :]
    debug_report('start update_onets', rec[COL_LAPSTATUS], startlap, carno)
    #loop on predict nextpit pos
    curpos = startlap
    while True:
        caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, curpos])
        laps_instint = int(rec[COL_LAPS_INSTINT, curpos])
        # sample a stint length longer than the laps already driven in this
        # stint; after 10 failed draws fall back to pitting on the next lap
        retry = 0
        while retry < 10:
            if caution_laps_instint <= 10:
                #use low-caution model
                pred_pit_laps = random.choice(pit_model[0])
            else:
                pred_pit_laps = random.choice(pit_model[1])
            if pred_pit_laps <= laps_instint:
                retry += 1
                if retry == 10:
                    pred_pit_laps = laps_instint + 1
                continue
            else:
                break
        nextpos = curpos + pred_pit_laps - laps_instint
        if nextpos >= totallen:
            # predicted pit falls past the end: extend the stint counters
            # to the last lap and stop simulating
            nextpos = totallen - 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos+1] = caution_laps_instint
            for _pos in range(curpos+1, nextpos+1):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            break
        else:
            #if (pred_pit_laps > laps_instint) and (nextpos < totallen):
            # a valid pit: mark it and reset the stint counters at nextpos
            rec[COL_LAPSTATUS, nextpos] = 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos] = caution_laps_instint
            rec[COL_CAUTION_LAPS_INSTINT, nextpos] = 0
            for _pos in range(curpos+1, nextpos):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            rec[COL_LAPS_INSTINT, nextpos] = 0
            #go forward
            curpos = nextpos
    debug_report('after update_onets', rec[COL_LAPSTATUS], startlap, carno)
    return
# Debug tracking of simulation status.
# Intended status matrix layout: laps x (endCol x 5 features);
# features: target, lapstatus, lap_instint, caution_instint, trackstatus.
_status_mat = {} # stepid -> status matrix
def debug_report_mat(startlap, maxnext):
    """
    Output the status of the simulation.

    NOTE(review): this function looks unfinished — it binds `fixedWidth`,
    `endCol` and each car's record `rec` but never prints or returns
    anything; `startlap` and `maxnext` are unused. Confirm before relying
    on it.
    """
    fixedWidth = 5
    endCol = 4
    run_ts = _run_ts
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        #header carno | lap#... (fixed width) -- not yet implemented
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
# Car numbers to emit debug output for; an empty list disables all
# debug reporting in debug_report*/debug_print.
_debug_carlist = []
#_debug_carlist = [12]
def debug_report_ts(msg, rec, startlap, carno, col= COL_LAPSTATUS):
    """
    Dump one feature row of a car's ts, split at `startlap`.

    Prints the values up to and including `startlap`, a separator line,
    then the remaining values — but only when `carno` is on the
    `_debug_carlist` watch list; otherwise a no-op.
    """
    if carno in _debug_carlist:
        head = rec[col, : startlap + 1]
        tail = rec[col, startlap + 1:]
        print(f'--------- {msg}: {startlap} ----------')
        print(head)
        print('='*10)
        print(tail)
def debug_report(msg, rec, startlap, carno):
    """
    Dump a 1-D array split at `startlap` for a watched car.

    No-op unless `carno` is present in `_debug_carlist`.
    """
    if carno in _debug_carlist:
        head = rec[: startlap + 1]
        tail = rec[startlap + 1:]
        print(f'--------- {msg}: {startlap} ----------')
        print(head)
        print('='*10)
        print(tail)
def debug_print(msg):
    """Print `msg` only while car-level debugging is active (`_debug_carlist` non-empty)."""
    if _debug_carlist:
        print(msg)
# Autoregressive simulation that works on the PREDICTED lap status
# (COL_LAPSTATUS as rewritten by update_lapstatus; ground truth is read
# from COL_LAPSTATUS_SAVE).
def sim_onestep_pred(predictor, prediction_length, freq,
        startlap, endlap,
        oracle_mode = MODE_ORACLE,
        verbose = False
    ):
    """
    Roll the predictor forward from `startlap` to `endlap` in steps of
    `prediction_length`, feeding each step's mean forecast back into the
    target so later steps condition on earlier predictions (autoregressive).

    input:
        predictor ; a trained forecaster accepted by
            make_evaluation_predictions
        prediction_length ; horizon of one step, in laps
        freq ; pandas frequency string for the synthetic time index
        startlap, endlap ; simulated lap range
        oracle_mode ; MODE_* bit flags selecting how the covariates
            (track/lap status, pit age) are filled for the forecast window
        verbose ; print per-ts diagnostics
    return:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lap status (ground truth, from COL_LAPSTATUS_SAVE)
            1,: -> true target
            2,: -> pred target (true values overwritten step by step)
            3,: -> placeholder
            4,: -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    # first forecast window ends prediction_length laps after startlap
    endpos = startlap + prediction_length + 1
    while(endpos <= endlap + prediction_length):
        # build the test set for this step
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                # only the test event is simulated
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan (only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # skip cars whose series ends before this window
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                # keep copies of the status rows (target can be COL_XXSTATUS)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # allocate the <5, totallen> output matrix once per car;
                # row 0 is the GROUND-TRUTH lap status (SAVE column)
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS_SAVE, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                # target_val aliases row 2, so earlier forecasts written
                # there feed into this step's input (autoregressive)
                target_val = forecasts_et[carno][2,:]
                # selection of features by oracle mode
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # covariates for this window
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test modes: overwrite the forecast-window covariates
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, carry the current value forward
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    # zero out the forecast part of both status covariates
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    # derive pit age in the forecast window from the
                    # (predicted) lap status: increment until a pit resets it
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec]
                             }
                          )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                             }
                          )
                test_rec_cnt += 1
                debug_report(f'simu_onestep: {startlap}-{endlap}, endpos={endpos}', target_val[:endpos], startlap, carno)
            # end of for each ts
        # RUN prediction for all cars in this window
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=100,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        #save the forecast results back into row 2 (input to the next step)
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            # decode the global carid back to the car number
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            #update the forecasts, ready to use in the next prediction (regressive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
        #go forward one window
        endpos += prediction_length
    return forecasts_et
# Autoregressive simulation that works on the GROUND-TRUTH lap status
# (COL_LAPSTATUS as-is, not the re-predicted one used by sim_onestep_pred).
def sim_onestep_ex(predictor, prediction_length, freq,
        startlap, endlap,
        oracle_mode = MODE_ORACLE,
        verbose = False
    ):
    """
    Roll the predictor forward from `startlap` to `endlap` in steps of
    `prediction_length`, feeding each step's mean forecast back into the
    target (autoregressive). Unlike sim_onestep_pred this keeps the true
    lap status in row 0, iterates one extra window (loop bound has `+ 1`),
    and blanks the predictions beyond `endlap` before returning.

    input:
        parameters ; same as sim_onestep_pred
        startlap, endlap ; simulated lap range
    return:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lap status (ground truth)
            1,: -> true target
            2,: -> pred target (NaN after endlap)
            3,: -> placeholder
            4,: -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    # first forecast window ends prediction_length laps after startlap
    endpos = startlap + prediction_length + 1
    while(endpos <= endlap + prediction_length + 1):
        # build the test set for this step
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                # only the test event is simulated
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan (only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # skip cars whose series ends before this window
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                # keep copies of the status rows (target can be COL_XXSTATUS)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # allocate the <5, totallen> output matrix once per car
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                # target_val aliases row 2, so earlier forecasts written
                # there feed into this step's input (autoregressive)
                target_val = forecasts_et[carno][2,:]
                # selection of features by oracle mode
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # covariates for this window
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test modes: overwrite the forecast-window covariates
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, carry the current value forward
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    # zero out the forecast part of both status covariates
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    # derive pit age in the forecast window from the lap
                    # status: increment until a pit resets it
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec]
                             }
                          )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                             }
                          )
                test_rec_cnt += 1
            # end of for each ts
        # RUN prediction for all cars in this window
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=100,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        #save the forecast results back into row 2 (input to the next step)
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            # decode the global carid back to the car number
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            #update the forecasts, ready to use in the next prediction (regressive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
        #go forward one window
        endpos += prediction_length
    #clear the un-predicted part beyond endlap
    for carno in forecasts_et.keys():
        forecasts_et[carno][2, endlap+1:] = np.nan
    return forecasts_et
def sim_onestep(predictor, prediction_length, freq,
        startlap, endlap,
        oracle_mode = MODE_ORACLE,
        verbose = False
    ):
    """
    Per-car autoregressive simulation from `startlap` to `endlap`.

    Unlike sim_onestep_pred/sim_onestep_ex (which batch all cars per
    window), this version rolls the forecast window forward for one car
    at a time, writing each step's mean forecast into `target_val` and
    the covariate rows of `rec` before the next step.

    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap, endlap ; simulated lap range
    return:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3,: -> placeholder
            4,: -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    #statistics placeholder for adjust test: trackstatus, lapstatus
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _test = []
        if events[_data[0]] != test_event:
            # only the test event is simulated
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()
            #remove nan (only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            # target is a copy here (per-car loop mutates it step by step);
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()
            # allocate the <5, totallen> output matrix
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
            # selection of features by oracle mode
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if True:
                #bug fix, fixed the split point for all cars/ts
                # roll the window forward for this single car
                endpos = startlap + prediction_length
                while(endpos < endlap and endpos < totallen):
                    # RUN Prediction for single record
                    _test = []
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test modes: overwrite the forecast-window covariates
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, carry the current value forward
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        # zero out the forecast part of both status covariates
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        # derive pit age in the forecast window from the lap
                        # status: increment until a pit resets it
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                              )
                    # RUN Prediction here, for a single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)
                    #get prediction result
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
                    # write the forecast back into target_val and the
                    # covariate rows so the next window conditions on it
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
                    #save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
                    test_rec_cnt += 1
                    #go forward one window
                    endpos += prediction_length
            #one ts done
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
    return forecasts_et
# Stint accuracy when the predicted pit lap may differ from the true one.
def get_acc_onestint_pred(forecasts, startlap, nextpit, nextpit_pred, trim=2, currank = False):
    """
    Score one stint per car: rank at `startlap` vs rank at the next pit.

    The true rank change is read at the true next pit (`nextpit`), while
    the predicted end rank is read at the predicted next pit
    (`nextpit_pred`). Only cars whose lap-status row marks a pit at
    `startlap` are evaluated.

    input:
        forecasts ; carno -> [5, totallen] matrix
            (0: lap_status, 3: true_rank, 4: pred_rank)
        startlap ; evaluate only the stint starting at this lap
        nextpit ; carno -> true next pit lap
        nextpit_pred ; carno -> predicted next pit lap
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        currank ; True -> baseline that predicts no rank change
    output:
        list of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    rows = []
    for carno, mat in forecasts.items():
        n_laps = len(mat[1, :])
        # only cars that actually pit on startlap
        if startlap >= n_laps or mat[0, startlap] != 1:
            continue
        rank_true = mat[3, :]
        rank_pred = mat[4, :]
        start_rank = rank_true[startlap - trim]
        # need both the true and the predicted next pit for this car
        if carno not in nextpit or carno not in nextpit_pred:
            continue
        pit_true = nextpit[carno]
        pit_pred = nextpit_pred[carno]
        if np.isnan(pit_true) or np.isnan(pit_pred):
            continue
        end_rank = rank_true[pit_true - trim]
        delta = end_rank - start_rank
        # currank baseline: predict "no change"
        pred_end = start_rank if currank else rank_pred[pit_pred - trim]
        pred_delta = pred_end - start_rank
        rows.append([carno, startlap, start_rank,
                     end_rank, delta, get_sign(delta),
                     pred_end, pred_delta, get_sign(pred_delta)])
    return rows
# Short-term accuracy: rank change between startlap and a fixed endlap.
def get_acc_onestep_shortterm(forecasts, startlap, endlap, trim=0, currank = False):
    """
    Score the rank change from `startlap` to `endlap` for every car.

    Unlike the stint variants, this does not require a pit at `startlap`;
    every car whose series is long enough is evaluated.

    input:
        forecasts ; carno -> [5, totallen] matrix
            (0: lap_status, 3: true_rank, 4: pred_rank)
        startlap, endlap ; window boundaries
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        currank ; True -> baseline that predicts no rank change
    output:
        list of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    rows = []
    for carno, mat in forecasts.items():
        n_laps = len(mat[1, :])
        # skip cars whose series ends before the window starts
        if startlap >= n_laps:
            continue
        rank_true = mat[3, :]
        rank_pred = mat[4, :]
        start_rank = rank_true[startlap - trim]
        if np.isnan(endlap):
            continue
        end_rank = rank_true[endlap - trim]
        delta = end_rank - start_rank
        # currank baseline: predict "no change"
        pred_end = start_rank if currank else rank_pred[endlap - trim]
        pred_delta = pred_end - start_rank
        rows.append([carno, startlap, start_rank,
                     end_rank, delta, get_sign(delta),
                     pred_end, pred_delta, get_sign(pred_delta)])
    return rows
# Stint accuracy when the predicted pit stop equals the true pit stop.
def get_acc_onestint(forecasts, startlap, nextpit, trim=2, currank = False):
    """
    Score one stint per car: rank at `startlap` vs rank at the true next
    pit (`nextpit`); both the true and predicted end ranks are read at the
    same lap. Only cars whose lap-status row marks a pit at `startlap`
    are evaluated.

    input:
        forecasts ; carno -> [5, totallen] matrix
            (0: lap_status, 3: true_rank, 4: pred_rank)
        startlap ; evaluate only the stint starting at this lap
        nextpit ; carno -> next pit lap
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        currank ; True -> baseline that predicts no rank change
    output:
        list of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    rows = []
    for carno, mat in forecasts.items():
        n_laps = len(mat[1, :])
        # only cars that actually pit on startlap
        if startlap >= n_laps or mat[0, startlap] != 1:
            continue
        rank_true = mat[3, :]
        rank_pred = mat[4, :]
        start_rank = rank_true[startlap - trim]
        if carno not in nextpit:
            continue
        pitpos = nextpit[carno]
        if np.isnan(pitpos):
            continue
        end_rank = rank_true[pitpos - trim]
        delta = end_rank - start_rank
        # currank baseline: predict "no change"
        pred_end = start_rank if currank else rank_pred[pitpos - trim]
        pred_delta = pred_end - start_rank
        rows.append([carno, startlap, start_rank,
                     end_rank, delta, get_sign(delta),
                     pred_end, pred_delta, get_sign(pred_delta)])
    return rows
# Simulation driven by the PREDICTED pit model (update_lapstatus +
# sim_onestep_pred).
def run_simulation_pred(predictor, prediction_length, freq,
        datamode = MODE_ORACLE):
    """
    Run the full-race simulation with predicted pit stops.

    step:
        1. init the lap status model (sim_init)
        2. loop over each true pit lap:
            a. re-predict lap status from that lap (update_lapstatus)
            b. run the autoregressive simulation up to the predicted
               next pit (sim_onestep_pred)
            c. convert forecasts to ranks and score the stint

    input:
        predictor, prediction_length, freq ; forwarded to sim_onestep_pred
        datamode ; MODE_* flags forwarded as oracle_mode
    return:
        DataFrame with one row per (car, stint):
        carno, startlap, startrank, endrank, diff, sign,
        pred_endrank, pred_diff, pred_sign
    """
    rankret = []
    # the ground truth pit laps
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    for pitlap in allpits:
        #1. update lap status from this pit lap onwards
        debug_print(f'start pitlap: {pitlap}')
        update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        #2. pit laps under the predicted status, and the next-pit maps
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        #debug output for car 12 (see _debug_carlist)
        if 12 in nextpit and 12 in nextpit_pred:
            debugstr = f'nextpit: {nextpit[12]}, {nextpit_pred[12]}, maxnext: {maxnext}, {maxnext_pred}'
            debug_print(debugstr)
        #run one step sim from pitlap to the predicted max next pit
        forecast = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, maxnext_pred,
                oracle_mode = datamode
                )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result, according to the experiment type
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate this stint (predicted pit may differ from true pit)
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred)
        rankret.extend(ret)
    #collect all stints into a DataFrame
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                'endrank', 'diff', 'sign',
                                'pred_endrank', 'pred_diff', 'pred_sign',
                               ])
    return df
# Short-term prediction combined with the predicted pit model.
def run_simulation_shortterm(predictor, prediction_length, freq,
        datamode = MODE_ORACLE):
    """
    Run a sliding short-term simulation over the race.

    step:
        1. init the lap status model (sim_init)
        2. for every lap from 10 to maxlap - prediction_length:
            a. re-predict lap status from that lap (update_lapstatus)
            b. simulate one prediction window (sim_onestep_pred)
            c. convert forecasts to ranks and score the window

    input:
        predictor, prediction_length, freq ; forwarded to sim_onestep_pred
        datamode ; MODE_* flags forwarded as oracle_mode
    return:
        DataFrame with one row per (car, window):
        carno, startlap, startrank, endrank, diff, sign,
        pred_endrank, pred_diff, pred_sign
    """
    rankret = []
    # the ground truth pit laps (used here only for maxlap)
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    for pitlap in range(10, maxlap-prediction_length):
        #1. update lap status from this lap onwards
        debug_print(f'start pitlap: {pitlap}')
        update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        #run one step sim for a single prediction window
        forecast = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, pitlap + prediction_length,
                oracle_mode = datamode
                )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result, according to the experiment type
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate this window
        ret = get_acc_onestep_shortterm(forecasts_et, pitlap, pitlap+prediction_length)
        rankret.extend(ret)
    #collect all windows into a DataFrame
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                'endrank', 'diff', 'sign',
                                'pred_endrank', 'pred_diff', 'pred_sign',
                               ])
    return df
# Oracle simulation: uses the ground-truth lap status throughout.
def run_simulation(predictor, prediction_length, freq,
        datamode = MODE_ORACLE):
    """
    Run the full-race simulation with the true pit stops (oracle).

    step:
        1. loop over each true pit lap:
            a. run the autoregressive simulation up to the true next pit
               (sim_onestep_ex, which keeps the ground-truth lap status)
            b. convert forecasts to ranks and score the stint

    input:
        predictor, prediction_length, freq ; forwarded to sim_onestep_ex
        datamode ; MODE_* flags forwarded as oracle_mode
    return:
        DataFrame with one row per (car, stint):
        carno, startlap, startrank, endrank, diff, sign,
        pred_endrank, pred_diff, pred_sign
    """
    rankret = []
    allpits, pitmat, maxlap = get_pitlaps()
    for pitlap in allpits:
        print(f'start pitlap: {pitlap}')
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        #run one step sim from pitlap to the true max next pit
        forecast = sim_onestep_ex(predictor, prediction_length, freq,
                pitlap, maxnext,
                oracle_mode = datamode
                )
        print(f'simulation done: {len(forecast)}')
        # calc rank from this result, according to the experiment type
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate this stint (pred pit == true pit)
        ret = get_acc_onestint(forecasts_et, pitlap, nextpit)
        rankret.extend(ret)
    #collect all stints into a DataFrame
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                            'endrank', 'diff', 'sign',
                            'pred_endrank', 'pred_diff', 'pred_sign',
                           ])
    return df
#
# ------------
#
def longterm_predict(predictor, runs, prediction_length, freq,
useeid = False,
run_ts= COL_LAPTIME,
test_event = 'Indy500-2018',
test_cars = [],
use_global_dict = True,
oracle_mode = MODE_ORACLE,
half_moving_win = 0,
train_ratio=0.8,
log_transform = False,
verbose = False
):
"""
split the ts to train and test part by the ratio
input:
oracle_mode: false to simulate prediction in real by
set the covariates of track and lap status as nan in the testset
half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
return:
forecast ; {}, carno -> 5 x totallen matrix
0,: -> lapstatus
1,: -> true target
2,: -> pred target
3, -> placeholder
4, -> placeholder
"""
run_ts= _run_ts
test_event = _test_event
feature_mode = _feature_mode
context_ratio = _context_ratio
init_track_model()
init_adjust_track_model()
start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
train_set = []
test_set = []
forecasts_et = {}
#select run
if runs>=0:
_laptime_data = [laptime_data[runs].copy()]
else:
_laptime_data = laptime_data.copy()
#add statistics for adjust test
# trackstatus, lapstatus
mae = [0,0]
#_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
for _data in _laptime_data:
_train = []
_test = []
if events[_data[0]] == test_event:
test_mode = True
else:
test_mode = False
#jump out
continue
#statistics on the ts length
ts_len = [ _entry.shape[1] for _entry in _data[2]]
max_len = int(np.max(ts_len))
train_len = int(np.max(ts_len) * train_ratio)
if context_ratio != 0.:
# add this part to train set
context_len = int(np.max(ts_len) * context_ratio)
else:
context_len = prediction_length*2
if context_len < 10:
context_len = 10
if verbose:
#print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
# process for each ts
for rowid in range(_data[2].shape[0]):
# rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
rec = _data[2][rowid].copy()
rec_raw = _data[2][rowid].copy()
#remove nan(only tails)
nans, x= nan_helper(rec[run_ts,:])
nan_count = np.sum(nans)
rec = rec[:, ~np.isnan(rec[run_ts,:])]
# remove short ts
totallen = rec.shape[1]
if ( totallen < train_len + prediction_length):
if verbose:
print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
continue
if use_global_dict:
carno = _data[1][rowid]
carid = global_carids[_data[1][rowid]]
else:
#simulation dataset, todo, fix the carids as decoder
carno = rowid
carid = rowid
if useeid:
static_cat = [carid, _data[0]]
else:
static_cat = [carid]
#first, get target a copy
# target can be COL_XXSTATUS
target_val = rec[run_ts,:].copy().astype(np.float32)
lap_status = rec[COL_LAPSTATUS, :].copy()
track_status = rec[COL_TRACKSTATUS, :].copy()
pitage_status = rec[COL_LAPS_INSTINT,:].copy()
# <3, totallen>
forecasts_et[carno] = np.zeros((5, totallen))
forecasts_et[carno][:,:] = np.nan
forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
if log_transform:
target_val = np.log(target_val + 1.0)
# adjust for disturbance analysis
if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
lap_status = rec[COL_LAPSTATUS, :].copy()
rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)
# selection of features
if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
rec[COL_TRACKSTATUS, :] = 0
if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
rec[COL_LAPSTATUS, :] = 0
test_rec_cnt = 0
if not test_mode:
# all go to train set
_train.append({'target': target_val,
'start': start,
'feat_static_cat': static_cat,
'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
rec[COL_LAPSTATUS,:]]
}
)
else:
# reset train_len
#context_len = prediction_length*2
#if context_len < 10:
# context_len = 10
#context_len = train_len
# multiple test ts(rolling window as half of the prediction_length)
#step = -int(prediction_length/2) if half_moving_win else -prediction_length
if half_moving_win == 1:
step = int(prediction_length/2)
elif half_moving_win == 2:
step = prediction_length
else:
step = 1
#bug fix, fixed the split point for all cars/ts
#for endpos in range(max_len, context_len+prediction_length,step):
for endpos in range(context_len+prediction_length, max_len, step):
#check if enough for this ts
if endpos > totallen:
break
# RUN Prediction for single record
_test = []
# check pitstop(stint) in the last prediction
# use ground truth of target before the last pitstop
if np.sum(lap_status[endpos-2*prediction_length:endpos-prediction_length]) > 0:
# pit found
# adjust endpos
pitpos = np.where(lap_status[endpos-2*prediction_length:endpos-prediction_length] == 1)
endpos = endpos-2*prediction_length + pitpos[0][0] + prediction_length + 1
#print('endpos:',endpos,pitpos)
#check if enough for this ts
if endpos > totallen:
break
#reset target, status
target_val = rec[run_ts,:].copy().astype(np.float32)
rec[COL_LAPSTATUS, :] = lap_status
rec[COL_TRACKSTATUS, :] = track_status
rec[COL_LAPS_INSTINT, :] = pitage_status
track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
# test mode
if test_flag(oracle_mode, MODE_TESTCURTRACK):
# since nan does not work, use cur-val instead
track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
#track_rec[-prediction_length:] = random.randint(0,1)
#lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
lap_rec[-prediction_length:] = 0
#for pitage, just assume there is no pit
start_pitage = pitage_rec[-prediction_length - 1]
pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
elif test_flag(oracle_mode, MODE_TESTZERO):
#set prediction part as nan
#track_rec[-prediction_length:] = np.nan
#lap_rec[-prediction_length:] = np.nan
track_rec[-prediction_length:] = 0
lap_rec[-prediction_length:] = 0
#for pitage, just assume there is no pit
start_pitage = pitage_rec[-prediction_length - 1]
pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
# predicting with status model
if test_flag(oracle_mode, MODE_PREDTRACK):
predrec = get_track_model(track_rec, endpos, prediction_length)
track_rec[-prediction_length:] = predrec
#lap_rec[-prediction_length:] = 0
if test_flag(oracle_mode, MODE_PREDPIT):
#predrec = get_track_model(track_rec, endpos, prediction_length)
#track_rec[-prediction_length:] = predrec
lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
laps_instint,prediction_length)
#for pitage, use the predicted lap info to update pitage
start_pitage = pitage_rec[-prediction_length - 1]
for pos in range(prediction_length):
if lap_rec[-prediction_length + pos]==0:
pitage_rec[-prediction_length + pos] = start_pitage+1
else:
#new pit
start_pitage = 0
pitage_rec[-prediction_length + pos] = start_pitage
# disturbe analysis
if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
# clear the oracle track status
# future 1s in trackstatus
# pattern like 0 1 xx
for _pos in range(-prediction_length + 1, -1):
if track_rec[_pos - 1] == 0:
track_rec[_pos] = 0
if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
# adjust the end position of track, or caution lap length
# find the end of caution laps
_tail = 0
for _pos in range(-1,-prediction_length + 1,-1):
if track_rec[_pos] == 1:
#find the tail
_tail = _pos
break
if _tail != 0:
#found
adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
track_rec[-prediction_length:] = adjustrec
#if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
# # adjust the position of pit
# if np.sum(lap_rec[-prediction_length:]) > 0:
# adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
# lap_rec[-prediction_length:] = adjustrec
#okay, end of adjustments, test difference here
# rec_raw .vs. track_rec, lap_rec
track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))
if feature_mode == FEATURE_STATUS:
_test.append({'target': target_val[:endpos].astype(np.float32),
'start': start,
'feat_static_cat': static_cat,
'feat_dynamic_real': [track_rec,lap_rec]
}
)
elif feature_mode == FEATURE_PITAGE:
_test.append({'target': target_val[:endpos].astype(np.float32),
'start': start,
'feat_static_cat': static_cat,
'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
}
)
# RUN Prediction here, for single record
test_ds = ListDataset(_test, freq=freq)
forecast_it, ts_it = make_evaluation_predictions(
dataset=test_ds, # test dataset
predictor=predictor, # predictor
num_samples=100, # number of sample paths we want for evaluation
)
forecasts = list(forecast_it)
tss = list(ts_it)
#get prediction result
forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
#update target_val
target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
#save forecast
#save the prediction
completed_laps = len(tss[0]) - prediction_length + 1
#print('car no:', carno, 'completed_laps:', completed_laps)
forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
test_rec_cnt += 1
#one ts
if verbose:
print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
#train_set.extend(_train)
#test_set.extend(_test)
#print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
#train_ds = ListDataset(train_set, freq=freq)
#test_ds = ListDataset(test_set, freq=freq)
return forecasts_et
# In[12]:
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset):
    """
    Evaluate stint rank from laptime forecasts.

    input:
        forecasts_et ; {} carno -> 5 x totallen matrix
            row 0: lapstatus
            row 1: true target (laptime)
            row 2: predicted target (laptime)
            rows 3/4: placeholders, filled here with true/pred rank
        start_offset ; per-car elapsed time at lap 0 (DataFrame with
            'car_number' and 'elapsed_time' columns) for one event
        prediction_length ; unused here, kept for interface symmetry
    return:
        forecasts_et, mutated in place with rank rows filled in
    """
    car_numbers = list(forecasts_et.keys())
    car_index = {car: pos for pos, car in enumerate(car_numbers)}

    # Elapsed-time table; layer 0 = ground truth, layer 1 = forecast.
    # TODO: Indy500 hard-coded to at most 200 laps.
    MAX_LAP = 200
    elapsed_time = np.full((2, len(car_numbers), MAX_LAP), np.nan)

    for car in car_numbers:
        # NOTE: `offset` is only bound when start_offset is a DataFrame,
        # matching the original behavior (non-DataFrame input fails).
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number'] == car)].elapsed_time.values[0]
        laps = len(forecasts_et[car][1, :])
        row = car_index[car]
        # cumulative laptime + lap-0 offset -> elapsed time per lap
        elapsed_time[0, row, :laps] = np.cumsum(forecasts_et[car][1, :]) + offset
        elapsed_time[1, row, :laps] = np.cumsum(forecasts_et[car][2, :]) + offset

    # double argsort converts elapsed time into per-lap rank positions
    # (NaN columns sort last, which keeps short series well-defined)
    true_rank = np.argsort(np.argsort(elapsed_time[0], axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(elapsed_time[1], axis=0), axis=0)

    # write the ranks back into the placeholder rows
    for car in car_numbers:
        laps = len(forecasts_et[car][1, :])
        forecasts_et[car][3, :] = true_rank[car_index[car], :laps]
        forecasts_et[car][4, :] = pred_rank[car_index[car], :laps]

    return forecasts_et
def eval_stint_direct(forecasts_et, prediction_length):
    """
    Evaluate stint rank straight from timediff forecasts.

    input:
        forecasts_et ; {} carno -> 5 x totallen matrix
            row 0: lapstatus
            row 1: true target (timediff)
            row 2: predicted target (timediff)
            rows 3/4: placeholders, filled here with true/pred rank
        prediction_length ; unused here, kept for interface symmetry
    return:
        forecasts_et, mutated in place with rank rows filled in
    """
    car_numbers = list(forecasts_et.keys())
    car_index = {car: pos for pos, car in enumerate(car_numbers)}

    # timediff table; layer 0 = ground truth, layer 1 = forecast.
    # TODO: Indy500 hard-coded to at most 200 laps.
    MAX_LAP = 200
    diff_time = np.full((2, len(car_numbers), MAX_LAP), np.nan)

    for car in car_numbers:
        laps = len(forecasts_et[car][1, :])
        row = car_index[car]
        diff_time[0, row, :laps] = forecasts_et[car][1, :]
        diff_time[1, row, :laps] = forecasts_et[car][2, :]

    # double argsort converts per-lap timediff into per-lap rank positions
    # (NaN columns sort last, which keeps short series well-defined)
    true_rank = np.argsort(np.argsort(diff_time[0], axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(diff_time[1], axis=0), axis=0)

    # write the ranks back into the placeholder rows
    for car in car_numbers:
        laps = len(forecasts_et[car][1, :])
        forecasts_et[car][3, :] = true_rank[car_index[car], :laps]
        forecasts_et[car][4, :] = pred_rank[car_index[car], :laps]

    return forecasts_et
#calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset):
    """
    Evaluate rank from laptime forecasts.

    NOTE(review): this is a functional duplicate of eval_stint_bylaptime;
    consider consolidating the two.

    input:
        forecasts_et ; {} carno -> 5 x totallen matrix (row 1 true laptime,
            row 2 predicted laptime; rows 3/4 are filled here with ranks)
        start_offset ; per-car elapsed time at lap 0 (DataFrame with
            'car_number' and 'elapsed_time' columns) for one event
        prediction_length ; unused here, kept for interface symmetry
    return:
        forecasts_et, mutated in place with rank rows filled in
    """
    car_numbers = list(forecasts_et.keys())
    car_index = {car: pos for pos, car in enumerate(car_numbers)}

    # Elapsed-time table; layer 0 = ground truth, layer 1 = forecast.
    # TODO: Indy500 hard-coded to at most 200 laps.
    MAX_LAP = 200
    elapsed_time = np.full((2, len(car_numbers), MAX_LAP), np.nan)

    for car in car_numbers:
        # NOTE: `offset` is only bound when start_offset is a DataFrame,
        # matching the original behavior (non-DataFrame input fails).
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number'] == car)].elapsed_time.values[0]
        laps = len(forecasts_et[car][1, :])
        row = car_index[car]
        # cumulative laptime + lap-0 offset -> elapsed time per lap
        elapsed_time[0, row, :laps] = np.cumsum(forecasts_et[car][1, :]) + offset
        elapsed_time[1, row, :laps] = np.cumsum(forecasts_et[car][2, :]) + offset

    # double argsort converts elapsed time into per-lap rank positions
    true_rank = np.argsort(np.argsort(elapsed_time[0], axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(elapsed_time[1], axis=0), axis=0)

    # write the ranks back into the placeholder rows
    for car in car_numbers:
        laps = len(forecasts_et[car][1, :])
        forecasts_et[car][3, :] = true_rank[car_index[car], :laps]
        forecasts_et[car][4, :] = pred_rank[car_index[car], :laps]

    return forecasts_et
# In[13]:
def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
           test_event='Indy500-2018', test_cars = [],
           datamode = MODE_ORACLE,model = 'oracle'):
    """
    Run one long-term forecasting experiment on a single event.

    Loads the trained model named by ``model``/``trainid`` and runs
    ``longterm_predict`` over the test event.

    input:
        prediction_length ; forecast horizon in laps
        half_moving_win   ; rolling-window step selector (see longterm_predict)
        train_ratio, trainid ; identify the trained model to load
        test_cars         ; optional car-number filter (passed through;
                            default [] is never mutated here)
        datamode, model   ; oracle/feature mode flags and model name
    return:
        forecasts dict, carno -> 5 x totallen matrix from longterm_predict

    NOTE(review): the ``test_event`` parameter is currently ignored — the
    event is resolved from the module-level ``_test_event`` global. Confirm
    before relying on the parameter.
    """
    print('exp:', inspect.stack()[0][3], 'model:', model,
          'datamode:', get_modestr(datamode), 'eval:', _exp_id )

    # removed dead locals (retdf, pred_ret, ds_ret, rank_result) that were
    # never read
    predictor = {}
    predictor[model] = load_model(prediction_length, model,
                                  trainid=trainid)

    ### create test dataset and run the rolling forecast
    forecasts = longterm_predict(predictor[model],
                                 events_id[_test_event], prediction_length, freq,
                                 oracle_mode=datamode,
                                 run_ts = _run_ts,
                                 test_cars=test_cars,
                                 half_moving_win= half_moving_win,
                                 train_ratio=train_ratio
                                 )
    return forecasts
# In[14]:
def get_sign(diff):
    """Map *diff* to its sign: 1 for positive, -1 for negative, else 0."""
    # note: anything that compares False to both > 0 and < 0 (e.g. NaN)
    # falls through to 0, same as the original if/elif chain
    return 1 if diff > 0 else (-1 if diff < 0 else 0)
def get_stint_acc(forecasts, trim=2, currank = False):
    """
    Build a per-stint rank-change table from forecast results.

    input:
        forecasts ; carno -> [5, totallen] matrix
            row 0: lap_status (1 marks a pit lap, i.e. a stint boundary)
            row 3: true rank per lap
            row 4: predicted rank per lap
        trim     ; laps backed off from the stint boundary so the rank is
                   read on a steady lap (before pit inlap/outlap)
        currank  ; if True, emulate the naive "current rank" baseline by
                   using the stint start rank as the predicted end rank
    output:
        DataFrame, one row per stint:
        carno, stintid, startrank, endrank, diff, sign,
        pred_endrank, pred_diff, pred_sign
    """
    def _sign(x):
        # sign of a rank change; NaN falls through to 0 (same contract as
        # the module-level get_sign)
        return 1 if x > 0 else (-1 if x < 0 else 0)

    def _record(carno, stintid, startrank, endrank, pred_endrank):
        # assemble one output row; the pred_* columns collapse to the naive
        # model when currank is set
        diff = endrank - startrank
        if currank:
            pred_endrank = startrank
        pred_diff = pred_endrank - startrank
        return [carno, stintid, startrank,
                endrank, diff, _sign(diff),
                pred_endrank, pred_diff, _sign(pred_diff)]

    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1, :])
        true_rank = forecasts[carno][3, :]
        pred_rank = forecasts[carno][4, :]

        pitpos_list = np.where(forecasts[carno][0, :] == 1)[0]

        stintid = 0
        startrank = true_rank[0]
        for pitpos in pitpos_list:
            # NOTE(review): pitpos < trim would wrap to the array tail;
            # assumed not to happen for real stints — confirm upstream.
            rankret.append(_record(carno, stintid, startrank,
                                   true_rank[pitpos - trim],
                                   pred_rank[pitpos - trim]))
            stintid += 1
            startrank = true_rank[pitpos - trim]

        # final stint after the last pit.  Bugfix: also covers cars that
        # never pitted — the original crashed on pitpos_list[-1] then.
        if len(pitpos_list) == 0 or pitpos_list[-1] < lapnum - 1:
            rankret.append(_record(carno, stintid, startrank,
                                   true_rank[-1], pred_rank[-1]))

    return pd.DataFrame(rankret, columns=['carno', 'stintid', 'startrank',
                                          'endrank', 'diff', 'sign',
                                          'pred_endrank', 'pred_diff', 'pred_sign'])
#
# configuration (defaults; overridden by CLI options in __main__)
#
# model path convention: <_dataset_id>/<_task_id>-<trainid>/
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'
_test_event = 'Indy500-2018'
#_test_event = 'Indy500-2019'
_train_len = 40
_feature_mode = FEATURE_STATUS
_context_ratio = 0.
#_task_id = 'timediff' # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
#
#_task_id = 'lapstatus' # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus' #rank, laptime, laptim2rank, timediff2rank...
# _task_id selects the trained model's target column, _run_ts the time
# series used for forecasting, _exp_id the evaluation flavor
_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
# In[16]:
# module-level dataset state, populated by init()
global_start_offset = {}
global_carids = {}
laptime_data = None
freq = "1min"
decode_carids = {}
# one Indy500 event per year; events_id maps event name -> index into
# the pickled laptime_data list
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}_v9'
def init():
    """Populate the module-level dataset globals.

    Loads per-event stage data (for the lap-0 elapsed-time offsets) and the
    pickled laptime/rank/timediff dataset identified by ``dbid``.
    """
    global global_carids, laptime_data, global_start_offset, decode_carids

    stagedata = {}
    for event in events:
        stagedata[event] = load_data(event)
        _, rankdata, _ = stagedata[event]
        # elapsed time at lap 0, per car: used later as the rank offset
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]

    # start from here
    import pickle
    with open(f'laptime_rank_timediff_pit-oracle-{dbid}.pickle', 'rb') as f:
        # the pickle protocol version is detected automatically
        global_carids, laptime_data = pickle.load(f, encoding='latin1')

    decode_carids = {carid: carno for carno, carid in global_carids.items()}
    print(f'init: load dataset with {len(laptime_data)} races, {len(global_carids)} cars')
def runtest(modelname, model, datamode, naivemode, trainid= "2018"):
    """Run one model configuration end-to-end and report stint metrics.

    Returns (acc, mae, rmse, r2); (0, 0, 0, 0) when the module-level
    _exp_id is not a supported evaluation flavor.
    """
    forecast = run_exp(2, 2, train_ratio=0.1, trainid=trainid,
                       datamode=datamode, model=model)

    # pick the rank-evaluation flavor matching the forecast target
    if _exp_id in ('rank', 'timediff2rank'):
        forecasts_et = eval_stint_direct(forecast, 2)
    elif _exp_id == 'laptime2rank':
        forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
    else:
        print(f'Error, {_exp_id} evaluation not support yet')
        return 0, 0, 0, 0

    df = get_stint_acc(forecasts_et, currank=naivemode, trim=_trim)

    matched = df[df['sign'] == df['pred_sign']]
    acc = len(matched) / len(df)
    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values)) / len(df)
    rmse = mean_squared_error(df['pred_diff'].values, df['diff'].values)
    mae = mean_absolute_error(df['pred_diff'].values, df['diff'].values)
    r2 = r2_score(df['pred_diff'].values, df['diff'].values)
    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}')

    return acc, mae, rmse, r2
def get_evalret(df):
    """Compute sign-accuracy and error metrics for a stint table, plus a
    naive no-change baseline; returns (acc, mae, rmse, r2)."""
    pred_vals = df['pred_diff'].values
    true_vals = df['diff'].values

    acc = len(df[df['sign'] == df['pred_sign']]) / len(df)
    mae1 = np.sum(np.abs(pred_vals - true_vals)) / len(df)
    rmse = mean_squared_error(pred_vals, true_vals)
    mae = mean_absolute_error(pred_vals, true_vals)
    r2 = r2_score(pred_vals, true_vals)

    # naive baseline: predict "no rank change" for every stint
    acc_naive = len(df[df['startrank'] == df['endrank']]) / len(df)
    mae_naive = np.mean(np.abs(true_vals))

    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    return acc, mae, rmse, r2
def get_evalret_shortterm(df):
    """Short-term evaluation: leader (rank-0) prediction accuracy plus
    regression metrics on the end-of-window rank; returns (acc, mae, rmse, r2)."""
    maxlap = np.max(df['startlap'].values)
    minlap = np.min(df['startlap'].values)
    lap_span = maxlap - minlap + 1

    # leader prediction accuracy over windows where rank 0 was predicted
    top1 = df[df['endrank'] == 0]                 # kept for parity with the disabled report
    top1_pred = df[df['pred_endrank'] == 0]
    hits = top1_pred[top1_pred['pred_endrank'] == top1_pred['endrank']]
    acc = len(hits) / len(top1_pred)

    rmse = mean_squared_error(df['pred_endrank'].values, df['endrank'].values)
    mae = mean_absolute_error(df['pred_endrank'].values, df['endrank'].values)
    r2 = r2_score(df['pred_endrank'].values, df['endrank'].values)
    # per-lap absolute error (only consumed by the disabled diagnostic print)
    mae1 = np.sum(np.abs(df['pred_endrank'].values - df['endrank'].values)) / lap_span

    # naive baseline: the current leader stays leader
    top1_naive = df[df['startrank'] == 0]
    naive_hits = top1_naive[top1_naive['startrank'] == top1_naive['endrank']]
    acc_naive = len(naive_hits) / len(top1_naive)
    mae_naive = np.mean(np.abs(df['diff'].values))
    mae_naive1 = np.sum(np.abs(df['diff'].values)) / lap_span

    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}, {mae_naive1}')
    print(f'pred: acc={acc}, mae={mae}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    return acc, mae, rmse, r2
#
# In[20]:
def mytest():
    """Run the full oracle/model comparison suite and persist results as CSV.

    Results are cached: when the output CSV already exists it is loaded and
    returned instead of re-running the experiments.
    """
    savefile = f'stint-evluate-{_exp_id}-d{_dataset_id}-t{_test_event}-c{_context_ratio}_trim{_trim}.csv'
    if os.path.exists(savefile):
        print(f'{savefile} already exists, bye')
        # bugfix: return the cached result (it was read and discarded)
        return pd.read_csv(savefile)

    # runid -> [model name, datamode flags, naivemode]
    config = {'fulloracle':['oracle',MODE_ORACLE,False],
              'laponly':['oracle',MODE_ORACLE_LAPONLY,False],
              'notracklap':['oracle',MODE_NOTRACK + MODE_NOLAP,False],
              'fullpred':['oracle',MODE_PREDTRACK + MODE_PREDPIT,False],
              'curtrack':['oracle',MODE_TESTCURTRACK,False],
              'zerotrack':['oracle',MODE_TESTZERO,False],
              'predtrack':['oracle',MODE_PREDTRACK + MODE_ORACLE_TRACKONLY,False],
              'predpit':['oracle',MODE_PREDPIT + MODE_ORACLE_LAPONLY,False],
              'deepAR':['deepAR',MODE_ORACLE,False],
              'naive':['zero',MODE_ORACLE, True],
             }

    cols = ['runid','acc','mae', 'rmse', 'r2']
    result = []
    for modelname in config.keys():
        acc, mae, rmse, r2 = runtest(modelname, config[modelname][0],
                                     config[modelname][1], config[modelname][2])
        result.append([modelname, acc, mae, rmse, r2])

    retd = pd.DataFrame(result, columns=cols)
    # bugfix: write to `savefile` — the cache check above looks for the
    # _trim-suffixed name, but the original saved to a different filename,
    # so the cache never hit
    retd.to_csv(savefile, float_format='%.3f')
    return retd
# In[ ]:
if __name__ == '__main__':
    # Script entry point: parse CLI options, bind the module-level
    # configuration globals, then run init() + mytest().
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # cmd argument parser
    usage = 'stint_predictor_fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    parser.add_option("--contextratio", dest="contextratio", default=0.)
    parser.add_option("--trim", dest="trim", type=int, default=2)
    opt, args = parser.parse_args()
    # set global parameters consumed by run_exp/runtest/mytest
    _dataset_id = opt.datasetid
    _test_event = opt.testevent
    _trim = opt.trim
    # the task id selects which time series column is forecast (_run_ts)
    # and how it is evaluated downstream (_exp_id)
    if opt.taskid == 'laptime':
        _task_id = 'laptime' # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
        _exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff' # rank,laptime, the trained model's task
        _run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
        _exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'rank':
        _task_id = 'rank' # rank,laptime, the trained model's task
        _run_ts = COL_RANK #COL_LAPTIME,COL_RANK
        _exp_id='rank' #rank, laptime, laptim2rank, timediff2rank...
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)
    if _dataset_id=='' or _test_event=='':
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)
    # dataset ids mentioning 'pitage' carry the extra pit-age feature
    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE
    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
            _task_id)
    init()
    mytest()
| 142,364 | 36.484202 | 310 | py |
rankpredictor | rankpredictor-master/src/indycar/model/savedata4deepartf.py | #!/usr/bin/env python
# coding: utf-8
"""
Gluonts Models on the Indy dataset
dataset:
freq, prediction_length, cardinality,train_ds, test_ds
models:
1. classical models
naive,
arima, ets, prophet
2. deep models
deepAR, deepstate, deepFactor
deepAR-Oracle
"""
# # DeepAR on simulation indy500 laptime dataset
#
# laptime dataset
# <eventid, carids, laptime (totalcars x totallaps)>
# Third-party imports
import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import logging
import os,sys
from optparse import OptionParser
import pickle
from pathlib import Path
from gluonts.dataset.common import ListDataset
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparsavedata import DeepARSaveDataEstimator
logger = logging.getLogger(__name__)
#global variables
prediction_length = 50
context_length = 100
freq = "1H"
events = ['Phoenix','Indy500','Texas','Iowa','Pocono','Gateway']
events_id={key:idx for idx, key in enumerate(events)}
cardinality = [0]
TS_LAPTIME=2
TS_RANK=3
def load_dataset(inputfile):
    """Unpickle a prepared GluonTS dataset.

    Refreshes the module-level ``freq``, ``prediction_length`` and
    ``cardinality`` from the pickle and returns ``(train_ds, test_ds)``.
    """
    global freq, prediction_length, cardinality

    with open(inputfile, 'rb') as f:
        # the pickle protocol version is detected automatically
        (freq, prediction_length, cardinality,
         train_ds, test_ds) = pickle.load(f, encoding='latin1')

    logger.info(f"number of cars: {cardinality}")
    return train_ds, test_ds
def plot_prob_forecasts(ts_entry, forecast_entry, outputfile):
    """Plot each (series, forecast) pair with 50%/90% prediction bands.

    One figure per entry is saved to ``<outputfile>-<idx>.pdf``; only the
    first target dimension of each (possibly multivariate) entry is drawn.
    """
    plot_length = context_length
    prediction_intervals = (50.0, 90.0)
    legend = ["observations", "median prediction"] + \
             [f"{k}% prediction interval" for k in prediction_intervals][::-1]

    for idx in range(len(ts_entry)):
        fig, axs = plt.subplots(1, 1, figsize=(10, 7))
        # tail of the observed series, first dimension only
        ts_entry[idx].iloc[-plot_length:, 0].plot(ax=axs)
        forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='g')
        plt.grid(which="both")
        plt.legend(legend, loc="upper left")
        plt.savefig(outputfile + '-%d.pdf' % idx)
def evaluate_model_old(estimator, train_ds, test_ds, outputfile, samplecnt = 100):
    """Legacy path: train an estimator, serialize the predictor, forecast the
    test set, plot three sample cars and log aggregated metrics."""
    predictor = estimator.train(train_ds)

    if not os.path.exists(outputfile):
        os.mkdir(outputfile)
    predictor.serialize(Path(outputfile))

    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,       # test dataset
        predictor=predictor,   # predictor
        num_samples=samplecnt, # sample paths per series
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    logger.info(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    # hand-picked sample indices: car12@rank1, car1@rank16, car7@rank33
    # (Indy500 car 12, Will Power)
    offset = 0
    picks = [7 + offset, 0 + offset, 4 + offset]
    ts_entry = [tss[i] for i in picks]
    forecast_entry = [forecasts[i] for i in picks]
    plot_prob_forecasts(ts_entry, forecast_entry, outputfile)

    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
    logger.info(json.dumps(agg_metrics, indent=4))
def evaluate_model_uni(predictor, evaluator, test_ds, outputfile):
    """Forecast ``test_ds`` (100 sample paths) and log aggregated metrics.

    The sample-plotting step is disabled in this univariate variant.
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,     # test dataset
        predictor=predictor, # predictor
        num_samples=100,     # sample paths per series
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    logger.info(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    # sample entries kept for parity with evaluate_model; the plot call is
    # intentionally disabled here
    offset = 0
    picks = [7 + offset, 0 + offset, 4 + offset]
    ts_entry = [tss[i] for i in picks]
    forecast_entry = [forecasts[i] for i in picks]

    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
    logger.info(json.dumps(agg_metrics, indent=4))
def evaluate_model(predictor, evaluator, test_ds, outputfile, samplecnt = 100):
    """Forecast ``test_ds``, plot three sample cars, and log the aggregated
    accuracy metrics produced by ``evaluator``."""
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,       # test dataset
        predictor=predictor,   # predictor
        num_samples=samplecnt, # sample paths per series
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    logger.info(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    # (a disabled code path used to flatten multivariate tss/forecasts to
    # univariate here; the evaluator handles both shapes)

    # hand-picked sample indices: car12@rank1, car1@rank16, car7@rank33
    # (Indy500 car 12, Will Power)
    offset = 0
    picks = [7 + offset, 0 + offset, 4 + offset]
    ts_entry = [tss[i] for i in picks]
    forecast_entry = [forecasts[i] for i in picks]
    plot_prob_forecasts(ts_entry, forecast_entry, outputfile)

    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
    logger.info(json.dumps(agg_metrics, indent=4))
def init_estimator(model, gpuid, epochs=100, batch_size = 32,
        target_dim = 3, distr_output = None, use_feat_static = True):
    """
    Build a GluonTS estimator/predictor by name.

    input:
        model       ; model name (deepAR-Oracle, deepARW-Oracle,
                      deepAR-nocarid, deepAR-multi, simpleFF, deepFactor,
                      deepState, ets, prophet, arima, naive)
        gpuid       ; gpu index, negative for cpu
        epochs, batch_size ; Trainer settings
        target_dim  ; output dimension for the multivariate deepAR variant
        distr_output; distribution output for the deepAR variants
        use_feat_static ; enable static categorical features (car id);
                      when False, `cardinality` is not passed
    return:
        the constructed estimator/predictor; exits on unknown model name

    Refactor: the duplicated use_feat_static branches and the eight
    identical Trainer constructions are collapsed into helpers; the
    per-branch differences (hybridize=False only for deepAR-Oracle and
    simpleFF) are preserved exactly.
    """
    ctx = "cpu" if int(gpuid) < 0 else "gpu(%s)" % gpuid

    def _trainer(hybridize=True):
        # shared Trainer config; hybridize=False only where the original
        # passed it explicitly — otherwise the Trainer default is used
        kwargs = dict(ctx=ctx,
                      batch_size=batch_size,
                      epochs=epochs,
                      learning_rate=1e-3,
                      num_batches_per_epoch=100)
        if not hybridize:
            kwargs['hybridize'] = False
        return Trainer(**kwargs)

    def _deepar_kwargs(hybridize=True):
        # common kwargs of the deepAR-style oracle estimators; cardinality
        # is only supplied when static categorical features are enabled
        kwargs = dict(prediction_length=prediction_length,
                      context_length=context_length,
                      use_feat_static_cat=use_feat_static,
                      use_feat_dynamic_real=True,
                      distr_output=distr_output,
                      freq=freq,
                      trainer=_trainer(hybridize=hybridize))
        if use_feat_static:
            kwargs['cardinality'] = cardinality
        return kwargs

    if model == 'deepAR-Oracle':
        estimator = DeepARSaveDataEstimator(**_deepar_kwargs(hybridize=False))
    elif model == 'deepARW-Oracle':
        estimator = DeepARWEstimator(**_deepar_kwargs())
    elif model == 'deepAR-nocarid':
        # NOTE: unlike the oracle branches, cardinality is always passed here
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            use_feat_static_cat=use_feat_static,
            cardinality=cardinality,
            use_feat_dynamic_real=True,
            distr_output=distr_output,
            freq=freq,
            trainer=_trainer())
    elif model == 'deepAR-multi':
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            use_feat_static_cat=use_feat_static,
            cardinality=cardinality,
            freq=freq,
            trainer=_trainer(),
            distr_output=MultivariateGaussianOutput(dim=target_dim))
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length=context_length,
            freq=freq,
            trainer=_trainer(hybridize=False))
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            freq=freq,
            trainer=_trainer())
    elif model == 'deepState':
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=_trainer())
    elif model == 'ets':
        estimator = RForecastPredictor(method_name='ets', freq=freq,
                                       prediction_length=prediction_length)
    elif model == 'prophet':
        estimator = ProphetPredictor(freq=freq,
                                     prediction_length=prediction_length)
    elif model == 'arima':
        estimator = RForecastPredictor(method_name='arima', freq=freq,
                                       prediction_length=prediction_length,
                                       trunc_length=200)
    elif model == 'naive':
        estimator = NaivePredictor(freq=freq,
                                   prediction_length=prediction_length)
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)

    return estimator
if __name__ == '__main__':
    # ---- logging setup -------------------------------------------------
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # ---- cmd argument parser -------------------------------------------
    usage = 'deepar_simindy500.py --epochs epochs --input inputpicklefile --output outputfile'
    parser = OptionParser(usage)
    parser.add_option("--input", dest="inputfile", default='sim-indy500-laptime-2018.pickle')
    parser.add_option("--output", dest="outputfile")
    parser.add_option("--epochs", dest="epochs", default=100)
    parser.add_option("--model", dest="model", default="deepAR")
    parser.add_option("--gpuid", dest="gpuid", default=0)
    parser.add_option("--contextlen", dest="contextlen", default=100)
    parser.add_option("--batch_size", dest="batch_size", default=32)
    #parser.add_option("--predictionlen", dest="predictionlen", default=50)
    #parser.add_option("--testlen", dest="testlen", default=50)
    parser.add_option("--nosave", dest="nosave", action="store_true", default=False)
    parser.add_option("--evalmode", dest="evalmode", action="store_true", default=False)
    parser.add_option("--distr_output", dest="distr_output", default='student')
    parser.add_option("--nocarid", dest="nocarid", action="store_true", default=False)
    #obsolete
    parser.add_option("--mode", dest="mode", default='train')
    parser.add_option("--savedata", dest="savedata", default='savedata')
    opt, args = parser.parse_args()
    #set the global length
    #prediction_length = int(opt.predictionlen)
    context_length = int(opt.contextlen)
    #test_length = int(opt.testlen)
    #ts_type = int(opt.ts_type)
    #train_ds, test_ds = load_dataset(opt.inputfile, ts_type)
    train_ds, test_ds = load_dataset(opt.inputfile)
    # infer the target dimension from the first training record:
    # multivariate targets are 2-d (dim, time); univariate are 1-d.
    entry = next(iter(train_ds))
    target_dim = entry['target'].shape
    target_dim = target_dim[0] if len(target_dim) > 1 else 1
    logger.info('target_dim:%s', target_dim)
    # NOTE(review): prediction_length and freq are assumed to be module-level
    # globals defined earlier in this file -- confirm they exist.
    runid = f'-i{opt.outputfile}-e{opt.epochs}-m{opt.model}-p{prediction_length}-c{opt.contextlen}-f{freq}-dim{target_dim}-dstr{opt.distr_output}'
    logger.info("runid=%s", runid)
    # ---- build the estimator -------------------------------------------
    # classical baselines are ready-made predictors and need no training
    classical_models = ['ets', 'arima', 'prophet', 'naive']
    distr_outputs ={'student':StudentTOutput(),
                    'negbin':NegativeBinomialOutput()
                   }
    if opt.distr_output in distr_outputs:
        distr_output = distr_outputs[opt.distr_output]
    else:
        logger.error('output distr no found:%s', opt.distr_output)
        exit(-1)
    use_feat_static = True
    if opt.nocarid:
        use_feat_static = False
    estimator = init_estimator(opt.model, opt.gpuid,
            opt.epochs, opt.batch_size, target_dim, distr_output = distr_output, use_feat_static = use_feat_static)
    # ---- train or load -------------------------------------------------
    if opt.evalmode == False:
        if opt.model in classical_models:
            predictor = estimator
        else:
            predictor = estimator.train(train_ds)
            # NOTE(review): assumes the custom network exposes a `savedata`
            # attribute after training -- confirm for each model type.
            data = estimator.network.savedata
        #if not opt.nosave:
        #    if not os.path.exists(opt.outputfile):
        #        os.mkdir(opt.outputfile)
        #
        #    logger.info('Start to save the model to %s', opt.outputfile)
        #    predictor.serialize(Path(opt.outputfile))
        #    logger.info('End of saving the model.')
    else:
        if not os.path.exists(opt.outputfile):
            # bug fix: the f-string referenced an undefined name `outputfile`,
            # raising NameError instead of reporting the missing path.
            logger.error(f'error:{opt.outputfile} not exists')
            exit(-1)
        logger.info('Start to load the model from %s', opt.outputfile)
        predictor = Predictor.deserialize(Path(opt.outputfile))
        logger.info('End of loading the model.')
    # ---- evaluate ------------------------------------------------------
    if opt.evalmode == True:
        #if opt.multi!=0:
        if target_dim > 1:
            logger.info('Start MultivariateEvaluator')
            evaluator = MultivariateEvaluator(quantiles=[0.1, 0.5, 0.9])
        else:
            logger.info('Start Evaluator')
            evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
        #forece to single item
        print('batch size:', predictor.batch_size)
        predictor.batch_size = 1
        print('batch size reset:', predictor.batch_size)
        evaluate_model(predictor, evaluator, test_ds, opt.outputfile, samplecnt=1)
        #evaluate_model_uni(predictor, evaluator, test_ds, opt.outputfile)
    #
    #predictor.prediction_net.rnn.summary()
    # ---- dump collected internal data to pickle files ------------------
    data = predictor.prediction_net.savedata
    #target = estimator.network.savetarget
    #other = estimator.network.saveother
    print('len(savedata):', data.keys())
    savefile = opt.savedata
    with open(savefile, 'wb') as f:
        savedata = [data['input'], data['target'],0]
        #pickle.dump([data,target,other], f, pickle.HIGHEST_PROTOCOL)
        pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        #pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
    with open("alldata-" + savefile, 'wb') as f:
        pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
    logger.info('Save data size=%d to %s'%(len(data), savefile))
# --- dataset-extraction artifact (file stats row): length 18,872 chars, avg line 34.74, max line 146, ext .py ---
# source file: rankpredictor-master/src/indycar/model/prophet_laptime-rank-v2.py
#!/usr/bin/env python
# coding: utf-8
# # Prophet on laptime&rank dataset
#
# https://gluon-ts.mxnet.io/api/gluonts/gluonts.model.prophet.html
#
# laptime&rank dataset
# <eventid, carids, laptime (totalcars x totallaps), rank (totalcars x totallaps)>; filled with NaN
# In[1]:
# Third-party imports
get_ipython().run_line_magic('matplotlib', 'inline')
import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
# In[2]:
### test on one run
from gluonts.dataset.common import ListDataset
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
def nan_helper(y):
    """Locate NaNs in a 1-d array.

    Returns a boolean mask of the NaN positions together with a helper
    that converts a logical mask into integer indices, e.g. for linear
    interpolation of the gaps:

        >>> nans, idx = nan_helper(y)
        >>> y[nans] = np.interp(idx(nans), idx(~nans), y[~nans])
    """
    mask = np.isnan(y)
    to_indices = lambda logical: logical.nonzero()[0]
    return mask, to_indices
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
def make_dataset(runs, prediction_length, freq,
                 run_ts=2, train_ratio = 0.8,
                 use_global_dict = True):
    """
    split the ts to train and test part by the ratio

    Builds GluonTS ListDatasets from the module-level `laptime_data`.
    The train split is the first `train_ratio` fraction of the longest
    series in each event; the test split is a set of rolling windows
    (step = prediction_length/2) over the remainder.

    runs            ; event index into laptime_data, or <0 for all events
    run_ts          ; feature row to use as target (2 = laptime)
    use_global_dict ; map car numbers through global_carids (real data);
                      False for simulation data where rowid is the id
    returns (train_ds, test_ds, train_set, test_set)
    """
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    train_set = []
    test_set = []
    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()
    #_data: eventid, carids, laptime array
    for _data in _laptime_data:
        _train = []
        _test = []
        #statistics on the ts length; train_len is fixed per event
        ts_len = [ x.shape[0] for x in _data[run_ts]]
        train_len = int(np.max(ts_len) * train_ratio)
        print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
        for rowid in range(_data[run_ts].shape[0]):
            rec = _data[run_ts][rowid, :].copy()
            #remove nan (NaNs are assumed to be only at the tail)
            nans, x= nan_helper(rec)
            nan_count = np.sum(nans)
            rec = rec[~np.isnan(rec)]
            # remove short ts that cannot cover train + one test window
            totallen = rec.shape[0]
            if ( totallen < train_len + prediction_length):
                print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            # split and add to dataset record
            _train.append({'target': rec[:train_len].astype(np.float32),
                           'start': start,
                           'feat_static_cat': carid}
                          )
            # multiple test ts(rolling window as half of the prediction_length)
            test_rec_cnt = 0
            for endpos in range(totallen, train_len+prediction_length, -int(prediction_length/2)):
                _test.append({'target': rec[:endpos].astype(np.float32),
                              'start': start,
                              'feat_static_cat': carid}
                             )
                test_rec_cnt += 1
            #add one ts
            print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
        train_set.extend(_train)
        test_set.extend(_test)
    # train dataset: cut the last window of length "prediction_length", add "target" and "start" fields
    train_ds = ListDataset(train_set, freq=freq)
    # test dataset: use the whole dataset, add "target" and "start" fields
    test_ds = ListDataset(test_set, freq=freq)
    return train_ds, test_ds, train_set, test_set
def evaluate_model(test_ds, predictor, output=''):
    """Backtest *predictor* on *test_ds*, print aggregate metrics and plot sample #7."""
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,      # test dataset
        predictor=predictor,  # predictor under evaluation
        num_samples=100,      # sample paths per forecast
    )
    all_forecasts = list(forecast_it)
    all_series = list(ts_it)
    # aggregate accuracy metrics over the whole test set
    agg_metrics, item_metrics = Evaluator(quantiles=[0.1, 0.5, 0.9])(
        iter(all_series), iter(all_forecasts), num_series=len(test_ds))
    print(json.dumps(agg_metrics, indent=4))
    # visualize one representative series (index 7)
    plot_prob_forecasts(all_series[7], all_forecasts[7], output)
def plot_prob_forecasts(ts_entry, forecast_entry, output):
    """Plot the last 50 observations of *ts_entry* with the probabilistic
    forecast (50%/90% intervals) overlaid; save as <output>.pdf if given."""
    plot_length = 50
    prediction_intervals = (50.0, 90.0)
    interval_labels = [f"{k}% prediction interval" for k in prediction_intervals]
    legend = ["observations", "median prediction"] + interval_labels[::-1]
    fig, ax = plt.subplots(1, 1, figsize=(10, 7))
    # observed series first, forecast fan chart on top
    ts_entry[-plot_length:].plot(ax=ax)
    forecast_entry.plot(prediction_intervals=prediction_intervals, color='g')
    plt.grid(which="both")
    plt.legend(legend, loc="upper left")
    if output:
        plt.savefig(output + '.pdf')
    plt.show()
# prophet
def run_prophet(dataset, prediction_length, freq, output=''):
    """Evaluate a Prophet baseline on *dataset*."""
    evaluate_model(dataset,
                   ProphetPredictor(freq=freq, prediction_length=prediction_length),
                   output)
# ets
def run_ets(dataset, prediction_length, freq, output=''):
    """Evaluate an R 'ets' (exponential smoothing) baseline on *dataset*."""
    evaluate_model(dataset,
                   RForecastPredictor(method_name='ets', freq=freq,
                                      prediction_length=prediction_length),
                   output)
# arima
def run_arima(dataset, prediction_length, freq, output=''):
    """Evaluate an R 'arima' baseline on *dataset*.

    Bug fix: this function was previously named ``run_ets`` (copy-paste
    error), which shadowed the real ``run_ets`` above and left ``run_arima``
    -- called later in this module -- undefined (NameError at runtime).
    """
    predictor = RForecastPredictor(method_name='arima', freq=freq,
                                   prediction_length=prediction_length)
    evaluate_model(dataset, predictor, output)
# ## Indy Dataset
#
# In[3]:
import pickle
### load indy
# Load the preprocessed Indy dataset:
#   global_carids     : car_number -> global integer id
#   laptime_data_indy : per-event records [eventid, carids, laptime, rank]
with open('laptime_rank-2018.pickle', 'rb') as f:
    # The protocol version used is detected automatically, so we do not
    # have to specify it.
    global_carids, laptime_data_indy = pickle.load(f, encoding='latin1')
# In[4]:
events = ['Phoenix','Indy500','Texas','Iowa','Pocono','Gateway']
events_id={key:idx for idx, key in enumerate(events)}
# In[5]:
print(f"events: {events}")
# In[6]:
laptime_data = laptime_data_indy
# NOTE(review): result discarded -- notebook-export artifact (a cell that
# only displayed the cast array).
laptime_data[2][2].astype(np.float32)
# In[7]:
# global configuration
prediction_length = 5
freq = "1min"
cardinality = [len(global_carids)]
# feature-row indices into the laptime records
TS_LAPTIME=2
TS_RANK=3
run_ts = TS_LAPTIME
# In[8]:
#run on indy500 dataset (event index 1)
train_ds, test_ds,_,_ = make_dataset(1, prediction_length,freq)
# In[9]:
# NOTE(review): notebook debug magic; remove before running as a script.
get_ipython().run_line_magic('debug', '')
# In[ ]:
output = f'Prophet-indy-indy500'
run_prophet(test_ds, prediction_length, freq, output)
output = f'ETS-indy-indy500'
run_ets(test_ds, prediction_length, freq, output)
output = f'ARIMA-indy-indy500'
# NOTE(review): verify run_arima is defined in this module before running.
run_arima(test_ds, prediction_length, freq, output)
# In[ ]:
# In[ ]:
# test all events (runs = -1)
train_ds, test_ds,_,_ = make_dataset(-1, prediction_length,freq)
output = f'Prophet-indy-all'
run_prophet(test_ds, prediction_length, freq, output)
output = f'ETS-indy-all'
run_ets(test_ds, prediction_length, freq, output)
output = f'ARIMA-indy-all'
run_arima(test_ds, prediction_length, freq, output)
# In[ ]:
# plot first train/test series and mark the train/test boundary
entry = next(iter(train_ds))
train_series = to_pandas(entry)
entry = next(iter(test_ds))
test_series = to_pandas(entry)
test_series.plot()
plt.axvline(train_series.index[-1], color='r') # end of train dataset
plt.grid(which="both")
plt.legend(["test series", "end of train series"], loc="upper left")
plt.show()
# Individual metrics are aggregated only across time-steps.
# In[ ]:
# NOTE(review): item_metrics is local to evaluate_model and is not defined
# at module scope -- these two cells fail unless it is assigned manually.
item_metrics.head()
# In[ ]:
item_metrics.plot(x='MSIS', y='MASE', kind='scatter')
plt.grid(which="both")
plt.show()
# In[ ]:
# ### test on sim-indy dataset
# In[ ]:
import pickle
with open('sim-indy500-laptime-2018.pickle', 'rb') as f:
    # The protocol version used is detected automatically, so we do not
    # have to specify it.
    laptime_data_simindy = pickle.load(f, encoding='latin1')
# NOTE(review): this prints the length of the *previous* laptime_data,
# not the freshly loaded simulation data -- confirm intent.
print(f"number of runs: {len(laptime_data)}")
# In[ ]:
laptime_data = laptime_data_simindy
#run on indy500 dataset; simulation data has no global car-id dictionary
train_ds, test_ds,_,_ = make_dataset(1, prediction_length,freq, use_global_dict=False)
output = f'Prophet-simindy-indy500'
run_prophet(test_ds, prediction_length, freq, output)
# In[ ]:
get_ipython().run_line_magic('debug', '')
# In[ ]:
output = f'ETS-simindy-indy500'
run_ets(test_ds, prediction_length, freq, output)
output = f'ARIMA-simindy-indy500'
run_arima(test_ds, prediction_length, freq, output)
# In[ ]:
# test all
#train_ds, test_ds,_,_ = make_dataset(-1, prediction_length,freq)
#output = f'Prophet-simindy-all'
#run_prophet(test_ds, prediction_length, freq, output)
#output = f'ETS-simindy-all'
#run_ets(test_ds, prediction_length, freq, output)
#output = f'ARIMA-simindy-all'
#run_arima(test_ds, prediction_length, freq, output)
# In[ ]:
# --- dataset-extraction artifact (file stats row): length 9,548 chars, avg line 25.09, max line 121, ext .py ---
# source file: rankpredictor-master/src/indycar/model/evaluate-fulltest.py
#!/usr/bin/env python
# coding: utf-8
# ## evaluate-fulltest
#
# based on: Laptime2Rank-evaluate-fulltest-disturbance
#
# + rank prediction directly
# + rank prediction by laptime2rank
# + laptime prediction
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# In[2]:
import os

# Seed the PRNG from OS entropy: runs are deliberately non-reproducible.
random.seed()
# NOTE(review): result discarded -- notebook-export artifact.
os.getcwd()
#GPUID = 1
# ### global constants
# In[3]:
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# Row indices of the per-car feature matrix rec[feature, lap].
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
# oracle mode -- bit flags combined via test_flag()
MODE_ORACLE = 1024 # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2
# oracle mode for training
# NOTE(review): MODE_NOLAP/MODE_NOTRACK reuse bits 1 and 2 of the oracle
# flags above -- training and testing flags must not be mixed in one mask.
MODE_NOLAP = 1
MODE_NOTRACK = 2
# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32
# disturbe analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256
# flag bit -> human-readable name, used by get_modestr()
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
             MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
             MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
             MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
             MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
             MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
# In[4]:
#
# configurataion
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
# Experiment selection. Model path layout: <_dataset_id>/<_task_id>-<trainid>/
_dataset_id = 'indy2013-2018-nocarid'
#_dataset_id = 'indy2013-2018'
_test_event = 'Indy500-2019'
#_task_id = 'rank' # rank,laptime, the trained model's task
#_run_ts = COL_RANK #COL_LAPTIME,COL_RANK
#_exp_id='rank'  #rank, laptime, laptim2rank, timediff2rank...
# Active configuration: train on laptime, derive rank from laptime predictions.
_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank'  #rank, laptime, laptim2rank, timediff2rank...
#_task_id = 'laptime' # rank,laptime, the trained model's task
#_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
#_exp_id='laptime'  #rank, laptime, laptim2rank, timediff2rank...
# In[5]:
def load_data(event, year=0):
    """Load one race's completed-lap CSV and derive per-lap tables.

    event ; race name, part of the CSV filename C_<event>[-<year>].csv
    year  ; optional year suffix; 0 (default) means the filename has none

    returns (alldata, rankdata, acldata):
        alldata  ; raw CSV rows
        rankdata ; first record per (car, lap) ordered by elapsed time
        acldata  ; completed-lap table for all cars (see make_cl_data)

    Bug fix: the year>0 path concatenated str + int
    ('...' + event + '-' + year + '.csv'), raising TypeError; the path is
    now built with an f-string.
    """
    #inputfile = '../data/final/C_'+ event +'-' + year + '-final.csv'
    if year > 0:
        inputfile = f'../data/final/C_{event}-{year}.csv'
    else:
        inputfile = f'../data/final/C_{event}.csv'
    dataset = pd.read_csv(inputfile)
    final_lap = max(dataset.completed_laps)
    # cars present on the final lap are the race finishers
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    #make a copy; keep the raw rows, and a finishers-only view
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    # first record per (car, lap) when ordered by elapsed time
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # (removed dead code: make_cl_data(dataset) result was never used)
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-car completed-lap table.

    Keeps the first record per (car_number, completed_laps) ordered by
    elapsed time, then computes per-lap deltas within each car.

    returns columns: car_number, completed_laps, rank, elapsed_time,
        rank_diff, time_diff, current_status, track_status, lap_status

    Fixes: chained assignment (uni_ds['col'][mask] = 0) triggered
    SettingWithCopyWarning and is not guaranteed to write through; replaced
    with .loc indexing. Dead local `carnumber` removed.
    """
    # pick up data with valid rank: first record per (car, lap) by elapsed time
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade", "laps_behind_prec", "overall_rank", "pit_stop_count",
                          "last_pitted_lap", "start_position", "laps_led"], axis=1)
    # per-lap deltas; zero the first row of each car (diff crosses car boundaries)
    new_car = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[new_car, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[new_car, 'time_diff'] = 0
    df = uni_ds[['car_number', 'completed_laps', 'rank', 'elapsed_time',
                 'rank_diff', 'time_diff', "current_status", "track_status", "lap_status"]]
    return df
# In[6]:
def nan_helper(y):
    """Return (mask, index_fn) for the NaNs in a 1-d array.

    mask is boolean at the NaN positions; index_fn maps any logical mask
    to integer indices, the shape needed by np.interp, e.g.:

        >>> nans, idx = nan_helper(y)
        >>> y[nans] = np.interp(idx(nans), idx(~nans), y[~nans])
    """
    def to_index(logical):
        # positions where the mask is True
        return logical.nonzero()[0]
    return np.isnan(y), to_index
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# NOTE(review): this whole constant section duplicates the definitions
# earlier in the file (notebook-export artifact); values are identical.
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
# oracle mode
MODE_ORACLE = 1024 # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2
# oracle mode for training
MODE_NOLAP = 1
MODE_NOTRACK = 2
# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32
# disturbe analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256
# flag bit -> human-readable name, used by get_modestr()
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
             MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
             MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
             MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
             MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
             MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
def get_modestr(a):
    """Return a comma-terminated string naming every mode flag set in *a*."""
    names = [_mode_map[flag] for flag in _mode_map if test_flag(a, flag)]
    return ''.join('%s,' % name for name in names)
# endpos -> vector of prediction_length
# Caches for the track-status predictor: per split-point (endpos) the
# predicted window and the corresponding ground truth.
_track_pred = {}
_track_true = {}
def init_track_model():
    """Clear the cached track-status predictions and their ground truth."""
    global _track_pred, _track_true
    _track_pred = dict()
    _track_true = dict()
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """
    return the predicted track status

    Samples a caution (yellow-flag) duration from an empirical model and
    extends any yellow run that is in progress at the prediction start.
    Results are cached per *endpos* so all cars share one prediction at
    the same split point.
    """
    global _track_pred,_track_true
    # this is the perfect track model for Indy500 2018
    track_model = [6,4,4,5,6,6,4]
    if endpos in _track_pred:
        return _track_pred[endpos]
    else:
        #get yflag lap count from the start pred point
        # (count consecutive yellow laps immediately before the window)
        yflaplen = 0
        for i in range(1, context_len):
            if track_rec[- prediction_length - i] == 1:
                yflaplen += 1
            else:
                break
        #laps remain, fill into the future
        trackpred = np.array([0 for x in range(prediction_length)])
        yflap_pred = random.choice(track_model)
        # only extend a yellow that is already underway and predicted longer
        if yflaplen > 0 and yflap_pred > yflaplen:
            trackpred[:(yflap_pred - yflaplen)] = 1
        _track_pred[endpos] = trackpred
        _track_true[endpos] = track_rec[- prediction_length:].copy()
        return trackpred
# endpos -> vector of prediction_length
# Cache of adjusted track-status windows, shared across cars per endpos.
_track_adjust = {}
def init_adjust_track_model():
    """Reset the cached per-endpos track-status adjustments."""
    global _track_adjust
    _track_adjust = dict()
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """
    input:
        tailpos ; <0 end pos of 1 (negative index of the last yellow lap)
    return the predicted track status

    Randomly perturbs the end of the caution period by -1/0/+1 laps for
    disturbance analysis; cached per *endpos*.
    """
    global _track_adjust
    # this is the perfect track model for Indy500 2018
    track_model = [-1,0,1]
    if endpos in _track_adjust:
        return _track_adjust[endpos]
    else:
        yflap_adjust = random.choice(track_model)
        #laps remain, fill into the future
        trackadjust = track_rec[-prediction_length:].copy()
        if yflap_adjust == -1:
            # shorten the caution by one lap
            trackadjust[tailpos] = 0
        elif yflap_adjust == 1:
            # NOTE(review): this also clears tailpos before setting the
            # next lap, i.e. the yellow is shifted rather than extended
            # when tailpos+1 is inside the window -- confirm intent.
            trackadjust[tailpos] = 0
            if (tailpos + 1) <= -1:
                trackadjust[tailpos+1] = 1
        _track_adjust[endpos] = trackadjust
        return trackadjust
# carno -> lap_status
# carno -> adjusted lap-status vector, fixed per car for the whole run
_lap_adjust = {}
# pit-shift offset -> observed count (empirical histogram of adjustments)
_empirical_model = {}
def init_adjust_pitmodel():
    """Reset the cached per-car adjusted lap status and the empirical
    adjustment histogram.

    Bug fix: `_empirical_model` was assigned without a `global`
    declaration, so it only created a dead local and the module-level
    histogram was never actually reset between runs.
    """
    global _lap_adjust, _empirical_model
    _lap_adjust = {}
    _empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """
    init the lapstatus for each car, save it for future reference
    input:
        carno;
        lapstatus ; the trueth (ground-truth pit-lap indicator vector)
        force     ; True = retry until every pit lap lands inside the
                    vector; False = try the shift only once
    Each pit lap is randomly shifted according to _adjust_model; the
    result is cached in _lap_adjust so the same car always gets the same
    perturbation, and accepted shifts are tallied in _empirical_model.
    """
    if carno not in _lap_adjust:
        #adjust it
        lapadjust = lapstatus.copy()
        for pos in range(0, len(lapstatus)):
            if lapadjust[pos] == 1:
                success = False
                while(not success):
                    # adjust this pit lap position
                    pos_adjust = get_random_choice(_adjust_model)
                    new_pos = pos + pos_adjust
                    if new_pos >= 0 and new_pos < len(lapstatus):
                        #valid
                        lapadjust[pos] = 0
                        lapadjust[new_pos] = 1
                        success = True
                        #add statistics
                        if pos_adjust not in _empirical_model:
                            _empirical_model[pos_adjust] = 1
                        else:
                            _empirical_model[pos_adjust] += 1
                    if force==False:
                        break
        _lap_adjust[carno] = lapadjust
    return _lap_adjust[carno]
def build_random_model(modeldict):
    """
    Convert a {value: weight} mapping into a sampling table.

    input:
        modeldict ; {val: probability weight}
    return:
        model ; array of [val, cdf] rows, values ascending, cdf column
                normalized so the last entry is 1.0
    """
    model = np.zeros((len(modeldict), 2))
    running = 0.0
    for row, val in enumerate(sorted(modeldict)):
        running += modeldict[val]
        model[row, 0] = val
        model[row, 1] = running
    # normalize the cumulative column by the total weight
    model[:, 1] = model[:, 1] / running
    return model
def print_model(model, iscdf=True):
    """
    Pretty-print a sampling table as 'value:probability' pairs.

    input:
        model ; array of [val, weight] rows; weight is a cdf when
                iscdf=True, otherwise a raw (unnormalized) probability
    """
    ordered = model[np.argsort(model[:, 0])]
    total = 1. if iscdf else np.sum(ordered[:, 1])
    prev_cdf = 0
    pairs = []
    for val, weight in ordered:
        # undo the cumulative sum (cdf mode) or normalize (raw mode)
        pairs.append((val, (weight - prev_cdf) / total))
        if iscdf:
            prev_cdf = weight
    print(['%d:%.3f'%(x[0],x[1]) for x in pairs])
def get_random_choice(model):
    """
    Draw one value from a sampling table.

    input:
        model ; array of [val, cdf] rows with the cdf column ascending
    return:
        val sampled according to the probabilities encoded in the cdf
    """
    u = np.random.rand()
    # index of the first row whose cdf >= u == count of cdf entries < u
    row = int(np.searchsorted(model[:, 1], u, side='left'))
    return int(model[row, 0])
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
# Pit-shift distribution: offset (laps) -> raw weight.  Weights need not
# sum to 1 -- build_random_model normalizes them into a cdf table.
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
_adjust_model = build_random_model(_modeldict)
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """
    input:
        lap_rec ; lap-status vector; only the last prediction_length
                  entries are considered
        force   ; True = retry until the shifted pit lands inside the
                  window; False = attempt the shift only once
    return the predicted lap status (a copy of the window with each pit
    lap randomly shifted according to _adjust_model)
    """
    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            success = False
            while(not success):
                # adjust this pit lap position
                pos_adjust = get_random_choice(_adjust_model)
                new_pos = pos + pos_adjust
                if new_pos >= 0 and new_pos < prediction_length:
                    #valid
                    lapadjust[pos] = 0
                    lapadjust[new_pos] = 1
                    success = True
                if force==False:
                    break
    return lapadjust
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
    """
    Randomly shift each pit stop in the last *prediction_length* laps by
    -1/0/+1 laps (uniform), dropping shifts that would leave the window.

    input:
        lap_rec ; lap-status vector; only the trailing window is used
        endpos  ; unused, kept for interface compatibility with callers
    return the adjusted lap-status window (a copy)

    Bug fix: removed the dead draw `lap_adjust = random.choice(...)` whose
    result was never used (it only consumed an extra RNG sample).
    """
    adjust_model = [-1, 0, 1]
    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            # adjust this pit lap position
            pos_adjust = random.choice(adjust_model)
            if pos_adjust == -1:
                if (pos - 1 >= 0):
                    lapadjust[pos] = 0
                    lapadjust[pos - 1] = 1
            elif pos_adjust == 1:
                # note: a +1 shift may be re-shifted when the loop reaches
                # pos+1 (same behavior as the original implementation)
                if (pos + 1 < prediction_length):
                    lapadjust[pos] = 0
                    lapadjust[pos + 1] = 1
    return lapadjust
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
    """
    return the predicted pit status

    Samples a stint length (laps between pits) from an empirical model
    and marks the predicted pit lap inside the prediction window, if it
    falls there.  cuation_laps_instint selects which sub-model to sample:
    many caution laps -> the shorter-stint distribution.
    """
    # this is the perfect empirical pit model for Indy500 2018
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
                 [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
                 [46, 45, 43, 48, 46, 45, 45, 43]]
    pit_model = pit_model_all
    if cuation_laps_instint>10:
        #use low model
        pred_pit_laps = random.choice(pit_model[0])
    else:
        pred_pit_laps = random.choice(pit_model[1])
    #laps remain, fill into the future
    pitpred = np.array([0 for x in range(prediction_length)])
    # mark the pit only if it lands inside the prediction window
    if (pred_pit_laps > laps_instint) and (pred_pit_laps <= laps_instint + prediction_length):
        pitpred[pred_pit_laps - laps_instint - 1] = 1
    return pitpred
def make_dataset_byevent(runs, prediction_length, freq,
                         useeid = False,
                         run_ts= _run_ts,
                         test_event = _test_event,
                         test_cars = [],
                         use_global_dict = True,
                         oracle_mode = MODE_ORACLE,
                         half_moving_win = 0,
                         train_ratio=0.8,
                         verbose = False
                        ):
    """
    split the ts to train and test part by the ratio

    Events other than *test_event* go entirely to the train set; the test
    event is split into rolling evaluation windows whose track/lap-status
    covariates are transformed according to *oracle_mode* (oracle, zeroed,
    cur-track, model-predicted, or disturbed variants).

    input:
        oracle_mode: false to simulate prediction in real by
                set the covariates of track and lap status as nan in the testset
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen (window step)
    NOTE(review): test_cars uses a mutable default ([]) and is unused in
    the body -- kept for interface compatibility.
    returns (train_ds, test_ds, train_set, test_set)
    """
    init_track_model()
    init_adjust_track_model()
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    train_set = []
    test_set = []
    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus  (accumulated |adjusted - oracle| over windows)
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)
        if verbose:
            print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            # static categorical features: car id, optionally event id
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            # adjust for disturbance analysis (perturb oracle pit laps)
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)
            # selection of features (drop track or lap covariate entirely)
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': rec[run_ts,:].astype(np.float32),
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                                     rec[COL_LAPSTATUS,:]]
                              }
                             )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                context_len = train_len
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1
                #bug fix, fixed the split point for all cars/ts
                for endpos in range(max_len, context_len+prediction_length,
                                    step):
                    #check if enough for this ts
                    if endpos > totallen:
                        continue
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    # stint state at the last observed lap before the window
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode: replace the unseen covariate future
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                                                     laps_instint,prediction_length)
                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec
                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec
                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))
                    _test.append({'target': rec[run_ts,:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec]
                                  #'feat_dynamic_real': [rec[COL_TRACKSTATUS,:endpos],
                                  #                      rec[COL_LAPSTATUS,:endpos]]
                                 }
                                )
                    test_rec_cnt += 1
            #add one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
        train_set.extend(_train)
        test_set.extend(_test)
    print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)
    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile, freq, prediction_length, cardinality, train_ds, test_ds):
    """Pickle the dataset bundle to `datafile`.

    The bundle layout is [freq, prediction_length, cardinality, train_ds,
    test_ds], written with the highest protocol available.
    """
    bundle = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as sink:
        # highest protocol: fastest / most compact representation
        pickle.dump(bundle, sink, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[7]:
def predict(test_ds, predictor):
    """Run `predictor` over `test_ds` and materialize the results.

    Returns (tss, forecasts): the ground-truth series and the forecast
    objects produced by make_evaluation_predictions.
    """
    forecast_iter, ts_iter = make_evaluation_predictions(
        dataset=test_ds,      # test dataset
        predictor=predictor,  # predictor
        num_samples=100,      # number of sample paths we want for evaluation
    )
    # consume the forecast iterator first, then the ground-truth iterator
    forecasts = list(forecast_iter)
    tss = list(ts_iter)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name, trainid):
    """Forecast `test_ds` with the requested model.

    input:
        test_ds           ; dataset to forecast
        prediction_length ; forecast horizon (also selects the serialized
                            model directory name)
        model_name        ; a serialized deepAR/deepAR-Oracle variant
                            ('curtrack','zerotrack','oracle','oracle-laponly',
                             'oracle-trackonly','deepAR') or a baseline
                            ('naive','zero','arima')
        trainid           ; id of the trained model set to load
    return:
        [tss, forecasts] on success, [] for an unknown model name
    """
    # NOTE(review): GPU id is hard-coded — confirm device availability
    with mx.Context(mx.gpu(7)):
        pred_ret = []
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'
        # serialized models: model name -> on-disk directory name
        # (consolidates the previous six copy/pasted per-model branches)
        saved_models = {
            'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
            'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
            'oracle': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
            'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
            'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
            'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
        }
        if model_name in saved_models:
            # deepAR / deepAR-Oracle: deserialize a pre-trained predictor
            modeldir = rootdir + saved_models[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        # naive baseline
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq=freq, prediction_length=prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        # zero baseline; zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq=freq, prediction_length=prediction_length)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        # arima via the R forecast package
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq=freq,
                                           prediction_length=prediction_length, trunc_length=60)
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        else:
            print(f'error: model {model_name} not support yet!')
        return pred_ret
# In[8]:
#calc rank
def eval_rank_bytimediff(test_ds, tss, forecasts, prediction_length):
    """Evaluate rank prediction for timediff models (works for one event only).

    input:
        test_ds           ; test dataset for a single event
        tss, forecasts    ; ground-truth series / forecast objects from predict()
        prediction_length ; expected forecast horizon
    return:
        rank_ret     ; list of [lap, time_diff, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [truth tail, prediction tail]}}
    """
    carlist = []
    # completed_laps -> {carno -> [true timediff tail, predicted timediff tail]}
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        # sanity check: forecast horizon must match prediction_length
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: message was missing its f prefix, so the placeholders
            # were printed literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bugfix: return an empty pair so callers that unpack two values
            # fail cleanly instead of raising on a bare []
            return [], {}
        # point forecast: median over the sample paths
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))
        timediff_array = tss[idx].values.copy()
        # first lap index covered by this forecast window
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                                               forecast_laptime_mean.copy()]
    # calc rank per lap
    rank_ret = []
    for lap in forecasts_et.keys():
        # cars that have a record starting at this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}
        # [0] = truth, [1] = prediction
        time_diff = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        # double argsort turns the time differences into rank positions
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, time_diff, true_rank, pred_rank])
    return rank_ret, forecasts_et
#calc rank
def eval_rank_bylaptime(test_ds, tss, forecasts, prediction_length, start_offset):
    """Evaluate rank by laptime forecasting.

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event (DataFrame
                with 'car_number' and 'elapsed_time' columns)
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
    """
    carlist = []
    # completed_laps -> {carno -> [true elapsed-time tail, predicted tail]}
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        # elapsed time at lap 0 for this car, taken from the offset table
        offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        # sanity check: forecast horizon must match prediction_length
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: message was missing its f prefix
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bugfix: empty pair instead of bare [] for two-value unpacking
            return [], {}
        # point forecast: mean over the sample paths
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        # true elapsed time = cumulative lap time + lap-0 offset
        laptime_array = tss[idx].values.copy()
        elapsed_time = np.cumsum(laptime_array) + offset
        # predicted elapsed time: splice the forecast into the tail
        laptime_array = tss[idx].values.copy()
        laptime_array[-prediction_len:] = forecast_laptime_mean
        elapsed_time_hat = np.cumsum(laptime_array) + offset
        # first lap index covered by this forecast window
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]
    # calc rank per lap
    rank_ret = []
    for lap in forecasts_et.keys():
        # cars that have a record starting at this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}
        # [0] = truth, [1] = prediction
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0]
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1]
        # double argsort converts elapsed times into rank positions
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])
    return rank_ret, forecasts_et
def eval_laptime(test_ds, tss, forecasts, prediction_length, start_offset):
    """Evaluate laptime forecasting directly (no rank conversion).

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                contain features to identify the eventid
        start_offset[]; elapsed time for lap0 (kept for interface symmetry with
                eval_rank_bylaptime; not used here)
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, lap_time, true_laptime, pred_laptime]
        forecasts_et ; {[completed_laps][carno]} ->(laptime, laptime_pred)
    """
    carlist = []
    # completed_laps -> {carno -> [true laptime tail, predicted laptime tail]}
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        # sanity check: forecast horizon must match prediction_length
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: message was missing its f prefix
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bugfix: empty pair instead of bare [] for two-value unpacking
            return [], {}
        # point forecast: mean over the sample paths
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        laptime_array = tss[idx].values.copy()
        # predicted series: splice the forecast into the tail of the truth
        laptime_array_hat = tss[idx].values.copy()
        laptime_array_hat[-prediction_len:] = forecast_laptime_mean
        # first lap index covered by this forecast window
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
                                               laptime_array_hat[-prediction_len:].copy()]
    # collect per-lap laptime matrices
    rank_ret = []
    for lap in forecasts_et.keys():
        # cars that have a record starting at this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}
        # [0] = truth, [1] = prediction
        lap_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        # no ranking here: expose the raw laptime matrices
        true_laptime = lap_time[0]
        pred_laptime = lap_time[1]
        rank_ret.append([lap, lap_time, true_laptime, pred_laptime])
    return rank_ret, forecasts_et
#calc rank
def eval_rank(test_ds, tss, forecasts, prediction_length, start_offset):
    """Evaluate rank, derived from laptime forecasts or predicted directly.

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                contain features to identify the eventid
        start_offset[]; elapsed time for lap0 as a DataFrame -> laptime2rank mode;
                any non-DataFrame value (e.g. 0) -> the target itself is ranked
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)
    """
    carlist = []
    # completed_laps -> {carno -> [truth tail, prediction tail]}
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        # laptime2rank mode: fetch this car's lap-0 elapsed time
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        # sanity check: forecast horizon must match prediction_length
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: message was missing its f prefix
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bugfix: empty pair instead of bare [] for two-value unpacking
            return [], {}
        # point forecast: mean over the sample paths
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        if isinstance(start_offset, pd.core.frame.DataFrame):
            # laptime2rank: elapsed time = cumulative laptime + lap-0 offset
            laptime_array = tss[idx].values.copy()
            elapsed_time = np.cumsum(laptime_array) + offset
            laptime_array = tss[idx].values.copy()
            laptime_array[-prediction_len:] = forecast_laptime_mean
            elapsed_time_hat = np.cumsum(laptime_array) + offset
        else:
            # rank-direct: the target series is already the quantity to rank
            elapsed_time = tss[idx].values.copy()
            elapsed_time_hat = tss[idx].values.copy()
            elapsed_time_hat[-prediction_len:] = forecast_laptime_mean
        # first lap index covered by this forecast window
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]
    # calc rank per lap
    rank_ret = []
    for lap in forecasts_et.keys():
        # cars that have a record starting at this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}
        # [0] = truth, [1] = prediction
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        # double argsort converts values into rank positions
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])
    return rank_ret, forecasts_et
def get_acc(rank_ret, prediction_length, verbose=False):
    """Aggregate rank-accuracy metrics over a list of evaluation records.

    input:
        rank_ret: [lap, elapsed_time, true_rank, pred_rank]; only columns
                  [2] (true rank) and [3] (predicted rank) are used
    return:
        ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
         (matching record counts for each metric))
        the pair can be used to calculate micro/macro metrics
    """
    hits_top1 = 0
    hits_top1_far = 0
    hits_top5 = 0
    hits_top5_far = 0
    tau_sum = 0
    rmse_sum = 0.
    for record in rank_ret:
        truth = record[2]
        pred = record[3]
        # leader (rank 0) correctly predicted anywhere in the window
        hits_top1 += np.sum((truth == 0) & (pred == 0))
        # ... and at the farthest forecast step only
        hits_top1_far += np.sum((truth[:, -1] == 0) & (pred[:, -1] == 0))
        # top-5 membership, window-wide and farthest-step
        hits_top5 += np.sum((truth < 5) & (pred < 5))
        hits_top5_far += np.sum((truth[:, -1] < 5) & (pred[:, -1] < 5))
        # Kendall rank correlation between the two rank matrices
        kt, _ = stats.kendalltau(truth, pred)
        tau_sum += kt
        # mean squared error between predicted and true ranks
        rmse_sum += mean_squared_error(pred, truth)
    total = len(rank_ret)
    if total > 0:
        # normalize each accumulator by its record count
        hits_top1 = hits_top1 * 1.0 / (total * prediction_length)
        hits_top1_far = hits_top1_far * 1.0 / total
        hits_top5 = hits_top5 * 1.0 / (5 * total * prediction_length)
        hits_top5_far = hits_top5_far * 1.0 / (5 * total)
        tau_sum = tau_sum / total
        rmse_sum = rmse_sum / total
    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', hits_top1,
              'top1acc_farmost=', hits_top1_far,
              'top5acc=', hits_top5,
              'top5acc_farmost=', hits_top5_far,
              )
        print('tau = ', tau_sum,
              'rmse = ', rmse_sum)
    return ((hits_top1, hits_top1_far, hits_top5, hits_top5_far, tau_sum, rmse_sum),
            (total * prediction_length, total, 5 * total * prediction_length, 5 * total, total, total))
# In[9]:
def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
            test_event=_test_event, test_cars=None,
            datamode=MODE_ORACLE, models=None):
    """Run one experiment on a single event.

    dependency: test_event, test on one event only

    input:
        prediction_length ; forecast horizon
        half_moving_win   ; window-shift mode for make_dataset_byevent
        train_ratio       ; train/test split ratio
        trainid           ; id of the trained model set to load
        test_event        ; event name (defaults to the global _test_event)
        test_cars         ; restrict to these car numbers (None/[] = all)
        datamode          ; oracle-mode bit flags for the dataset builder
        models            ; model names to run (default ['oracle'])
    return:
        pred_ret    ; {model: [tss, forecasts]}
        ds_ret      ; {model: test_ds}
        rank_result ; {model: (rank_ret, forecast_ret)}
        retdf       ; list of metric rows
    """
    # bugfix: avoid mutable default arguments; semantics unchanged
    if test_cars is None:
        test_cars = []
    if models is None:
        models = ['oracle']
    retdf = []
    pred_ret = {}
    ds_ret = {}
    rank_result = {}
    ### create test dataset
    train_ds, test_ds, _, _ = make_dataset_byevent(events_id[test_event], prediction_length, freq,
                                                   oracle_mode=datamode,
                                                   run_ts=_run_ts,
                                                   test_cars=test_cars,
                                                   half_moving_win=half_moving_win,
                                                   train_ratio=train_ratio)
    for model in models:
        print('exp:', inspect.stack()[0][3], 'model:', model,
              'datamode:', get_modestr(datamode), 'eval:', _exp_id)
        tss, forecasts = run_prediction_ex(test_ds, prediction_length, model,
                                           trainid=trainid)
        pred_ret[model] = [tss, forecasts]
        ds_ret[model] = test_ds
        if _exp_id == 'rank':
            # rank prediction directly on the target series
            rank_ret, forecast_ret = eval_rank(test_ds, tss, forecasts, prediction_length,
                                               0)
        elif _exp_id == 'laptime2rank':
            # laptime forecasts converted to ranks via elapsed time
            rank_ret, forecast_ret = eval_rank(test_ds, tss, forecasts, prediction_length,
                                               global_start_offset[test_event])
        elif _exp_id == 'laptime':
            # laptime evaluation instead of rank
            rank_ret, forecast_ret = eval_laptime(test_ds, tss, forecasts, prediction_length,
                                                  global_start_offset[test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        metrics = get_acc(rank_ret, prediction_length)
        ret = [model, prediction_length, half_moving_win, get_modestr(datamode), trainid]
        ret.extend(metrics[0])
        retdf.append(ret)
        rank_result[model] = (rank_ret, forecast_ret)
    return pred_ret, ds_ret, rank_result, retdf
# In[10]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc, datamode='', models=[]):
    """
    Run a grid of experiments `runs` times and average the metrics.

    input:
        runs        ; number of repetitions to average over
        plens       ; prediction lengths, e.g. [2,5,10]
        half        ; half_moving_win modes, e.g. [False]
        trainids    ; trained-model ids, e.g. ["r0.5"] or ["indy500-r0.2",...]
        train_ratio ; train/test split ratio
        testfunc    ; either a callable (run_exp_predpit, run_exp_predtrack,
                      run_exp ...) or a string tag, in which case run_exp is
                      called with `datamode` and `models`
        datamode    ; oracle-mode flags (required when testfunc is a string)
        models      ; model names (required when testfunc is a string)
                      NOTE(review): mutable default []; safe here only because
                      it is never mutated
    return:
        dfret ; average result of multiple runs
            dataframe['model' , 'prediction_length', 'halfmode','datamode','trainid',
            'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse',
            'top1acc_std','top1acc_farmost_std','top5acc_std','top5acc_farmost_std','tau_std','rmse_std']
        alldata_ret ; for debug
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
            pred_ret[model] -> [tss, forecasts]
            test_ds[model] -> test_ds
            rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
                forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)

        NOTE(review): on invalid settings the function returns None (not a
        pair), so callers that unpack two values will raise TypeError.
    """
    if plens == [] or half == [] or trainids == []:
        print("error with empty settings")
        return
    #testfunc or (datamode & models)
    if isinstance(testfunc,str) and (datamode == '' or models == []):
        print("error with testfunc")
        return
    allret = []
    alldata_ret = []
    for runid in range(runs):
        exp_data = []
        exp_result = []
        # full grid: half-window mode x prediction length x trained-model id
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('='*10)
                    if not isinstance(testfunc,str):
                        pred_ret, test_ds, rank_ret, metric_ret = testfunc(plen, halfmode,
                                                                           train_ratio=train_ratio,
                                                                           trainid=trainid)
                    else:
                        pred_ret, test_ds, rank_ret, metric_ret = run_exp(plen, halfmode,
                                                                          train_ratio=train_ratio,
                                                                          trainid=trainid,
                                                                          datamode=datamode,
                                                                          models=models)
                    #save raw outputs for debugging and metric rows for averaging
                    exp_data.append((pred_ret, test_ds, rank_ret))
                    exp_result.extend(metric_ret)
        #save result of this run
        result = pd.DataFrame(exp_result, columns = ['model' , 'prediction_length', 'halfmode',
                                                     'datamode','trainid',
                                                     'top1acc','top1acc_farmost','top5acc',
                                                     'top5acc_farmost','tau','rmse'])
        #result['runid'] = [runid for x in range(len(result))]
        allret.append(result)
        alldata_ret.append(exp_data)
    #final: stack the six metric columns of every run into one (runs,rows,6) array
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :,:] = ret[['top1acc','top1acc_farmost','top5acc',
                                   'top5acc_farmost','tau','rmse']].values
    #average and standard deviation over the runs axis
    averagemat = np.mean(metrics[:,:,:], axis=0)
    stdmat = np.std(metrics[:,:,:], axis=0)
    dfhead = allret[0][['model' , 'prediction_length', 'halfmode', 'datamode','trainid']]
    dfaverage = pd.DataFrame(averagemat, columns = ['top1acc','top1acc_farmost','top5acc',
                                                    'top5acc_farmost','tau','rmse'])
    dfstd = pd.DataFrame(stdmat, columns = ['top1acc_std','top1acc_farmost_std','top5acc_std',
                                            'top5acc_farmost_std','tau_std','rmse_std'])
    dfret = pd.concat([dfhead, dfaverage, dfstd], axis=1)
    #if exp_id != '':
    #    dfret.to_csv(f'laptime2rank-evaluate-indy500-{exp_id}-result.csv', float_format='%.3f')
    return dfret, alldata_ret
def checkret_status(dataret, runid=0, idx=0, model='oracle'):
    """
    Count yellow-flag laps and pit laps inside the forecast window of one
    saved test dataset, then print the totals.

    alldata_ret ; for debug
        [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
        pred_ret[model] -> [tss, forecasts]
        test_ds[model] -> test_ds
        rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
            forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    run_slot = dataret[runid][idx]
    # horizon length comes from the stored forecast samples
    _, plen = run_slot[0][model][1][0].samples.shape
    dataset = run_slot[1][model]
    yellow_total = 0
    pit_total = 0
    for record in dataset:
        # decode the global car id (lookup kept for its sanity-check effect)
        carno = decode_carids[record['feat_static_cat'][0]]
        track_status, lap_status = record['feat_dynamic_real']
        # accumulate the status flags inside the forecast window only
        yellow_total += np.sum(track_status[-plen:])
        pit_total += np.sum(lap_status[-plen:])
    print('yfcnt:', yellow_total, 'pitcnt:', pit_total)
def get_ref_oracle_testds(plens, halfs, train_ratio=0.8,
                          test_event=_test_event, test_cars=None):
    """Build reference oracle test datasets for every (plen, halfmode) pair.

    input:
        plens       ; prediction lengths to cover
        halfs       ; half_moving_win modes to cover
        train_ratio ; train/test split ratio
        test_event  ; event name (defaults to the global _test_event)
        test_cars   ; restrict to these car numbers (None/[] = all)
    return:
        testset ; {'plen-halfmode': test_ds}, keyed with '%d-%d'
    """
    # bugfix: avoid mutable default argument; semantics unchanged
    if test_cars is None:
        test_cars = []
    testset = {}
    for prediction_length in plens:
        for half_moving_win in halfs:
            train_ds, test_ds, _, _ = make_dataset_byevent(events_id[test_event], prediction_length, freq,
                                                           oracle_mode=MODE_ORACLE,
                                                           run_ts=_run_ts,
                                                           test_cars=test_cars,
                                                           half_moving_win=half_moving_win,
                                                           train_ratio=train_ratio)
            # key by prediction length and half-window mode
            key = '%d-%d'%(prediction_length,half_moving_win)
            testset[key] = test_ds
    return testset
def checkret_confusionmat(dataret, ref_testset, runid= 0, testid = '', model='oracle'):
    """
    output the 4x4 confusion matrix split by track and lap status

    For every prediction-length slot of one run, label each forecast start
    lap with a two-char type '<track><lap>' ('0' = no yellow flag / no pit
    inside the window, '1' = at least one), then report get_acc metrics per
    type plus an 'aa' row over all records.

    input:
        dataret     ; alldata_ret structure produced by run_test
        ref_testset ; {'plen-halfmode': oracle test ds} from get_ref_oracle_testds
        runid       ; which run of dataret to analyze
        testid      ; label copied into every output row
        model       ; model key inside dataret
    return:
        dfacc ; concatenated DataFrame of per-type accuracy rows
    """
    plen_length = len(dataret[runid])
    dflist = []
    for idx in range(plen_length):
        # recover the horizon from the stored forecast samples
        _, plen = dataret[runid][idx][0][model][1][0].samples.shape
        test_ds = dataret[runid][idx][1][model]
        rank_ret = dataret[runid][idx][2][model][0]
        # reference datasets are keyed '%d-%d' with halfmode 0
        key = '%d-%d'%(plen,0)
        if key not in ref_testset:
            print(f'error, {key} not found in ref_testset')
            continue
        ref_oracle_testds = ref_testset[key]
        if len(ref_oracle_testds) != len(test_ds):
            print('error, size of testds mismatch', len(ref_oracle_testds), len(test_ds))
            continue
        # confusion matrix for <trackstatus, lapstatus> type: 00,01,10,11
        # lap(start lap of prediction) -> accumulated (yellow-flag count, pit count)
        lapmap = {}
        ds_iter = iter(ref_oracle_testds)
        for recid in range(len(ref_oracle_testds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            track_rec,lap_rec = test_rec['feat_dynamic_real']
            # status counts inside the forecast window only
            yfcnt = np.sum(track_rec[-plen:])
            pitcnt = np.sum(lap_rec[-plen:])
            #laptype = ('0' if yfcnt==0 else '1') + ('0' if pitcnt==0 else '1')
            lap = len(track_rec) - plen + 1
            if lap not in lapmap:
                #lapmap[lap] = laptype
                lapmap[lap] = (yfcnt, pitcnt)
            else:
                # accumulate over all cars whose window starts at the same lap
                oldtype = lapmap[lap]
                lapmap[lap] = (yfcnt + oldtype[0], pitcnt + oldtype[1])
        #split the rank_ret by laptype
        types=['00','10','01','11']
        acc_ret = []
        for laptype in types:
            check_ret = []
            for item in rank_ret:
                # item[0] is the start lap; derive its two-char type label
                typecnt = lapmap[item[0]]
                thetype = ('0' if typecnt[0]==0 else '1') + ('0' if typecnt[1]==0 else '1')
                if thetype == laptype:
                    check_ret.append(item)
            # get acc for the records of this type only
            metrics = get_acc(check_ret,plen)
            recret = [testid, plen, laptype, len(check_ret)]
            recret.extend(metrics[0])
            acc_ret.append(recret)
        #add one row ('aa') covering all records regardless of type
        metrics = get_acc(rank_ret,plen)
        recret = [testid, plen, 'aa', len(rank_ret)]
        recret.extend(metrics[0])
        acc_ret.append(recret)
        _dfacc = pd.DataFrame(acc_ret, columns = ['testid','plen',
                                                  'type','reccnt','top1acc','top1acc_farmost','top5acc',
                                                  'top5acc_farmost','tau','rmse'])
        dflist.append(_dfacc)
    dfacc = pd.concat(dflist, axis=0)
    return dfacc
# ### init
# In[11]:
#
# parameters
#
# event name -> (alldata, rankdata, acldata) loaded below
stagedata = {}
# car number -> global car id (replaced by the pickle load further down)
global_carids = {}
traindata = None
cur_carid = 0
#years = ['2011','2012','2013', '2014', '2015', '2016', '2017']
years = ['2013','2014','2015','2016','2017','2018','2019']
#events = ['Indy500']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
# dataset id string, e.g. 'Indy500_2013_2019'
dbid = f'Indy500_{years[0]}_{years[-1]}'
# event -> lap-0 elapsed time per car (anchors cumulative lap times)
global_start_offset = {}
for event in events:
    #dataid = f'{event}-{year}'
    #alldata, rankdata, acldata, flagdata
    stagedata[event] = load_data(event)
    alldata, rankdata, acldata = stagedata[event]
    # offset: elapsed time at completed_laps == 0 for every car of this event
    global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]
# In[12]:
# start from here
import pickle
# load the precomputed laptime/rank/timediff data bundle for all events
#with open('laptime_rank_timediff_fulltest-oracle-%s.pickle'%year, 'rb') as f:
with open(f'laptime_rank_timediff_pit-oracle-{dbid}.pickle', 'rb') as f:
    # The protocol version used is detected automatically, so we do not
    # have to specify it.
    global_carids, laptime_data = pickle.load(f, encoding='latin1')
# In[13]:
freq = "1min"
#decode global_carids: global car id -> car number
decode_carids={carid:carno for carno, carid in global_carids.items()}
#useeid = False
#interpolate = False
#ipstr = '-ip' if interpolate else '-noip'
#ipstr = '%s-%s'%('ip' if interpolate else 'noip', 'eid' if useeid else 'noeid')
#if useeid:
#    cardinality = [len(global_carids), len(laptime_data)]
#else:
#    cardinality = [len(global_carids)]
# ### oracle test
# In[14]:
def check_testds(datamode, test_event=_test_event, test_cars=[]):
    """
    report mae, etc

    Builds the test dataset for every (prediction_length, half_moving_win)
    combination; make_dataset_byevent prints the dataset statistics (train/test
    sizes and mae) as a side effect, and the datasets themselves are discarded.

    NOTE(review): `plens`, `half` and `train_ratio` are free variables that
    must exist at module level when this is called — confirm before use.
    NOTE(review): mutable default `test_cars=[]`; safe only if the callee
    never mutates it.
    """
    for prediction_length in plens:
        for half_moving_win in half:
            train_ds, test_ds,_,_ = make_dataset_byevent(events_id[test_event], prediction_length,freq,
                                                         oracle_mode=datamode,
                                                         run_ts = _run_ts,
                                                         test_cars=test_cars,
                                                         half_moving_win= half_moving_win,
                                                         train_ratio=train_ratio)
def dotest(config):
    """
    Run every (model, test-name -> datamode) combination in `config` and
    collect both the averaged metrics and the confusion-matrix split.

    input:
        config ; {model_name: {test_name: datamode_flags}}
    return:
        dfret ; concatenated averaged-metric frames from run_test
        dfacc ; concatenated confusion-matrix frames from checkret_confusionmat

    NOTE(review): `runs`, `plens`, `half`, `trainids`, `train_ratio` and
    `ref_testset` are read here as module-level globals; mytest() defines them
    only as locals, so calling mytest() as-is raises NameError — this looks
    like exported notebook code. Confirm how it is meant to be driven.
    """
    acclist = []
    dflist = []
    for model in config.keys():
        conf = config[model]
        for teststr in conf.keys():
            # the test name doubles as the testfunc tag passed to run_test
            testfunc = teststr
            datamode = conf[teststr]
            df, dataret = run_test(runs, plens, half, trainids,
                                   train_ratio, testfunc, datamode=datamode,models=[model])
            #concat the metric frame and the per-type accuracy frame
            acc = checkret_confusionmat(dataret, ref_testset,
                                        testid = teststr, model=model)
            dflist.append(df)
            acclist.append(acc)
    dfret = pd.concat(dflist, axis=0)
    dfacc = pd.concat(acclist, axis=0)
    return dfret, dfacc
# In[15]:
### test
def mytest():
    """
    Standalone driver for a full evaluation sweep over the oracle variants
    and the deepAR/naive/zero baselines.

    NOTE(review): all settings below are *locals*, yet dotest()/run_test read
    `runs`, `plens`, `half`, `trainids`, `train_ratio` and `ref_testset` from
    module scope — running this function as-is raises NameError. It looks like
    notebook code where these names used to live at top level; confirm.
    """
    #half=[True, False]
    #plens=[2,5,10,20,30]
    plens=[2,5,10]
    half=[0]
    trainids = ["2018"]
    #trainids = ["r0.5","r0.6"]
    runs = 1
    train_ratio=0.4
    # output csv file names for the contingency split and the overall metrics
    acc_output = f'{_exp_id}-evaluate-mean-splitbyevent-fulltest-contigency-d{_dataset_id}-t{_test_event}-r{runs}-tr{train_ratio}-result.csv'
    ret_output = f'{_exp_id}-evaluate-mean-splitbyevent-fulltest-all-d{_dataset_id}-t{_test_event}-r{runs}-tr{train_ratio}-result.csv'
    #trainids = ["indy500"]
    #runs = 1
    #plens=[2]
    # model -> {test name -> datamode flags}
    config = {'oracle':
              {'fulloracle':MODE_ORACLE,'notracklap':MODE_NOTRACK + MODE_NOLAP,
               'laponly':MODE_ORACLE_LAPONLY, 'trackonly':MODE_ORACLE_TRACKONLY,
               'fullpred':MODE_PREDTRACK + MODE_PREDPIT,
               'predtrack':MODE_PREDTRACK + MODE_ORACLE_TRACKONLY,
               'predpit':MODE_PREDPIT + MODE_ORACLE_LAPONLY,
               'curtrack':MODE_TESTCURTRACK,
               'zerotrack':MODE_TESTZERO
              },
              'deepAR':{'deepAR':MODE_ORACLE},
              'naive':{'naive':MODE_ORACLE},
              'zero':{'zero':MODE_ORACLE}
             }
    ref_testset = get_ref_oracle_testds(plens, half, train_ratio=train_ratio)
    dfret, dfacc = dotest(config)
    dfret.to_csv(ret_output, float_format='%.3f')
    dfacc.to_csv(acc_output, float_format='%.3f')
    # NOTE(review): the result of this expression is discarded (notebook leftover)
    dfacc[dfacc['type']=='aa']
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # cmd argument parser
    usage = 'evaluate-fulltest.py --epochs epochs --input inputpicklefile --output outputfile'
    parser = OptionParser(usage)
    parser.add_option("--input", dest="inputfile", default='sim-indy500-laptime-2018.pickle')
    parser.add_option("--output", dest="outputfile")
    parser.add_option("--epochs", dest="epochs", default=100)
    parser.add_option("--model", dest="model", default="deepAR")
    parser.add_option("--gpuid", dest="gpuid", default=0)
    parser.add_option("--contextlen", dest="contextlen", default=100)
    #parser.add_option("--predictionlen", dest="predictionlen", default=50)
    #parser.add_option("--testlen", dest="testlen", default=50)
    parser.add_option("--nosave", dest="nosave", action="store_true", default=False)
    parser.add_option("--evalmode", dest="evalmode", action="store_true", default=False)
    #obsolete
    parser.add_option("--mode", dest="mode", default='train')
    # NOTE(review): several parsed options (--epochs, --model, --gpuid,
    # --nosave, --evalmode, --mode, --output) are not consumed in the code
    # visible here — presumably used further on, or leftovers; confirm.
    opt, args = parser.parse_args()
    #set the global length
    #prediction_length = int(opt.predictionlen)
    context_length = int(opt.contextlen)
    #test_length = int(opt.testlen)
    #ts_type = int(opt.ts_type)
    #train_ds, test_ds = load_dataset(opt.inputfile, ts_type)
    train_ds, test_ds = load_dataset(opt.inputfile)
    #get target dim: 1 for a univariate target, first axis size otherwise
    entry = next(iter(train_ds))
    target_dim = entry['target'].shape
    target_dim = target_dim[0] if len(target_dim) > 1 else 1
    logger.info('target_dim:%s', target_dim)
| 65,597 | 35.023064 | 310 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepmodels_indy_gluontsdb.py | #!/usr/bin/env python
# coding: utf-8
"""
Deep Models on the Indy dataset
dataset:
freq, prediction_length, cardinality,train_ds, test_ds
deep models:
deepAR, deepstate, deepFactor
"""
# # DeepAR on simulation indy500 laptime dataset
#
# laptime dataset
# <eventid, carids, laptime (totalcars x totallaps)>
# Third-party imports
import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import logging
import os,sys
from optparse import OptionParser
import pickle
from pathlib import Path
from gluonts.dataset.common import ListDataset
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from indycar.model.NaivePredictor import NaivePredictor
logger = logging.getLogger(__name__)
# global variables
# NOTE: freq, prediction_length and cardinality are placeholders here; they are
# overwritten by load_dataset() from the pickled dataset header, and
# context_length is overwritten from the --contextlen CLI option in __main__.
prediction_length = 50
context_length = 100
freq = "1H"
# race events covered by the datasets; events_id maps name -> ordinal
events = ['Phoenix','Indy500','Texas','Iowa','Pocono','Gateway']
events_id={key:idx for idx, key in enumerate(events)}
# number of distinct cars (static categorical feature); refreshed by load_dataset
cardinality = [0]
# time-series type selectors (row indices into the per-car feature matrix)
TS_LAPTIME=2
TS_RANK=3
def load_dataset(inputfile):
    """Unpickle a prepared dataset file and return its train/test splits.

    Side effect: rebinds the module globals ``freq``, ``prediction_length``
    and ``cardinality`` from the pickled header before returning the data.
    """
    global freq, prediction_length, cardinality
    with open(inputfile, 'rb') as fh:
        payload = pickle.load(fh, encoding='latin1')
    # the pickle stores a 5-tuple: header fields first, then the two datasets
    freq, prediction_length, cardinality, train_ds, test_ds = payload
    logger.info(f"number of cars: {cardinality}")
    return train_ds, test_ds
def plot_prob_forecasts(ts_entry, forecast_entry, outputfile):
    """Plot the observed tail plus the probabilistic forecast for each entry.

    One figure per (series, forecast) pair is written to
    ``<outputfile>-<idx>.pdf``; only the first target dimension is drawn.
    """
    plot_length = context_length
    prediction_intervals = (50.0, 90.0)
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]
    for idx, (ts, forecast) in enumerate(zip(ts_entry, forecast_entry)):
        fig, axs = plt.subplots(1, 1, figsize=(10, 7))
        # trailing window of the observed series (dimension 0 only)
        ts.iloc[-plot_length:, 0].plot(ax=axs)
        forecast.copy_dim(0).plot(prediction_intervals=prediction_intervals, color='g')
        plt.grid(which="both")
        plt.legend(legend, loc="upper left")
        plt.savefig(outputfile + '-%d.pdf' % idx)
def evaluate_model_old(estimator, train_ds, test_ds, outputfile):
    """Train *estimator*, persist it under *outputfile*, then plot and score it.

    Superseded by :func:`evaluate_model` (which takes a ready predictor and
    evaluator); kept for reproducing old runs.
    """
    predictor = estimator.train(train_ds)
    # makedirs(+exist_ok) is race-free and also creates missing parent dirs,
    # unlike the old exists()/mkdir() pair
    os.makedirs(outputfile, exist_ok=True)
    predictor.serialize(Path(outputfile))
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,    # test dataset
        predictor=predictor,  # predictor
        num_samples=100,    # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    logger.info(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    # hand-picked reference series: car12@rank1, car1@rank16, car7@rank33 -> 7, 0, 4
    offset = 0
    ts_entry = [tss[7 + offset], tss[0 + offset], tss[4 + offset]]
    forecast_entry = [forecasts[7 + offset], forecasts[0 + offset], forecasts[4 + offset]]
    plot_prob_forecasts(ts_entry, forecast_entry, outputfile)
    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
    logger.info(json.dumps(agg_metrics, indent=4))
def evaluate_model_uni(predictor, evaluator, test_ds, outputfile):
    """Score *predictor* on *test_ds* with a univariate *evaluator*; log metrics."""
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,
        predictor=predictor,
        num_samples=100,  # sample paths drawn per series for evaluation
    )
    # consume forecasts first, then the ground-truth series
    forecasts = list(forecast_it)
    tss = list(ts_it)
    logger.info(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    # hand-picked reference series: car12@rank1, car1@rank16, car7@rank33 -> 7, 0, 4
    offset = 0
    ts_entry = [tss[7 + offset], tss[0 + offset], tss[4 + offset]]
    forecast_entry = [forecasts[7 + offset], forecasts[0 + offset], forecasts[4 + offset]]
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
    logger.info(json.dumps(agg_metrics, indent=4))
def evaluate_model(predictor, evaluator, test_ds, outputfile):
    """Forecast *test_ds* with *predictor*, plot three reference cars, log metrics.

    Works for both the univariate and the multivariate case; the caller
    supplies a matching Evaluator / MultivariateEvaluator.
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,
        predictor=predictor,
        num_samples=100,  # sample paths drawn per series for evaluation
    )
    # consume forecasts first, then the ground-truth series
    forecasts = list(forecast_it)
    tss = list(ts_it)
    logger.info(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    # hand-picked reference series: car12@rank1, car1@rank16, car7@rank33 -> 7, 0, 4
    offset = 0
    picks = [7 + offset, 0 + offset, 4 + offset]
    ts_entry = [tss[i] for i in picks]
    forecast_entry = [forecasts[i] for i in picks]
    plot_prob_forecasts(ts_entry, forecast_entry, outputfile)
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
    logger.info(json.dumps(agg_metrics, indent=4))
def init_estimator(model, gpuid, epochs=100, target_dim = 3):
    """Build a GluonTS estimator (neural models) or predictor (classical models).

    Args:
        model: one of 'deepAR', 'deepAR-Oracle', 'deepAR-multi', 'simpleFF',
            'deepFactor', 'deepState', 'ets', 'prophet', 'arima', 'naive'.
        gpuid: GPU ordinal used to build the MXNet context string.
        epochs: training epochs; accepts a str (optparse hands CLI values over
            as strings) and is coerced to int.
        target_dim: target dimensionality for the multivariate deepAR variant.

    Returns:
        An untrained estimator for the neural models, or a ready-to-use
        predictor for the classical baselines.

    Exits the process via sys.exit(-1) on an unknown model name (kept for
    backward compatibility with existing callers).
    """
    epochs = int(epochs)  # robustness: CLI passes epochs as a string

    def _trainer(hybridize=True):
        # shared training configuration for all neural estimators
        return Trainer(ctx="gpu(%s)"%gpuid,
                       epochs=epochs,
                       learning_rate=1e-3,
                       hybridize=hybridize,
                       num_batches_per_epoch=100
                       )

    if model == 'deepAR':
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=_trainer()
        )
    elif model == 'deepAR-Oracle':
        # same as deepAR but additionally consumes the dynamic real features
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            use_feat_dynamic_real=True,
            freq=freq,
            trainer=_trainer()
        )
    elif model == 'deepAR-multi':
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=_trainer(),
            distr_output=MultivariateGaussianOutput(dim=target_dim),
        )
    elif model == 'simpleFF':
        # hybridize=False: the feed-forward net is not hybridizable here
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            trainer=_trainer(hybridize=False)
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            trainer=_trainer()
        )
    elif model == 'deepState':
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=_trainer()
        )
    elif model == 'ets':
        estimator = RForecastPredictor(method_name='ets',freq= freq, prediction_length = prediction_length)
    elif model == 'prophet':
        estimator = ProphetPredictor(freq= freq, prediction_length = prediction_length)
    elif model == 'arima':
        estimator = RForecastPredictor(method_name='arima',freq= freq, prediction_length = prediction_length)
    elif model == 'naive':
        estimator = NaivePredictor(freq= freq, prediction_length = prediction_length)
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)
    return estimator
if __name__ == '__main__':
    # Entry point: parse CLI options, load the pickled dataset, train (or load)
    # the selected model, and evaluate it on the test split.
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # cmd argument parser
    usage = 'deepar_simindy500.py --epochs epochs --input inputpicklefile --output outputfile'
    parser = OptionParser(usage)
    parser.add_option("--input", dest="inputfile", default='sim-indy500-laptime-2018.pickle')
    parser.add_option("--output", dest="outputfile")
    parser.add_option("--epochs", dest="epochs", default=100)
    parser.add_option("--model", dest="model", default="deepAR")
    parser.add_option("--gpuid", dest="gpuid", default=0)
    parser.add_option("--contextlen", dest="contextlen", default=100)
    parser.add_option("--nosave", dest="nosave", action="store_true", default=False)
    parser.add_option("--evalmode", dest="evalmode", action="store_true", default=False)
    #obsolete
    parser.add_option("--mode", dest="mode", default='train')
    opt, args = parser.parse_args()
    # set the global context length consumed by init_estimator
    context_length = int(opt.contextlen)
    # load_dataset also refreshes freq / prediction_length / cardinality globals
    train_ds, test_ds = load_dataset(opt.inputfile)
    # infer the target dimensionality from the first training entry
    entry = next(iter(train_ds))
    target_dim = entry['target'].shape
    target_dim = target_dim[0] if len(target_dim) > 1 else 1
    logger.info('target_dim:%s', target_dim)
    runid = f'-i{opt.outputfile}-e{opt.epochs}-m{opt.model}-p{prediction_length}-c{opt.contextlen}-f{freq}-dim{target_dim}'
    logger.info("runid=%s", runid)
    # train
    classical_models = ['ets', 'arima', 'prophet', 'naive']
    # robustness: optparse yields strings; coerce epochs before it reaches Trainer
    estimator = init_estimator(opt.model, opt.gpuid, int(opt.epochs), target_dim)
    if opt.evalmode == False:
        if opt.model in classical_models:
            # classical baselines need no training step
            predictor = estimator
        else:
            predictor = estimator.train(train_ds)
        if not opt.nosave:
            if not os.path.exists(opt.outputfile):
                os.mkdir(opt.outputfile)
            logger.info('Start to save the model to %s', opt.outputfile)
            predictor.serialize(Path(opt.outputfile))
            logger.info('End of saving the model.')
    else:
        if not os.path.exists(opt.outputfile):
            # bugfix: the old message interpolated an undefined name `outputfile`
            # (NameError); it must reference opt.outputfile
            logger.error(f'error:{opt.outputfile} not exists')
            sys.exit(-1)
        logger.info('Start to load the model from %s', opt.outputfile)
        predictor = Predictor.deserialize(Path(opt.outputfile))
        logger.info('End of loading the model.')
    # evaluate: pick the evaluator matching the target dimensionality
    if target_dim > 1:
        logger.info('Start MultivariateEvaluator')
        evaluator = MultivariateEvaluator(quantiles=[0.1, 0.5, 0.9])
    else:
        logger.info('Start Evaluator')
        evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    evaluate_model(predictor, evaluator, test_ds, opt.outputfile)
| 14,005 | 33.412776 | 123 | py |
rankpredictor | rankpredictor-master/src/indycar/model/NaivePredictor.py | #!/usr/bin/env python
# coding: utf-8
import mxnet as mx
from mxnet import gluon
import numpy as np
import json
import pandas as pd
from typing import Callable, Dict, Iterator, NamedTuple, Optional, List
from gluonts.core.component import validated
from gluonts.dataset.common import DataEntry, Dataset
from gluonts.model.predictor import Predictor
from gluonts.model.forecast import SampleForecast
class NaivePredictor(Predictor):
    """Persistence baseline: forecast every horizon step as the last observed value."""

    @validated()
    def __init__(self,
                 freq: str,
                 prediction_length: int) -> None:
        self.prediction_length = prediction_length
        # normalize the '1min' alias to the pandas offset code
        if freq == '1min':
            freq = 'T'
        self.freq = freq
        self.lead_time = 0

    def predict(
        self, dataset: Dataset, num_samples: int = 100, **kwargs
    ) -> Iterator[SampleForecast]:
        """Yield one SampleForecast per dataset entry.

        All sample paths are identical: the final target value repeated over
        the whole horizon. (cleanup: removed the unused ``train_length`` and
        ``feat_dynamic_real`` locals of the old body)
        """
        for entry in dataset:
            prediction_length = self.prediction_length
            start = entry["start"]
            target = entry["target"]
            if len(target.shape) > 1:
                # multivariate target: assumed (target_dim, length) layout
                target_dim = target.shape[0]
                target_len = target.shape[1]
            else:
                target_dim = 1
                target_len = target.shape[0]
            if target_dim == 1:
                forecast_samples = np.zeros((num_samples, prediction_length))
                # naive persistence: carry the last observation forward
                forecast_samples[:] = target[-1]
            else:
                # NOTE(review): for a 2-D target, target[-1] is the last ROW
                # (one dimension's whole series), not the last time step —
                # verify the intended layout before relying on this branch
                forecast_samples = np.zeros((num_samples, prediction_length, target_dim))
                forecast_samples[:, :] = target[-1]
            yield SampleForecast(
                samples=forecast_samples,
                start_date=start + pd.Timedelta(target_len, unit=self.freq),
                freq=self.freq,
            )
| 2,276 | 32.485294 | 89 | py |
rankpredictor | rankpredictor-master/src/indycar/model/stint_simulator_shortterm_pitmodel_joint.py | #!/usr/bin/env python
# coding: utf-8
# ### stint simulator
#
# joint train model
#
# based on: Stint-Predictor-Fastrun
#
#
# long term predictor by continuously regressive forecasting at each pitstop
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random, math
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from indycar.model.pitmodel import PitModelSimple, PitModelMLP
# In[2]:
import os
random.seed()  # seed from system entropy; runs are intentionally non-reproducible
os.getcwd()  # NOTE(review): return value unused — likely a leftover notebook cell
#GPUID = 1
# ### global constants
# In[3]:
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# row indices into each car's feature matrix rec[feature, lap]
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8
# share the memory
#COL_LAPSTATUS_PRED = 8 # for dynamic lapstatus predictions
#LAPSTATUS SAVED in forecast_et
COL_LAPSTATUS_SAVE = 0 #laptime no use
COL_CAUTION_LAPS_INSTINT_SAVE=7
COL_LAPS_INSTINT_SAVE= 8
# feature-set selectors for the test records (see make_dataset_byevent)
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
# oracle mode — bit flags combined into oracle_mode and tested via test_flag()
MODE_ORACLE = 1024 # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2
# oracle mode for training
# NOTE(review): MODE_NOLAP/MODE_NOTRACK reuse the same bit values as
# MODE_ORACLE_TRACKONLY/MODE_ORACLE_LAPONLY — they cannot be distinguished
# when combined; confirm this aliasing is intentional
MODE_NOLAP = 1
MODE_NOTRACK = 2
# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32
# disturbe analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256
# flag bit -> human-readable name (used by get_modestr)
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
        MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
        MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
        MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
        MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
        MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
# In[ ]:
# In[4]:
def load_data(event, year=0):
    """Load the per-lap CSV for *event* and build the derived views.

    Bugfix: *year* is stringified before path concatenation; the old code
    raised TypeError for any int year > 0.

    Args:
        event: event name, e.g. 'Indy500'.
        year: optional year suffix for the file name; 0 means no suffix.

    Returns:
        (alldata, rankdata, acldata): raw records of all cars, the per-lap
        first-crossing ordering, and the completed-laps view over all cars.
    """
    if year > 0:
        inputfile = '../data/final/C_' + event + '-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'
    dataset = pd.read_csv(inputfile)
    final_lap = max(dataset.completed_laps)
    # cars recorded on the final lap are the finishers
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    # keep an unfiltered copy before restricting to finishers
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    # order records by crossing time (stable on original row order) and keep
    # the first record of each (car, lap) pair
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # completed-cars view (currently not returned; kept for parity with old runs)
    cldata = make_cl_data(dataset)
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-(car, lap) completed-laps table.

    Keeps the first record of every (car_number, completed_laps) pair in
    elapsed-time order, then adds per-car ``rank_diff`` and ``time_diff``
    columns, zeroed on the first lap of each car.

    Returns:
        DataFrame with columns: car_number, completed_laps, rank,
        elapsed_time, rank_diff, time_diff, current_status, track_status,
        lap_status.
    """
    # pick up data with valid rank: earliest record per (car, lap)
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade", "laps_behind_prec", "overall_rank", "pit_stop_count",
                          "last_pitted_lap", "start_position", "laps_led"], axis=1)
    # a car's first row must not inherit a diff from the previous car
    first_of_car = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    # bugfix: use .loc instead of chained assignment (uni_ds['col'][mask] = 0),
    # which triggers SettingWithCopyWarning and is silently dropped under
    # pandas copy-on-write semantics
    uni_ds.loc[first_of_car, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[first_of_car, 'time_diff'] = 0
    return uni_ds[['car_number', 'completed_laps', 'rank', 'elapsed_time',
                   'rank_diff', 'time_diff', "current_status", "track_status", "lap_status"]]
# In[5]:
def nan_helper(y):
    """Locate NaNs in *y* and provide an index converter for interpolation.

    Returns:
        (nans, index): boolean NaN mask, and a function mapping a logical
        mask to the integer positions it selects.

    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)
    return mask, lambda z: z.nonzero()[0]
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
def get_modestr(a):
    """Render the MODE_* bits set in *a* as a comma-terminated name list."""
    names = ['%s,' % (_mode_map[key]) for key in _mode_map if test_flag(a, key)]
    return ''.join(names)
# endpos -> vector of prediction_length
# caches keyed by window end position, holding prediction_length vectors
_track_pred = {}
_track_true = {}


def init_track_model():
    """Reset the track-status prediction caches before a new simulation run."""
    global _track_pred, _track_true
    _track_pred, _track_true = {}, {}
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """Predict the track status (yellow-flag laps) for the forecast window.

    Results are memoized per *endpos*; the matching ground-truth tail is
    recorded in ``_track_true`` for later comparison.
    """
    global _track_pred, _track_true
    # empirical caution-length samples (perfect model for Indy500 2018)
    track_model = [6, 4, 4, 5, 6, 6, 4]
    if endpos not in _track_pred:
        # count how many yellow laps immediately precede the forecast start
        yflaplen = 0
        for i in range(1, context_len):
            if track_rec[-prediction_length - i] != 1:
                break
            yflaplen += 1
        # sample a total caution length and fill its remainder into the future
        trackpred = np.zeros(prediction_length, dtype=int)
        yflap_pred = random.choice(track_model)
        if yflaplen > 0 and yflap_pred > yflaplen:
            trackpred[:(yflap_pred - yflaplen)] = 1
        _track_pred[endpos] = trackpred
        _track_true[endpos] = track_rec[-prediction_length:].copy()
    return _track_pred[endpos]
# endpos -> vector of prediction_length
# cache: endpos -> adjusted track-status vector of prediction_length
_track_adjust = {}


def init_adjust_track_model():
    """Clear the track-adjustment cache before a new simulation run."""
    global _track_adjust
    _track_adjust = {}
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """Randomly shift the end of the caution window by one lap (-1/0/+1).

    *tailpos* is the (negative) index of the last yellow lap inside the
    forecast window; results are memoized per *endpos*.
    """
    global _track_adjust
    if endpos not in _track_adjust:
        shift = random.choice([-1, 0, 1])
        trackadjust = track_rec[-prediction_length:].copy()
        if shift == -1:
            # shorten the caution by one lap
            trackadjust[tailpos] = 0
        elif shift == 1:
            # move the caution end one lap later (only when it stays in-window)
            trackadjust[tailpos] = 0
            if (tailpos + 1) <= -1:
                trackadjust[tailpos + 1] = 1
        _track_adjust[endpos] = trackadjust
    return _track_adjust[endpos]
# carno -> adjusted lap_status vector
_lap_adjust = {}
# pit-shift value -> observed count (empirical distribution of adjustments)
_empirical_model = {}


def init_adjust_pitmodel():
    """Reset the per-car pit-adjustment cache and the empirical shift counts.

    Bugfix: the old body only declared ``_lap_adjust`` as global, so the
    assignment to ``_empirical_model`` created a useless local and the
    module-level statistics were never cleared between runs.
    """
    global _lap_adjust, _empirical_model
    _lap_adjust = {}
    _empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """Return a randomly perturbed copy of *lapstatus* for car *carno*.

    Each pit lap (value 1) is shifted by a draw from ``_adjust_model``;
    with force=True the draw is retried until it lands inside the series.
    The result is cached per car in ``_lap_adjust`` so repeated calls for
    the same car return the same adjustment, and each applied shift is
    counted in ``_empirical_model``.

    NOTE(review): a shifted pit that lands on a position already holding a 1
    merges with it, so the total pit count can decrease; also a moved pit can
    be re-processed later in the same scan — confirm both are intentional.
    """
    if carno not in _lap_adjust:
        #adjust it
        lapadjust = lapstatus.copy()
        for pos in range(0, len(lapstatus)):
            if lapadjust[pos] == 1:
                success = False
                # force=True retries until the shift is in-range; the 0-shift
                # entry of _adjust_model guarantees termination
                while(not success):
                    # adjust this pit lap position
                    pos_adjust = get_random_choice(_adjust_model)
                    new_pos = pos + pos_adjust
                    if new_pos >= 0 and new_pos < len(lapstatus):
                        #valid
                        lapadjust[pos] = 0
                        lapadjust[new_pos] = 1
                        success = True
                        #add statistics (item assignment mutates the module-level dict)
                        if pos_adjust not in _empirical_model:
                            _empirical_model[pos_adjust] = 1
                        else:
                            _empirical_model[pos_adjust] += 1
                    if force==False:
                        break
        _lap_adjust[carno] = lapadjust
    return _lap_adjust[carno]
def build_random_model(modeldict):
    """Turn a {value: weight} dict into a sampling table of [value, cdf] rows.

    Rows are sorted by value and the cumulative column is normalized to 1,
    so the table can be sampled with ``get_random_choice``.
    """
    values = sorted(modeldict)
    model = np.zeros((len(values), 2))
    if len(values):
        model[:, 0] = values
        cum = np.cumsum([float(modeldict[v]) for v in values])
        # normalize the cumulative weights into a proper CDF
        model[:, 1] = cum / cum[-1]
    return model
def print_model(model, iscdf=True):
    """Pretty-print a sampling table as 'value:probability' pairs.

    *iscdf* tells whether column 1 holds cumulative (True) or raw (False)
    weights; either way the printed numbers are per-value probabilities.
    """
    ordered = model[np.argsort(model[:, 0])]
    total = 1. if iscdf else np.sum(ordered[:, 1])
    pairs = []
    prev = 0
    for value, weight in ordered:
        pairs.append((value, (weight - prev) / total))
        if iscdf:
            prev = weight
    print(['%d:%.3f'%(x[0],x[1]) for x in pairs])
def get_random_choice(model):
    """Sample one value from a [value, cdf] table built by build_random_model."""
    u = np.random.rand()
    # index of the first row whose cdf reaches the uniform draw
    row = int(np.sum(model[:, 1] < u))
    return int(model[row, 0])
# pit-shift distribution: shift (laps) -> weight; sampled via get_random_choice
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
_adjust_model = build_random_model(_modeldict)
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """Randomly perturb the pit-lap positions inside the forecast window.

    Each pit lap is shifted by a draw from ``_adjust_model``; with
    *force*=True the draw is retried until the shifted position lands
    inside the window, otherwise a single out-of-range draw leaves the
    pit untouched.
    """
    window = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if window[pos] != 1:
            continue
        done = False
        while not done:
            # draw a signed shift for this pit lap
            shift = get_random_choice(_adjust_model)
            target = pos + shift
            if 0 <= target < prediction_length:
                window[pos] = 0
                window[target] = 1
                done = True
            if force == False:
                break
    return window
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
    """Shift each pit lap in the forecast window by -1/0/+1 with equal odds.

    Unlike :func:`adjust_pit_model`, out-of-window moves are simply dropped:
    the pit stays where it is.

    (cleanup: removed the old unused ``lap_adjust`` local that consumed one
    random draw without any effect)

    Args:
        lap_rec: lap-status series; only the last *prediction_length* entries
            are considered.
        endpos: unused, kept for signature compatibility with callers.
        prediction_length: length of the returned window.
    """
    adjust_model = [-1, 0, 1]
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            # shift this pit lap by at most one position, staying in-window
            pos_adjust = random.choice(adjust_model)
            if pos_adjust == -1:
                if (pos - 1 >= 0):
                    lapadjust[pos] = 0
                    lapadjust[pos - 1] = 1
            elif pos_adjust == 1:
                if (pos + 1 < prediction_length):
                    lapadjust[pos] = 0
                    lapadjust[pos + 1] = 1
    return lapadjust
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
    """Predict the pit-stop indicator vector for the forecast window.

    A total stint length is sampled from an empirical model (Indy500 2018);
    if the implied pit lap falls inside the window, exactly one entry is 1.

    Args:
        cuation_laps_instint: caution laps seen so far in the current stint
            (spelling kept for caller compatibility).
        laps_instint: laps already driven in the current stint.
        prediction_length: length of the returned 0/1 vector.
    """
    # empirical stint-length samples, Indy500 2018, all cars; row 0 is used
    # for caution-heavy stints, row 1 otherwise
    # (cleanup: removed the unused top-8 variant that was a dead local)
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
            [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    pit_model = pit_model_all
    if cuation_laps_instint>10:
        #use low model
        pred_pit_laps = random.choice(pit_model[0])
    else:
        pred_pit_laps = random.choice(pit_model[1])
    # mark the predicted pit lap if it lands inside the forecast window
    pitpred = np.zeros(prediction_length, dtype=int)
    if (pred_pit_laps > laps_instint) and (pred_pit_laps <= laps_instint + prediction_length):
        pitpred[pred_pit_laps - laps_instint - 1] = 1
    return pitpred
def make_dataset_byevent(runs, prediction_length, freq,
                       useeid = False,
                       run_ts= COL_LAPTIME,
                       test_event = 'Indy500-2018',
                       test_cars = [],
                       use_global_dict = True,
                       oracle_mode = MODE_ORACLE,
                       half_moving_win = 0,
                       train_ratio=0.8,
                       log_transform = False,
                       context_ratio = 0.,
                       verbose = False
                ):
    """Build GluonTS train/test ListDatasets for one (or all) events.

    Rolling test windows are cut from the test event's series; the target is
    the 2-row matrix [run_ts, COL_LAPSTATUS] (joint laptime/rank + pit model).

    input:
        runs: event index into laptime_data, or <0 for all events
        prediction_length: forecast horizon (laps)
        oracle_mode: bit flags (MODE_*) selecting how the track/lap covariates
            of the forecast window are filled — oracle truth, zeros, the
            current value, model predictions, or disturbed truth
        half_moving_win: rolling step; 0:-1, 1:-prediction_length/2,
            2:-prediction_length
        context_ratio: when nonzero, overrides the minimum context length as a
            fraction of the longest series

    returns:
        (train_ds, test_ds, train_set, test_set)

    NOTE(review): run_ts/test_event/feature_mode are immediately overridden by
    the module globals _run_ts/_test_event/_feature_mode, so the corresponding
    parameters are effectively ignored — confirm intentional.
    NOTE(review): `test_cars` uses a mutable default argument (unused here).
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    # reset the memoized track prediction/adjustment caches for this run
    init_track_model()
    init_adjust_track_model()
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []
    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out
            # NOTE(review): non-test events are skipped entirely, so train_set
            # is always empty in this variant — confirm intentional
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        # enforce a minimal context so very short windows are never emitted
        if context_len < 10:
            context_len = 10
        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            # rec_raw keeps the untouched truth for the MAE statistics below
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            # static categorical features: car id, optionally the event id too
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            # joint target: the primary series plus the pit-lap indicator row
            idx = {run_ts, COL_LAPSTATUS}
            target_val = rec[list(idx)].copy().astype(np.float32)
            #target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                # NOTE(review): unreachable in this variant because non-test
                # events were skipped by the `continue` above
                _train.append({'target': target_val,
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                   rec[COL_LAPSTATUS,:]]
                              }
                              )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1
                #bug fix, fixed the split point for all cars/ts
                for endpos in range(max_len, context_len+prediction_length,
                        step):
                    #check if enough for this ts
                    if endpos > totallen:
                        continue
                    # covariate windows up to the split point (copies: the
                    # mode-specific fills below must not touch rec itself)
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    # stint state at the last observed lap before the forecast
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        # NOTE(review): in the no-pit branch start_pitage is
                        # never incremented, so pitage stays flat between
                        # predicted pits (unlike the x+start_pitage+1 ramps
                        # above) — confirm intentional
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec
                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec
                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    # accumulate the absolute deviation of the filled covariates
                    # from the oracle truth (printed at the end)
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))
                    # NOTE(review): both feature modes currently emit the same
                    # record ([track_rec] only; pitage_rec unused) — confirm
                    # whether FEATURE_PITAGE should include pitage_rec
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:,:endpos],
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec]
                               }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:,:endpos],
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec]
                               }
                              )
                    test_rec_cnt += 1
            #add one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
        train_set.extend(_train)
        test_set.extend(_test)
    print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    # 2-row targets -> one_dim_target=False (multivariate ListDataset)
    train_ds = ListDataset(train_set, freq=freq,one_dim_target=False)
    test_ds = ListDataset(test_set, freq=freq,one_dim_target=False)
    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile,freq, prediction_length, cardinality, train_ds, test_ds):
    """Pickle the dataset bundle [freq, prediction_length, cardinality, train_ds, test_ds]
    into *datafile* using the highest pickle protocol available."""
    payload = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as outfile:
        pickle.dump(payload, outfile, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[6]:
def predict(test_ds,predictor):
    """Run rolling-evaluation forecasts over *test_ds* with *predictor*.

    Returns (tss, forecasts): the ground-truth series and the corresponding
    sample forecasts (100 sample paths each), both materialized as lists.
    """
    fc_iter, ts_iter = make_evaluation_predictions(
        dataset=test_ds,
        predictor=predictor,
        num_samples=100,
    )
    # materialize the forecast iterator first, matching the original order
    forecasts = list(fc_iter)
    tss = list(ts_iter)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name,trainid):
    """
    Load a trained model (or build a baseline predictor) and forecast *test_ds*.

    input:
        test_ds ; gluonts dataset to forecast
        prediction_length ; forecast horizon
        model_name ; one of the serialized deepAR variants ('curtrack',
            'zerotrack','oracle','oracle-laponly','oracle-trackonly','deepAR')
            or a baseline ('naive','zero','arima')
        trainid ; id of the training run, selects the model directory
    return:
        pred_ret ; [tss, forecasts]; empty list when model_name is unknown
    """
    # model-directory name templates for the serialized deepAR variants;
    # factoring them out replaces six near-identical if/elif branches
    deepar_models = {
        'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
        'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
        # 'oracle' intentionally points at the multi-variate model dir
        'oracle': f'deepAR-multi-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
        'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
        'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
        'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
    }
    with mx.Context(mx.gpu(7)):
        pred_ret = []
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'
        if model_name in deepar_models:
            # deserialize a trained gluonts predictor from disk
            modeldir = rootdir + deepar_models[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        elif model_name == 'naive':
            # baseline: repeat the last observed value
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        elif model_name == 'zero':
            # baseline: predict zeros, which keeps the rank unchanged
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        elif model_name == 'arima':
            # baseline: R-backed arima forecaster
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                                prediction_length = prediction_length,trunc_length=60)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        else:
            print(f'error: model {model_name} not support yet!')
    return pred_ret
def load_model(prediction_length, model_name,trainid):
    """
    Load a trained predictor (or build a baseline predictor) without forecasting.

    input:
        prediction_length ; forecast horizon, selects the model directory
        model_name ; one of the serialized deepAR variants ('curtrack',
            'zerotrack','oracle','oracle-laponly','oracle-trackonly','deepAR')
            or a baseline ('naive','zero','arima')
        trainid ; id of the training run, selects the model directory
    return:
        predictor ; the loaded/constructed predictor, or None for an unknown
            model_name (bugfix: previously `predictor` was unbound on that
            path, so the final `return predictor` raised NameError)
    """
    # model-directory name templates for the serialized deepAR variants;
    # mirrors run_prediction_ex so the two stay consistent
    deepar_models = {
        'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}',
        'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}',
        # 'oracle' intentionally points at the multi-variate model dir
        'oracle': f'deepAR-multi-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}',
        'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}',
        'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}',
        'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}',
    }
    with mx.Context(mx.gpu(7)):
        predictor = None  # bugfix: ensure a defined return value for unknown models
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'
        if model_name in deepar_models:
            modeldir = rootdir + deepar_models[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        elif model_name == 'naive':
            # baseline: repeat the last observed value
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
        elif model_name == 'zero':
            # baseline: predict zeros, which keeps the rank unchanged
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
        elif model_name == 'arima':
            # baseline: R-backed arima forecaster
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                                prediction_length = prediction_length,trunc_length=60)
        else:
            print(f'error: model {model_name} not support yet!')
    return predictor
# In[7]:
#calc rank
def eval_rank_bytimediff(test_ds,tss,forecasts,prediction_length):
    """
    Evaluate rank by timediff forecasting (timediff models).

    works for one event only
    input:
        test_ds ; test set for a single event
        tss,forecasts ; forecast result from `predict`
        prediction_length ; expected forecast horizon
    return:
        rank_ret ; list of [lap, time_diff, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [timediff_true, timediff_pred]}}
    """
    carlist = []
    # completed_laps -> carno -> [true timediff window, predicted window]
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        # sanity check the horizon of this forecast
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: the 'f' prefix was missing, so the braces printed literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bugfix: return a 2-tuple so callers that unpack two values fail gracefully
            return [], {}
        # median over the sample paths as the point forecast
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        timediff_array = tss[idx].values.copy()
        # the start lap of this prediction window
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                                               forecast_laptime_mean.copy()]
    # calc rank per start lap
    rank_ret = []
    for lap in forecasts_et.keys():
        # cars that have a window starting at this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}
        # [0]=true, [1]=pred; cars x horizon
        time_diff = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        # double argsort converts values to 0-based ranks (smaller diff = better)
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, time_diff, true_rank, pred_rank])
    return rank_ret,forecasts_et
#calc rank
def eval_laptime(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    Evaluate laptime forecasting directly (no rank conversion).

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                contain features to identify the eventid
        start_offset ; elapsed time for lap0 of this event (kept for signature
                compatibility with eval_rank; unused here)
        tss,forecasts ; forecast result from `predict`
        prediction_length ; expected forecast horizon
    return:
        rank_ret ; list of [lap, lap_time, true_laptime, pred_laptime]
        forecasts_et ; {completed_laps: {carno: [laptime_true, laptime_pred]}}
    """
    carlist = []
    # completed_laps -> carno -> [true laptime window, predicted window]
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        # sanity check the horizon of this forecast
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: the 'f' prefix was missing, so the braces printed literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bugfix: return a 2-tuple so callers that unpack two values fail gracefully
            return [], {}
        # mean over the sample paths as the point forecast
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        laptime_array = tss[idx].values.copy()
        # predicted series: truth with the last window replaced by the forecast
        laptime_array_hat = tss[idx].values.copy()
        laptime_array_hat[-prediction_len:] = forecast_laptime_mean
        # the start lap of this prediction window
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
                                               laptime_array_hat[-prediction_len:].copy()]
    # collect true/pred laptime matrices per start lap
    rank_ret = []
    for lap in forecasts_et.keys():
        # cars that have a window starting at this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}
        # [0]=true, [1]=pred; cars x horizon
        lap_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        # laptime is compared directly, no rank conversion
        true_laptime = lap_time[0]
        pred_laptime = lap_time[1]
        rank_ret.append([lap, lap_time, true_laptime, pred_laptime])
    return rank_ret,forecasts_et
#calc rank
def eval_rank(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    Evaluate rank by laptime forecasting.

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                contain features to identify the eventid
        start_offset ; when a DataFrame (columns include 'car_number' and
                'elapsed_time'), the targets are laptimes and are converted to
                elapsed time first; otherwise the targets are ranked directly
        tss,forecasts ; forecast result from `predict`
        prediction_length ; expected forecast horizon
    return:
        rank_ret ; list of [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [elapsed_time, elapsed_time_pred]}}
    """
    carlist = []
    # completed_laps -> carno -> [true window, predicted window]
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        # laptime mode needs the elapsed time offset of lap0 for this car
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        # sanity check the horizon of this forecast
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: the 'f' prefix was missing, so the braces printed literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bugfix: return a 2-tuple so callers that unpack two values fail gracefully
            return [], {}
        # mean over the sample paths as the point forecast
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        if isinstance(start_offset, pd.core.frame.DataFrame):
            # laptime2rank mode: convert laptimes to elapsed time, then rank
            laptime_array = tss[idx].values.copy()
            elapsed_time = np.cumsum(laptime_array) + offset
            laptime_array = tss[idx].values.copy()
            laptime_array[-prediction_len:] = forecast_laptime_mean
            elapsed_time_hat = np.cumsum(laptime_array) + offset
        else:
            # rank-direct mode: the target itself is used for ranking
            elapsed_time = tss[idx].values.copy()
            elapsed_time_hat = tss[idx].values.copy()
            elapsed_time_hat[-prediction_len:] = forecast_laptime_mean
        # the start lap of this prediction window
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]
    # calc rank per start lap
    rank_ret = []
    for lap in forecasts_et.keys():
        # cars that have a window starting at this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}
        # [0]=true, [1]=pred; cars x horizon
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        # double argsort converts values to 0-based ranks (smaller time = better)
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])
    return rank_ret,forecasts_et
def get_acc(rank_ret,prediction_length, verbose = False):
    """
    Aggregate ranking metrics over all forecast windows.

    input:
        rank_ret: list of [lap, elapsed_time, true_rank, pred_rank]; only the
                  [2] (true_rank) and [3] (pred_rank) columns are used
    return:
        ((top1acc, top1acc_farmost, top5acc, top5acc_farmost, tau, rmse),
         (matching record counts for each metric))
        the pair can be used to derive micro/macro averages downstream
    """
    top1acc = 0
    top1acc_farmost = 0
    top5acc = 0
    top5acc_farmost = 0
    tau = 0
    rmse = 0.
    mae = 0.
    for item in rank_ret:
        true_rank, pred_rank = item[2], item[3]
        # top1: positions where the predicted leader really is the leader
        top1acc += np.sum((true_rank==0) & (pred_rank==0))
        # farmost: only the last (farthest) step of each window
        top1acc_farmost += np.sum((true_rank[:,-1]==0) & (pred_rank[:,-1]==0))
        # top5 hits
        top5acc += np.sum((true_rank<5) & (pred_rank<5))
        top5acc_farmost += np.sum((true_rank[:,-1]<5) & (pred_rank[:,-1]<5))
        # kendall's tau rank correlation for this window
        correlation, _ = stats.kendalltau(true_rank, pred_rank)
        tau += correlation
        rmse += mean_squared_error(pred_rank,true_rank)
        mae += np.sum(np.abs(pred_rank - true_rank))
    recnt = len(rank_ret)
    if recnt > 0:
        # normalize the raw counts/sums into averages
        top1acc = top1acc *1.0/ (recnt*prediction_length)
        top1acc_farmost = top1acc_farmost *1.0/ recnt
        top5acc = top5acc *1.0/ (5*recnt*prediction_length)
        top5acc_farmost = top5acc_farmost *1.0/ (5*recnt)
        tau = tau/recnt
        rmse = rmse/recnt
        mae = mae/recnt
    # debug only: report mae in the tau slot when running on lap status
    if _run_ts == COL_LAPSTATUS:
        tau = mae
    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', top1acc,
              'top1acc_farmost=', top1acc_farmost,
              'top5acc=', top5acc,
              'top5acc_farmost=', top5acc_farmost,
             )
        print('tau = ', tau,
              'rmse = ', rmse,
              'mae = ', mae)
    return ((top1acc,top1acc_farmost,top5acc,top5acc_farmost,tau,rmse),
            (recnt*prediction_length,recnt,5*recnt*prediction_length,5*recnt,recnt,recnt))
# In[ ]:
# In[8]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc, datamode='', models=[]):
    """
    Run the experiment grid (halfmode x prediction_length x trainid) `runs`
    times and report the mean and std of the ranking metrics.

    input:
        runs ; number of repetitions to average over
        plens ; prediction lengths to sweep, e.g. [2,5,10]
        half ; half_moving_win modes to sweep, e.g. [False] or [True,False]
        trainids ; training-run ids to sweep, e.g. ["r0.5"]
        train_ratio ; forwarded to the experiment function
        testfunc ; either a callable (run_exp_predpit, run_exp_predtrack, ...)
            or a string, in which case the global `run_exp` is called with the
            extra `datamode`/`models` arguments (both then required)
    return:
        dfret   ; average result of multiple runs
            dataframe['model' , 'prediction_length', 'halfmode','datamode','trainid',
            'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse',
            'top1acc_std','top1acc_farmost_std','top5acc_std','top5acc_farmost_std','tau_std','rmse_std']
        alldata_ret  ; for debug
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
            pred_ret[model] -> [tss, forecasts]
            test_ds[model]  -> test_ds
            rank_ret[model] -> ([lap, elapsed_time, true_rank, pred_rank],forecast_ret)
            forecast_ret[completed_laps][carno] -> (elapsed_time, elapsed_time_hat)
    """
    # refuse empty sweep settings (note: returns None, not the usual 2-tuple)
    if plens == [] or half == [] or trainids == []:
        print("error with empty settings")
        return
    # a string testfunc routes through run_exp and then needs datamode & models
    if isinstance(testfunc,str) and (datamode == '' or models == []):
        print("error with testfunc")
        return
    allret = []       # one metrics DataFrame per run
    alldata_ret = []  # raw (pred_ret, test_ds, rank_ret) tuples per run, for debugging
    for runid in range(runs):
        exp_data = []
        exp_result = []
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('='*10)
                    # callable testfunc takes (plen, halfmode, ...) directly;
                    # a string dispatches to the global run_exp with datamode/models
                    if not isinstance(testfunc,str):
                        pred_ret, test_ds, rank_ret, metric_ret = testfunc(plen, halfmode,
                                train_ratio=train_ratio,
                                trainid=trainid)
                    else:
                        pred_ret, test_ds, rank_ret, metric_ret = run_exp(plen, halfmode,
                                train_ratio=train_ratio,
                                trainid=trainid,
                                datamode=datamode,
                                models=models)
                    #save
                    exp_data.append((pred_ret, test_ds, rank_ret))
                    exp_result.extend(metric_ret)
        #save result
        result = pd.DataFrame(exp_result, columns = ['model' , 'prediction_length', 'halfmode',
                                            'datamode','trainid',
                                           'top1acc','top1acc_farmost','top5acc',
                                            'top5acc_farmost','tau','rmse'])
        #result['runid'] = [runid for x in range(len(result))]
        allret.append(result)
        alldata_ret.append(exp_data)
    #final: stack the metric columns of all runs into (runs x rows x 6)
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :,:] = ret[['top1acc','top1acc_farmost','top5acc',
                                            'top5acc_farmost','tau','rmse']].values
    #average and std across runs; rows keep the sweep order of the first run
    averagemat = np.mean(metrics[:,:,:], axis=0)
    stdmat = np.std(metrics[:,:,:], axis=0)
    dfhead = allret[0][['model' , 'prediction_length', 'halfmode', 'datamode','trainid']]
    dfaverage = pd.DataFrame(averagemat, columns = ['top1acc','top1acc_farmost','top5acc',
                                            'top5acc_farmost','tau','rmse'])
    dfstd = pd.DataFrame(stdmat, columns = ['top1acc_std','top1acc_farmost_std','top5acc_std',
                                            'top5acc_farmost_std','tau_std','rmse_std'])
    dfret = pd.concat([dfhead, dfaverage, dfstd], axis=1)
    #if exp_id != '':
    #    dfret.to_csv(f'laptime2rank-evaluate-indy500-{exp_id}-result.csv', float_format='%.3f')
    return dfret, alldata_ret
def checkret_status(dataret, runid = 0, idx = 0,model='oracle'):
    """
    Count yellow-flag laps and pit laps inside the prediction windows of
    one model's test set and print the totals.

    input (layout of dataret, as produced by run_test):
        [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
        pred_ret[model] -> [tss, forecasts]
        test_ds[model]  -> test_ds
    """
    # horizon length is taken from the first forecast's sample matrix
    _, plen = dataret[runid][idx][0][model][1][0].samples.shape
    test_ds = dataret[runid][idx][1][model]
    yfcnt = 0
    pitcnt = 0
    for test_rec in test_ds:
        # carno lookup kept for parity with the other checkers (may raise on bad ids)
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        track_rec, lap_rec = test_rec['feat_dynamic_real']
        # sum the status flags over the forecast window only
        yfcnt += np.sum(track_rec[-plen:])
        pitcnt += np.sum(lap_rec[-plen:])
    print('yfcnt:', yfcnt, 'pitcnt:',pitcnt)
def get_ref_oracle_testds(plens, halfs, train_ratio=0.8, test_cars = []):
    """
    Build oracle-mode reference test datasets for every combination of
    prediction length and half-window mode.

    return:
        {'<plen>-<halfmode>': test_ds} keyed by the same '%d-%d' scheme
        that checkret_confusionmat looks up
    """
    testset = {}
    for plen in plens:
        for halfwin in halfs:
            # only the test split is kept; train split and raw sets are discarded
            _, ds, _, _ = make_dataset_byevent(events_id[_test_event], plen, freq,
                                               oracle_mode=MODE_ORACLE,
                                               run_ts = _run_ts,
                                               test_cars=test_cars,
                                               half_moving_win= halfwin,
                                               train_ratio=train_ratio)
            testset['%d-%d'%(plen, halfwin)] = ds
    return testset
def checkret_confusionmat(dataret, ref_testset, runid= 0, testid = '', model='oracle'):
    """
    Output accuracy metrics split by the <trackstatus, lapstatus> window type.

    Each prediction window is classified as '00','10','01','11' depending on
    whether any yellow-flag lap ('1x') and/or any pit lap ('x1') falls inside
    it (taken from the oracle reference dataset), and get_acc is computed per
    type plus an 'aa' row over all windows.

    input:
        dataret ; alldata_ret from run_test:
            [runid][idx] -> (pred_ret, test_ds, rank_ret) per model
        ref_testset ; oracle test datasets from get_ref_oracle_testds,
            keyed '%d-%d' % (plen, halfmode) -- only halfmode 0 is looked up
        runid, testid, model ; which run/label/model to evaluate
    return:
        dfacc ; DataFrame['testid','plen','type','reccnt', metric columns]
    """
    plen_length = len(dataret[runid])
    dflist = []
    for idx in range(plen_length):
        # horizon length from the first forecast's sample matrix
        _, plen = dataret[runid][idx][0][model][1][0].samples.shape
        test_ds = dataret[runid][idx][1][model]
        rank_ret = dataret[runid][idx][2][model][0]
        # reference datasets are keyed for half_moving_win == 0 only
        key = '%d-%d'%(plen,0)
        if key not in ref_testset:
            print(f'error, {key} not found in ref_testset')
            continue
        ref_oracle_testds = ref_testset[key]
        if len(ref_oracle_testds) != len(test_ds):
            print('error, size of testds mismatch', len(ref_oracle_testds), len(test_ds))
            continue
        # confusion matrix for <trackstatus, lapstatus> type: 00,01,10,11
        # lap(start lap of prediction) -> type
        lapmap = {}
        ds_iter = iter(ref_oracle_testds)
        for recid in range(len(ref_oracle_testds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            track_rec,lap_rec = test_rec['feat_dynamic_real']
            # status counts inside the forecast window of this record
            yfcnt = np.sum(track_rec[-plen:])
            pitcnt = np.sum(lap_rec[-plen:])
            #laptype = ('0' if yfcnt==0 else '1') + ('0' if pitcnt==0 else '1')
            lap = len(track_rec) - plen + 1
            # accumulate the counts over all cars sharing this start lap
            if lap not in lapmap:
                #lapmap[lap] = laptype
                lapmap[lap] = (yfcnt, pitcnt)
            else:
                oldtype = lapmap[lap]
                lapmap[lap] = (yfcnt + oldtype[0], pitcnt + oldtype[1])
        #split the rank_ret by laptype
        types=['00','10','01','11']
        acc_ret = []
        for laptype in types:
            check_ret = []
            for item in rank_ret:
                # item[0] is the start lap; classify by the accumulated counts
                typecnt = lapmap[item[0]]
                thetype = ('0' if typecnt[0]==0 else '1') + ('0' if typecnt[1]==0 else '1')
                if thetype == laptype:
                    check_ret.append(item)
            # get acc
            metrics = get_acc(check_ret,plen)
            recret = [testid, plen, laptype, len(check_ret)]
            recret.extend(metrics[0])
            acc_ret.append(recret)
        #add all test ('aa' aggregates every window regardless of type)
        metrics = get_acc(rank_ret,plen)
        recret = [testid, plen, 'aa', len(rank_ret)]
        recret.extend(metrics[0])
        acc_ret.append(recret)
        _dfacc = pd.DataFrame(acc_ret, columns = ['testid','plen',
                'type','reccnt','top1acc','top1acc_farmost','top5acc',
                'top5acc_farmost','tau','rmse'])
        dflist.append(_dfacc)
    dfacc = pd.concat(dflist, axis=0)
    return dfacc
# In[9]:
def check_testds(datamode, test_cars=[]):
    """
    Build the datasets for every (prediction_length, halfmode) combination so
    that make_dataset_byevent reports its statistics (mae, set sizes) as a
    side effect; the datasets themselves are discarded.

    Relies on the module-level sweep settings: plens, half, train_ratio.
    """
    for plen in plens:
        for halfwin in half:
            # return values intentionally ignored; we only want the report
            make_dataset_byevent(events_id[_test_event], plen, freq,
                                 oracle_mode=datamode,
                                 run_ts = _run_ts,
                                 test_cars=test_cars,
                                 half_moving_win= halfwin,
                                 train_ratio=train_ratio)
def dotest(config):
    """
    Run every (model, testfunc) combination described by *config* and collect
    both the averaged metrics and the per-type confusion-matrix accuracies.

    input:
        config ; {model_name: {testfunc_name: datamode}}
    return:
        (dfret, dfacc) ; concatenated run_test results and
        checkret_confusionmat accuracies over all combinations
    """
    acclist = []
    dflist = []
    for model, conf in config.items():
        for teststr, datamode in conf.items():
            # run_test dispatches the string teststr through run_exp
            df, dataret = run_test(runs, plens, half, trainids,
                                   train_ratio, teststr, datamode=datamode, models=[model])
            acc = checkret_confusionmat(dataret, ref_testset,
                                        testid = teststr, model=model)
            dflist.append(df)
            acclist.append(acc)
    return pd.concat(dflist, axis=0), pd.concat(acclist, axis=0)
#
# simulator
#
def get_pitlaps(verbose = True, prediction_length=2):
    """
    Collect pit-stop lap numbers from COL_LAPSTATUS for the test event.

    Uses module globals: laptime_data, _test_event, events, _train_len,
    _run_ts, _inlap_status, _include_endpit.
    return:
        ret_pitlaps ; sorted, de-duplicated list of laps where some car pits
        all_pitlaps ; {carno: [pit laps]} per car
        max_lap ; longest ts length in the test event
    """
    target_row = _run_ts
    all_pitlaps = {}   # carno -> pit laps
    max_lap = 0
    for _data in laptime_data:
        # only the configured test event is considered
        if events[_data[0]] != _test_event:
            continue
        max_lap = int(np.max([ts.shape[1] for ts in _data[2]]))
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber]; work on a copy, then drop nan laps
            rec = _data[2][rowid].copy()
            rec = rec[:, ~np.isnan(rec[target_row,:])]
            totallen = rec.shape[1]
            # skip series too short to train and predict on
            if totallen < _train_len + prediction_length:
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            # laps flagged as pit stops
            pitstops = list(np.where(rec[COL_LAPSTATUS, :] == 1)[0])
            # when inlaps are encoded, every stop occupies two flagged laps;
            # keep only the real stop of each pair
            if _inlap_status == 1:
                # inlap precedes the stop: keep the odd positions
                pitstops = pitstops[1::2]
            elif _inlap_status == 2:
                # inlap follows the stop: keep the even positions
                pitstops = pitstops[0::2]
            all_pitlaps[carno] = list(pitstops)
            # optionally treat the final lap as a pit boundary
            if _include_endpit:
                all_pitlaps[carno].append(totallen - 1)
    # merge all cars into one sorted, unique lap list
    merged = set()
    for laps in all_pitlaps.values():
        merged.update(laps)
    return sorted(merged), all_pitlaps, max_lap
def get_nextpit(pitlaps, startlap):
    """
    Find, for every car, the first pit stop strictly after `startlap`.

    input:
        pitlaps ; {carno: list of pit laps} for all the cars
        startlap ; reference lap number
    return:
        nextpit_map ; {carno: next pit lap}; cars with no later pit are omitted
        maxpit ; largest next pit lap over all cars, or -1 when no car pits
            after startlap (bugfix: previously docstring promised nan while the
            code used -1, and max() raised ValueError on an empty pitlaps)
    """
    nextpit = []
    nextpit_map = {}
    for carno, laps in pitlaps.items():
        # first pit lap strictly greater than startlap, if any
        for lap in laps:
            if lap > startlap:
                nextpit.append(lap)
                nextpit_map[carno] = lap
                break
        else:
            # no later pit for this car; -1 placeholder keeps it out of max()'s way
            nextpit.append(-1)
    # guard the empty case so max() cannot raise
    return nextpit_map, (max(nextpit) if nextpit else -1)
def sim_init():
    """
    Snapshot the pit-related feature rows of every ts in the test event into
    their *_SAVE columns, so the simulation can restore the true status later
    (see update_onets, which resets from the saved columns).
    """
    for _data in laptime_data:
        # only the configured test event participates in the simulation
        if events[_data[0]] != _test_event:
            continue
        for rowid in range(_data[2].shape[0]):
            rec = _data[2][rowid]
            # back up lap status and the two in-stint counters, in place
            rec[COL_LAPSTATUS_SAVE, :] = rec[COL_LAPSTATUS, :]
            rec[COL_CAUTION_LAPS_INSTINT_SAVE, :] = rec[COL_CAUTION_LAPS_INSTINT, :]
            rec[COL_LAPS_INSTINT_SAVE, :] = rec[COL_LAPS_INSTINT, :]
def update_lapstatus(startlap):
    """
    Refresh the predicted lap status of every car in the test event from
    `startlap` onward by running update_onets on each ts (in place).
    """
    for _data in laptime_data:
        # only the configured test event participates in the simulation
        if events[_data[0]] != _test_event:
            continue
        for rowid in range(_data[2].shape[0]):
            # _data[2][rowid] is the feature matrix, _data[1][rowid] the car number
            update_onets(_data[2][rowid], startlap, _data[1][rowid])
_pitmodel = None
def update_onets(rec, startlap, carno):
    """
    Update the lap status of one ts after `startlap` using the pit prediction
    model (module global `_pitmodel`), in place.

    The true status up to and including `startlap` is restored from the
    *_SAVE columns (written by sim_init); everything after is re-simulated:
    the pit model predicts the stint length from the in-stint counters, the
    predicted pit lap is flagged, and the counters are rolled forward.

    input:
        rec ; a ts feature matrix (COL_XXX rows x laps), modified in place
        startlap ; last lap whose true status is kept
        carno ; car number, used only for debug reporting
    return:
        None (rec is updated for COL_LAPSTATUS, COL_CAUTION_LAPS_INSTINT,
        COL_LAPS_INSTINT)
    """
    # effective length = laps with a valid target (trailing nans dropped)
    nans, x= nan_helper(rec[_run_ts,:])
    nan_count = np.sum(nans)
    recx = rec[:, ~np.isnan(rec[_run_ts,:])]
    # nothing to simulate when startlap is at/after the end of the ts
    totallen = recx.shape[1]
    if startlap >= totallen:
        return
    #totallen = tsrec.shape[1]
    #ipdb.set_trace()
    #reset status :startlap + 1 -- restore truth up to startlap, clear the rest
    endpos = startlap + 1
    rec[COL_LAPSTATUS,:] = 0
    rec[COL_LAPSTATUS,:endpos] = rec[COL_LAPSTATUS_SAVE, :endpos]
    rec[COL_CAUTION_LAPS_INSTINT,:endpos] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :endpos]
    rec[COL_LAPS_INSTINT, :endpos] = rec[COL_LAPS_INSTINT_SAVE, :endpos]
    #rec[COL_LAPSTATUS,:] = rec[COL_LAPSTATUS_SAVE, :]
    #rec[COL_CAUTION_LAPS_INSTINT,:] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :]
    #rec[COL_LAPS_INSTINT, :] = rec[COL_LAPS_INSTINT_SAVE, :]
    debug_report('start update_onets', rec[COL_LAPSTATUS], startlap, carno)
    #loop on predict nextpit pos
    curpos = startlap
    while True:
        caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, curpos])
        laps_instint = int(rec[COL_LAPS_INSTINT, curpos])
        # predicted stint length given the counters at curpos
        pred_pit_laps = _pitmodel.predict(caution_laps_instint, laps_instint)
        nextpos = curpos + pred_pit_laps - laps_instint
        if nextpos >= totallen:
            # predicted pit falls beyond the ts: roll the counters to the end and stop
            nextpos = totallen - 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos+1] = caution_laps_instint
            for _pos in range(curpos+1, nextpos+1):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            break
        else:
            #if (pred_pit_laps > laps_instint) and (nextpos < totallen):
            # a valid pit
            rec[COL_LAPSTATUS, nextpos] = 1
            if _inlap_status != 0:
                #inlap is 'P' -- flag the adjacent lap as well
                if _inlap_status == 1 :
                    #rec[COL_LAPSTATUS, nextpos-1] = _inlap_status
                    rec[COL_LAPSTATUS, nextpos-1] = 1
                else:
                    #todo: no boudary check
                    #rec[COL_LAPSTATUS, nextpos+1] = _inlap_status
                    rec[COL_LAPSTATUS, nextpos+1] = 1
            # counters run up to the pit, then reset at the pit lap
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos] = caution_laps_instint
            rec[COL_CAUTION_LAPS_INSTINT, nextpos] = 0
            for _pos in range(curpos+1, nextpos):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            rec[COL_LAPS_INSTINT, nextpos] = 0
            #go forward
            curpos = nextpos
    debug_report('after update_onets', rec[COL_LAPSTATUS], startlap, carno)
    return
# Debug bookkeeping for tracking the simulation status.
# Status matrix layout: laps x (endCol x 5 features);
# features: target, lapstatus, lap_instint, caution_instint, trackstatus.
_status_mat = {} # stepid -> status matrix
def debug_report_mat(startlap, maxnext):
    """
    Output the status of the simulation as a matrix.

    NOTE(review): this routine only walks the test-event data and binds
    locals; it never prints or returns anything -- the report body appears
    to be unfinished.
    """
    fixedWidth = 5
    endCol = 4
    run_ts = _run_ts
    for event_data in laptime_data:
        # only report on the configured test event
        if events[event_data[0]] != _test_event:
            continue
        # length statistics over all time series of this event
        series_lengths = [series.shape[1] for series in event_data[2]]
        max_lap = int(np.max(series_lengths))
        # walk every car's record; rec layout: [features, lapnumber]
        for row in range(event_data[2].shape[0]):
            rec = event_data[2][row]
# Car numbers to trace in debug output; empty list disables all debug
# printing (e.g. set to [12] to trace car #12).
_debug_carlist = []
#_debug_carlist = [12]
def debug_report_ts(msg, rec, startlap, carno, col= COL_LAPSTATUS):
    """Print one feature row of *rec* split at ``startlap`` for traced cars.

    Does nothing unless *carno* is listed in the module-level
    ``_debug_carlist``.
    """
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        # context part (up to and including startlap) ...
        print(rec[col, : startlap + 1])
        print('='*10)
        # ... then the remainder of the series
        print(rec[col, startlap + 1:])
def debug_report(msg, rec, startlap, carno):
    """Print a 1-D array *rec* split at ``startlap`` for traced cars only."""
    if carno not in _debug_carlist:
        return
    print(f'--------- {msg}: {startlap} ----------')
    head, tail = rec[: startlap + 1], rec[startlap + 1:]
    print(head)
    print('='*10)
    print(tail)
def debug_print(msg):
    """Print *msg* only when debug tracing is enabled (non-empty car list)."""
    if _debug_carlist:
        print(msg)
# works on predicted lap status
def sim_onestep_pred(predictor, prediction_length, freq,
        startlap, endlap,
        oracle_mode = MODE_ORACLE,
        sample_cnt = 100,
        verbose = False
        ):
    """
    Auto-regressive one-step simulation driven by the *predicted* lap status.

    Rolls a forecast window forward from ``startlap`` to ``endlap`` in
    steps of ``prediction_length``; each window's mean forecast is written
    back into ``forecasts_et`` so the next window conditions on it
    (regressive forecasting).

    input:
        predictor ; trained gluonts predictor (multivariate target)
        prediction_length ; laps per forecast window
        freq ; pandas frequency string for the synthetic timestamps
        startlap ; first lap of the simulated span
        endlap ; last lap of the simulated span
        oracle_mode ; bit flags selecting covariate/oracle behavior
        sample_cnt ; number of sample paths per forecast
        verbose ; print skipped short time series
    return:
        forecasts_et ; {}, carno -> 7 x totallen matrix
            0,: -> lapstatus (the saved/true status row)
            1:3,: -> true target rows (target + lapstatus)
            5:7,: -> predicted target rows, updated in place per window
        forecasts_samples ; {}, carno -> samples of the last lap of the
            final window (used for p-risk evaluation)
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    forecasts_samples = {}
    _laptime_data = laptime_data.copy()
    # first window ends one lap past startlap + prediction_length
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + prediction_length + 1):
    while(endpos <= endlap + prediction_length):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            #ipdb.set_trace()
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # this car retired before the current window; skip it
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                # NOTE(review): target_idx is a *set*, so list(target_idx)
                # ordering depends on int hash order -- verify the row order
                # of the multivariate target is the intended (run_ts, lapstatus)
                target_idx = {run_ts, COL_LAPSTATUS}
                target_dim = len(target_idx)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # <7, totallen> result matrix, created once per car
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((7, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS_SAVE, :].copy()
                    forecasts_et[carno][1:3,:] = rec[list(target_idx)].copy().astype(np.float32)
                    forecasts_et[carno][5:7,:] = rec[list(target_idx)].copy().astype(np.float32)
                    # for p-risk
                    forecasts_samples[carno] = np.zeros((sample_cnt))
                # forecasts_et will be updated by forecasts; target_val is a
                # *view*, so earlier windows' predictions feed later windows
                target_val = forecasts_et[carno][5:7,:]
                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test mode
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #        laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit resets the pit age
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set
                # NOTE(review): FEATURE_STATUS passes only track_rec here,
                # unlike sim_onestep_ex which passes [track_rec, lap_rec] --
                # confirm this asymmetry is intended
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:,:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec]
                             }
                          )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:,:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                             }
                          )
                test_rec_cnt += 1
                #debug
                #debug_report('simu_onestep', rec, startlap, carno, col= _run_ts)
                #debug_report(f'simu_onestep: {startlap}-{endlap}, endpos={endpos}', target_val[:endpos], startlap, carno)
        # end of for each ts
        # RUN Prediction here (one batch for all cars in this window)
        test_ds = ListDataset(_test, freq=freq,one_dim_target=False)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=sample_cnt,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((target_dim, prediction_length))
            pos = len(tss[idx]) - prediction_length
            debug_report('before forecast samples:', forecasts_et[carno][5][pos:], prediction_length, carno)
            #update the forecasts , ready to use in the next prediction(regresive forecasting)
            #forecasts_et[carno][5:7, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
            # NOTE(review): the reshape((target_dim, plen)) + transpose pair
            # only lines up with the (2, plen) destination slice when
            # prediction_length == target_dim (== 2) -- verify for other horizons
            forecasts_et[carno][5:7, len(tss[idx]) - prediction_length:len(tss[idx])] = np.transpose(forecast_laptime_mean.copy())
            #save the samples, the farest samples (last lap, first target dim)
            forecasts_samples[carno][:] = forecasts[idx].samples[:,-1,0].reshape(-1)
            #debug
            pos = len(tss[idx]) - prediction_length
            debug_report('after update forecast samples:', forecasts_et[carno][5][pos:], prediction_length, carno)
        #go forward
        endpos += prediction_length
    return forecasts_et, forecasts_samples
# works on lapstatus ground truth
def sim_onestep_ex(predictor, prediction_length, freq,
        startlap, endlap,
        oracle_mode = MODE_ORACLE,
        verbose = False
        ):
    """
    Auto-regressive one-step simulation driven by the *ground-truth* lap
    status.  Like ``sim_onestep_pred`` but with a univariate target and
    true lap-status covariates; one batched prediction per window.

    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap ; first lap of the simulated span
        endlap ; last lap of the simulated span
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target (updated in place per window,
                   laps past endlap cleared to nan before returning)
            3, -> placeholder
            4, -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + 1):
    while(endpos <= endlap + prediction_length + 1):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # car retired before this window
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # <5, totallen> result matrix, created once per car
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                    #forecasts_et[carno][2,:endpos] = rec[run_ts,:endpos].copy().astype(np.float32)
                # forecasts_et will be updated by forecasts; target_val is a
                # *view*, so earlier windows' predictions feed later windows
                target_val = forecasts_et[carno][2,:]
                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test mode
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #        laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit resets the pit age
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec]
                             }
                          )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                             }
                          )
                test_rec_cnt += 1
        # end of for each ts
        # RUN Prediction here (one batch for all cars in this window)
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=100,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            #update the forecasts , ready to use in the next prediction(regresive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
        #go forward
        endpos += prediction_length
    #clear the unpred part (laps beyond endlap were only simulation overshoot)
    for carno in forecasts_et.keys():
        forecasts_et[carno][2, endlap+1:] = np.nan
    return forecasts_et
def sim_onestep(predictor, prediction_length, freq,
        startlap, endlap,
        oracle_mode = MODE_ORACLE,
        verbose = False
        ):
    """
    Per-car auto-regressive simulation from ``startlap`` to ``endlap``.

    Unlike ``sim_onestep_ex`` (which batches all cars per window), this
    version rolls each car's series independently: one single-record
    dataset and one predictor call per window per car, writing each mean
    forecast back into ``target_val``/``rec`` so the next window of the
    same car conditions on it.

    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    #add statistics for adjust test
    # trackstatus, lapstatus (NOTE: never updated below)
    mae = [0,0]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _test = []
        if events[_data[0]] != test_event:
            #jump out
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()
            # <5, totallen> result matrix for this car
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if True:
                #bug fix, fixed the split point for all cars/ts
                #for endpos in range(max_len, context_len+prediction_length,step):
                #step = prediction_length
                #for endpos in range(startlap + prediction_length, endlap, step):
                endpos = startlap + prediction_length
                while(endpos < endlap and endpos < totallen):
                    # RUN Prediction for single record
                    _test = []
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #todo
                        #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                        #        laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit resets the pit age
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                              )
                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)
                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )
                    forecasts = list(forecast_it)
                    tss = list(ts_it)
                    #get prediction result (single record -> index 0)
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
                    #update target_val and covariates so the next window of
                    #this car conditions on the just-predicted values
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
                    #save forecast
                    #save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    #print('car no:', carno, 'completed_laps:', completed_laps)
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
                    test_rec_cnt += 1
                    #go forward
                    endpos += prediction_length
            #one ts done
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
    return forecasts_et
# pred pit differs to true pit
def get_acc_onestint_pred(forecasts, startlap, nextpit, nextpit_pred, trim=2, currank = False):
    """
    Evaluate rank-change accuracy for the stint starting at ``startlap``
    when the predicted pit lap may differ from the true pit lap.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5+,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; eval for the stint start from startlap only
        nextpit ; carno -> true next pitstop lap (may be nan)
        nextpit_pred ; carno -> predicted next pitstop lap (may be nan)
        currank ; if True use the "current rank" baseline as the prediction
    output:
        list of rows: carno, startlap, startrank, endrank, diff, sign,
        pred_endrank, pred_diff, pred_sign
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]

        # bug fix: bounds check BEFORE indexing; the original read
        # forecasts[carno][0, startlap] first, which raised IndexError for
        # cars that retired before startlap
        if startlap >= lapnum:
            continue

        #lap status condition (inlap may be marked on the lap before/after the pit)
        if _inlap_status == 0:
            lapstatus_cont = (forecasts[carno][0, startlap] == 1)
        elif _inlap_status == 1:
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap-1] == 1))
        elif _inlap_status == 2:
            # bug fix: guard startlap+1 against running off the series end
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (startlap + 1 < lapnum)
                              and (forecasts[carno][0, startlap+1] == 1))
        else:
            # bug fix: lapstatus_cont was unbound (NameError) for any other
            # _inlap_status value; fall back to the plain pit-lap check
            lapstatus_cont = (forecasts[carno][0, startlap] == 1)

        if carno in _debug_carlist:
            _debug_msg = 'startlap=%d, total=%d, pitstop status = %s, nextpit=%s, nextpit_pred=%s'%(startlap, lapnum, lapstatus_cont,
                    'none' if (carno not in nextpit) else nextpit[carno],
                    'none' if (carno not in nextpit_pred) else nextpit_pred[carno],
                    )
            debug_print(_debug_msg)

        # only evaluate stints that actually start with a pit here
        if not lapstatus_cont:
            continue
        startrank = true_rank[startlap-trim]
        if not carno in nextpit:
            continue
        pitpos = nextpit[carno]
        if np.isnan(pitpos):
            continue
        # bug fix: pit laps may arrive as floats (dict values mixed with
        # nan); float indices raise on numpy arrays
        pitpos = int(pitpos)

        #todo, use the true prediction that longer than maxlap
        if _force_endpit_align:
            # align missing/nan predictions onto the true pit lap
            if not carno in nextpit_pred:
                pitpos_pred = pitpos
            else:
                pitpos_pred = nextpit_pred[carno]
                if np.isnan(pitpos_pred):
                    pitpos_pred = pitpos
        else:
            if not carno in nextpit_pred:
                continue
            pitpos_pred = nextpit_pred[carno]
            if np.isnan(pitpos_pred):
                #set prediction to the end
                continue
        pitpos_pred = int(pitpos_pred)

        endrank = true_rank[pitpos-trim]
        diff = endrank - startrank
        sign = get_sign(diff)

        if currank:
            #force into currank model, zero doesn't work here
            pred_endrank = startrank
        else:
            pred_endrank = pred_rank[pitpos_pred-trim]
        pred_diff = pred_endrank - startrank
        pred_sign = get_sign(pred_diff)

        rankret.append([carno, startlap, startrank,
                        endrank, diff, sign,
                        pred_endrank, pred_diff, pred_sign
                       ])
    return rankret
# short-term window accuracy (fixed horizon, no pit alignment)
def get_acc_onestep_shortterm(forecasts, startlap, endlap, trim=0, currank = False):
    """
    Evaluate short-term rank-change accuracy between ``startlap`` and
    ``endlap`` for every car.

    input:
        trim ; steady lap offset subtracted from both lap indices
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; window start lap
        endlap ; window end lap (startlap + prediction_length at call sites)
        currank ; if True use the "current rank" baseline as the prediction
    output:
        list of rows: carno, startlap, startrank, endrank, diff, sign,
        pred_endrank, pred_diff, pred_sign
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]
        if np.isnan(endlap):
            continue
        # bug fix: the original only checked startlap < lapnum, so
        # true_rank[endlap-trim] raised IndexError for cars whose series is
        # shorter than the evaluation window
        if startlap >= lapnum or endlap - trim >= lapnum:
            continue
        end_idx = int(endlap) - trim  # endlap may arrive as a float
        startrank = true_rank[startlap-trim]
        endrank = true_rank[end_idx]
        diff = endrank - startrank
        sign = get_sign(diff)
        if currank:
            #force into currank model, zero doesn't work here
            pred_endrank = startrank
        else:
            pred_endrank = pred_rank[end_idx]
        pred_diff = pred_endrank - startrank
        pred_sign = get_sign(pred_diff)
        rankret.append([carno, startlap, startrank,
                        endrank, diff, sign,
                        pred_endrank, pred_diff, pred_sign
                       ])
    return rankret
# works when pred pitstop == true pitstop
def get_acc_onestint(forecasts, startlap, nextpit, trim=2, currank = False):
    """
    Evaluate rank-change accuracy for the stint starting at ``startlap``,
    assuming the predicted pit lap equals the true one.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen] with rows
            0; lap_status, 3; true_rank, 4; pred_rank
        startlap ; eval only stints that start with a pit at this lap
        nextpit ; carno -> next pitstop lap (may be nan)
    output:
        rows of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    rows = []
    for carno, mat in forecasts.items():
        total_laps = len(mat[1,:])
        rank_true = mat[3,:]
        rank_pred = mat[4,:]
        # only stints that actually start with a pit at startlap
        if startlap >= total_laps or mat[0, startlap] != 1:
            continue
        start_rank = rank_true[startlap-trim]
        if carno not in nextpit:
            continue
        pitpos = nextpit[carno]
        if np.isnan(pitpos):
            continue
        end_rank = rank_true[pitpos-trim]
        delta = end_rank - start_rank
        # currank baseline: "prediction" is just the current rank
        pred_end = start_rank if currank else rank_pred[pitpos-trim]
        pred_delta = pred_end - start_rank
        rows.append([carno, startlap, start_rank,
                     end_rank, delta, get_sign(delta),
                     pred_end, pred_delta, get_sign(pred_delta)
                    ])
    return rows
# pred sim
def run_simulation_pred(predictor, prediction_length, freq,
                 datamode = MODE_ORACLE):
    """
    Simulation with a *predicted* pit model.

    step:
        1. init the lap status model
        2. for each true pit lap:
            1. re-predict lap status, run one-step simulation
            2. evaluate per-stint rank accuracy

    Returns a DataFrame with one row per (car, stint).
    """
    records = []
    # the ground truth pit structure
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    for pitlap in allpits:
        #1. update lap status (skipped when the pit model is the oracle)
        debug_print(f'start pitlap: {pitlap}')
        using_oracle_pits = isinstance(_pitmodel, str) and _pitmodel == 'oracle'
        if not using_oracle_pits:
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        #2. next pit both under truth and under the predicted status
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        # trace the first debugged car, if any
        if _debug_carlist:
            traced = _debug_carlist[0]
            if traced in nextpit and traced in nextpit_pred:
                debug_print('nextpit: %d, %d, maxnext: %d, %d'%(nextpit[traced], nextpit_pred[traced]
                        , maxnext, maxnext_pred))
        # one-step simulation from pitlap up to the later of the two bounds
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, max(maxnext, maxnext_pred),
                oracle_mode = datamode,
                sample_cnt = 100
                )
        debug_print(f'simulation done: {len(forecast)}')
        # turn raw forecasts into rank estimates for this experiment type
        if _exp_id in ('rank', 'timediff2rank'):
            ranked = eval_stint_direct(forecast, 2)
        elif _exp_id == 'laptime2rank':
            ranked = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this stint
        records.extend(get_acc_onestint_pred(ranked, pitlap, nextpit, nextpit_pred, trim=_trim))
    return pd.DataFrame(records, columns=['carno', 'startlap', 'startrank',
                                          'endrank', 'diff', 'sign',
                                          'pred_endrank', 'pred_diff', 'pred_sign',
                                          ])
#prediction of shorterm + pred pit model
def run_simulation_shortterm(predictor, prediction_length, freq,
                 datamode = MODE_ORACLE,
                 sample_cnt = 100):
    """
    Short-term simulation with the predicted pit model.

    step:
        1. init the lap status model
        2. slide a fixed window of ``prediction_length`` laps from lap 10
           to the end of the race:
            1. re-predict lap status, run one-step simulation
            2. evaluate window rank accuracy and accumulate forecast samples

    Returns (df, full_samples, full_tss).
    """
    records = []
    # the ground truth pit structure
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    # accumulated sample paths / time series across all windows
    full_samples = {}
    full_tss = {}
    for pitlap in range(10, maxlap-prediction_length):
        #1. update lap status (skipped when the pit model is the oracle)
        debug_print(f'start pitlap: {pitlap}')
        using_oracle_pits = isinstance(_pitmodel, str) and _pitmodel == 'oracle'
        if not using_oracle_pits:
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        # one-step simulation over a single fixed-size window
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, pitlap + prediction_length,
                oracle_mode = datamode,
                sample_cnt = sample_cnt
                )
        debug_print(f'simulation done: {len(forecast)}')
        # turn raw forecasts into rank estimates for this experiment type
        if _exp_id in ('rank', 'timediff2rank'):
            ranked = eval_stint_direct(forecast, 2)
        elif _exp_id == 'laptime2rank':
            ranked = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate this window
        records.extend(get_acc_onestep_shortterm(ranked, pitlap, pitlap+prediction_length))
        # fold this window's samples into the full-race accumulators
        eval_full_samples(pitlap + prediction_length,
                forecast_samples, forecast,
                full_samples, full_tss)
    df = pd.DataFrame(records, columns=['carno', 'startlap', 'startrank',
                                        'endrank', 'diff', 'sign',
                                        'pred_endrank', 'pred_diff', 'pred_sign',
                                        ])
    return df, full_samples, full_tss
# oracle sim
def run_simulation(predictor, prediction_length, freq,
                 datamode = MODE_ORACLE):
    """
    Oracle simulation: loop over every true pit lap, simulate one stint
    with ground-truth lap status, and collect per-stint rank accuracy.

    Returns a DataFrame with one row per (car, stint).
    """
    records = []
    allpits, pitmat, maxlap = get_pitlaps()
    for pitlap in allpits:
        print(f'start pitlap: {pitlap}')
        # the true next pit of each car bounds this stint
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        # one-step simulation over [pitlap, maxnext]
        forecast = sim_onestep_ex(predictor, prediction_length, freq,
                pitlap, maxnext,
                oracle_mode = datamode
                )
        print(f'simulation done: {len(forecast)}')
        # turn raw forecasts into rank estimates for this experiment type
        if _exp_id in ('rank', 'timediff2rank'):
            ranked = eval_stint_direct(forecast, 2)
        elif _exp_id == 'laptime2rank':
            ranked = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this stint
        records.extend(get_acc_onestint(ranked, pitlap, nextpit))
    return pd.DataFrame(records, columns=['carno', 'startlap', 'startrank',
                                          'endrank', 'diff', 'sign',
                                          'pred_endrank', 'pred_diff', 'pred_sign',
                                          ])
#
# ------------
#
def longterm_predict(predictor, runs, prediction_length, freq,
useeid = False,
run_ts= COL_LAPTIME,
test_event = 'Indy500-2018',
test_cars = [],
use_global_dict = True,
oracle_mode = MODE_ORACLE,
half_moving_win = 0,
train_ratio=0.8,
log_transform = False,
verbose = False
):
"""
split the ts to train and test part by the ratio
input:
oracle_mode: false to simulate prediction in real by
set the covariates of track and lap status as nan in the testset
half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
return:
forecast ; {}, carno -> 5 x totallen matrix
0,: -> lapstatus
1,: -> true target
2,: -> pred target
3, -> placeholder
4, -> placeholder
"""
run_ts= _run_ts
test_event = _test_event
feature_mode = _feature_mode
context_ratio = _context_ratio
init_track_model()
init_adjust_track_model()
start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
train_set = []
test_set = []
forecasts_et = {}
#select run
if runs>=0:
_laptime_data = [laptime_data[runs].copy()]
else:
_laptime_data = laptime_data.copy()
#add statistics for adjust test
# trackstatus, lapstatus
mae = [0,0]
#_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
for _data in _laptime_data:
_train = []
_test = []
if events[_data[0]] == test_event:
test_mode = True
else:
test_mode = False
#jump out
continue
#statistics on the ts length
ts_len = [ _entry.shape[1] for _entry in _data[2]]
max_len = int(np.max(ts_len))
train_len = int(np.max(ts_len) * train_ratio)
if context_ratio != 0.:
# add this part to train set
context_len = int(np.max(ts_len) * context_ratio)
else:
context_len = prediction_length*2
if context_len < 10:
context_len = 10
if verbose:
#print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
# process for each ts
for rowid in range(_data[2].shape[0]):
# rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
rec = _data[2][rowid].copy()
rec_raw = _data[2][rowid].copy()
#remove nan(only tails)
nans, x= nan_helper(rec[run_ts,:])
nan_count = np.sum(nans)
rec = rec[:, ~np.isnan(rec[run_ts,:])]
# remove short ts
totallen = rec.shape[1]
if ( totallen < train_len + prediction_length):
if verbose:
print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
continue
if use_global_dict:
carno = _data[1][rowid]
carid = global_carids[_data[1][rowid]]
else:
#simulation dataset, todo, fix the carids as decoder
carno = rowid
carid = rowid
if useeid:
static_cat = [carid, _data[0]]
else:
static_cat = [carid]
#first, get target a copy
# target can be COL_XXSTATUS
target_val = rec[run_ts,:].copy().astype(np.float32)
lap_status = rec[COL_LAPSTATUS, :].copy()
track_status = rec[COL_TRACKSTATUS, :].copy()
pitage_status = rec[COL_LAPS_INSTINT,:].copy()
# <3, totallen>
forecasts_et[carno] = np.zeros((5, totallen))
forecasts_et[carno][:,:] = np.nan
forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
if log_transform:
target_val = np.log(target_val + 1.0)
# adjust for disturbance analysis
if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
lap_status = rec[COL_LAPSTATUS, :].copy()
rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)
# selection of features
if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
rec[COL_TRACKSTATUS, :] = 0
if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
rec[COL_LAPSTATUS, :] = 0
test_rec_cnt = 0
if not test_mode:
# all go to train set
_train.append({'target': target_val,
'start': start,
'feat_static_cat': static_cat,
'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
rec[COL_LAPSTATUS,:]]
}
)
else:
# reset train_len
#context_len = prediction_length*2
#if context_len < 10:
# context_len = 10
#context_len = train_len
# multiple test ts(rolling window as half of the prediction_length)
#step = -int(prediction_length/2) if half_moving_win else -prediction_length
if half_moving_win == 1:
step = int(prediction_length/2)
elif half_moving_win == 2:
step = prediction_length
else:
step = 1
#bug fix, fixed the split point for all cars/ts
#for endpos in range(max_len, context_len+prediction_length,step):
for endpos in range(context_len+prediction_length, max_len, step):
#check if enough for this ts
if endpos > totallen:
break
# RUN Prediction for single record
_test = []
# check pitstop(stint) in the last prediction
# use ground truth of target before the last pitstop
if np.sum(lap_status[endpos-2*prediction_length:endpos-prediction_length]) > 0:
# pit found
# adjust endpos
pitpos = np.where(lap_status[endpos-2*prediction_length:endpos-prediction_length] == 1)
endpos = endpos-2*prediction_length + pitpos[0][0] + prediction_length + 1
#print('endpos:',endpos,pitpos)
#check if enough for this ts
if endpos > totallen:
break
#reset target, status
target_val = rec[run_ts,:].copy().astype(np.float32)
rec[COL_LAPSTATUS, :] = lap_status
rec[COL_TRACKSTATUS, :] = track_status
rec[COL_LAPS_INSTINT, :] = pitage_status
track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
# test mode
if test_flag(oracle_mode, MODE_TESTCURTRACK):
# since nan does not work, use cur-val instead
track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
#track_rec[-prediction_length:] = random.randint(0,1)
#lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
lap_rec[-prediction_length:] = 0
#for pitage, just assume there is no pit
start_pitage = pitage_rec[-prediction_length - 1]
pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
elif test_flag(oracle_mode, MODE_TESTZERO):
#set prediction part as nan
#track_rec[-prediction_length:] = np.nan
#lap_rec[-prediction_length:] = np.nan
track_rec[-prediction_length:] = 0
lap_rec[-prediction_length:] = 0
#for pitage, just assume there is no pit
start_pitage = pitage_rec[-prediction_length - 1]
pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
# predicting with status model
if test_flag(oracle_mode, MODE_PREDTRACK):
predrec = get_track_model(track_rec, endpos, prediction_length)
track_rec[-prediction_length:] = predrec
#lap_rec[-prediction_length:] = 0
if test_flag(oracle_mode, MODE_PREDPIT):
#predrec = get_track_model(track_rec, endpos, prediction_length)
#track_rec[-prediction_length:] = predrec
lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
laps_instint,prediction_length)
#for pitage, use the predicted lap info to update pitage
start_pitage = pitage_rec[-prediction_length - 1]
for pos in range(prediction_length):
if lap_rec[-prediction_length + pos]==0:
pitage_rec[-prediction_length + pos] = start_pitage+1
else:
#new pit
start_pitage = 0
pitage_rec[-prediction_length + pos] = start_pitage
# disturbe analysis
if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
# clear the oracle track status
# future 1s in trackstatus
# pattern like 0 1 xx
for _pos in range(-prediction_length + 1, -1):
if track_rec[_pos - 1] == 0:
track_rec[_pos] = 0
if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
# adjust the end position of track, or caution lap length
# find the end of caution laps
_tail = 0
for _pos in range(-1,-prediction_length + 1,-1):
if track_rec[_pos] == 1:
#find the tail
_tail = _pos
break
if _tail != 0:
#found
adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
track_rec[-prediction_length:] = adjustrec
#if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
# # adjust the position of pit
# if np.sum(lap_rec[-prediction_length:]) > 0:
# adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
# lap_rec[-prediction_length:] = adjustrec
#okay, end of adjustments, test difference here
# rec_raw .vs. track_rec, lap_rec
track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))
if feature_mode == FEATURE_STATUS:
_test.append({'target': target_val[:endpos].astype(np.float32),
'start': start,
'feat_static_cat': static_cat,
'feat_dynamic_real': [track_rec,lap_rec]
}
)
elif feature_mode == FEATURE_PITAGE:
_test.append({'target': target_val[:endpos].astype(np.float32),
'start': start,
'feat_static_cat': static_cat,
'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
}
)
# RUN Prediction here, for single record
test_ds = ListDataset(_test, freq=freq)
forecast_it, ts_it = make_evaluation_predictions(
dataset=test_ds, # test dataset
predictor=predictor, # predictor
num_samples=100, # number of sample paths we want for evaluation
)
forecasts = list(forecast_it)
tss = list(ts_it)
#get prediction result
forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))
#update target_val
target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]
#save forecast
#save the prediction
completed_laps = len(tss[0]) - prediction_length + 1
#print('car no:', carno, 'completed_laps:', completed_laps)
forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()
test_rec_cnt += 1
#one ts
if verbose:
print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
#train_set.extend(_train)
#test_set.extend(_test)
#print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
#train_ds = ListDataset(train_set, freq=freq)
#test_ds = ListDataset(test_set, freq=freq)
return forecasts_et
# In[12]:
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset):
    """
    Rank cars per lap from (true, predicted) lap-time forecasts.

    input:
        forecasts_et ; dict carno -> array[>=5, totallen]
            row 1: true lap times, row 2: predicted lap times
        start_offset ; DataFrame with lap-0 elapsed_time per car_number
        prediction_length ; unused here, kept for a uniform call signature
    return:
        forecasts_et with row 3 (true rank) and row 4 (predicted rank) filled
    """
    max_laps = 200  # Indy500 race length upper bound
    car_numbers = list(forecasts_et.keys())
    row_of = {car: i for i, car in enumerate(car_numbers)}

    # layer 0 holds ground truth, layer 1 the prediction; NaN marks no data
    cum_elapsed = np.full((2, len(car_numbers), max_laps), np.nan)
    for car in car_numbers:
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number'] == car)].elapsed_time.values[0]
        series = forecasts_et[car]
        n_laps = series.shape[1]
        # lap times accumulate into elapsed time; add the lap-0 offset
        cum_elapsed[0, row_of[car], :n_laps] = np.cumsum(series[1, :]) + offset
        cum_elapsed[1, row_of[car], :n_laps] = np.cumsum(series[2, :]) + offset

    # double argsort turns per-lap elapsed times into rank positions
    # (NaNs sort last, so short series do not disturb valid laps)
    true_rank = np.argsort(np.argsort(cum_elapsed[0], axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(cum_elapsed[1], axis=0), axis=0)

    # write the ranks back into rows 3 and 4
    for car in car_numbers:
        n_laps = forecasts_et[car].shape[1]
        forecasts_et[car][3, :] = true_rank[row_of[car], :n_laps]
        forecasts_et[car][4, :] = pred_rank[row_of[car], :n_laps]
    return forecasts_et
#
def eval_full_samples(lap, forecast_samples, forecast, full_samples, full_tss, maxlap=200):
    """
    Fold one lap's sampled rank forecasts into the running result dicts.

    input:
        lap ; lap number being evaluated
        forecast_samples ; dict carno -> 1-d array of sampled predicted targets
        forecast ; dict carno -> array[>=2, totallen], row 1 = true target
        full_samples ; dict carno -> (samplecnt, maxlap) predicted ranks (mutated)
        full_tss ; dict carno -> (maxlap,) true ranks (mutated)
    return:
        None (results accumulate in full_samples / full_tss)
    """
    car_numbers = list(forecast.keys())
    row_of = {car: i for i, car in enumerate(car_numbers)}
    n_samples = len(forecast_samples[car_numbers[0]])

    # truth spans all laps; samples only describe the current lap
    truth = np.full((len(car_numbers), maxlap), np.nan)
    sampled = np.full((len(car_numbers), n_samples), np.nan)
    for car in car_numbers:
        row = row_of[car]
        n_laps = len(forecast[car][1, :])
        truth[row, :n_laps] = forecast[car][1, :]
        sampled[row, :] = forecast_samples[car]

    # double argsort converts target values to rank positions (NaNs sort last)
    true_rank = np.argsort(np.argsort(truth, axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(sampled, axis=0), axis=0)

    for car in car_numbers:
        row = row_of[car]
        if car not in full_tss:
            # lazily create NaN-initialized result buffers
            full_tss[car] = np.full(maxlap, np.nan)
            full_samples[car] = np.full((n_samples, maxlap), np.nan)
        # true ranks are known up to and including this lap
        full_tss[car][:lap + 1] = true_rank[row][:lap + 1]
        # one predicted-rank column per sample path for this lap
        full_samples[car][:, lap] = pred_rank[row, :]
    return
def eval_stint_direct(forecasts_et, prediction_length):
    """
    Rank cars per lap directly from a time-difference style target.

    input:
        forecasts_et ; dict carno -> array[>=6, totallen]
            row 1: true target, row 5: predicted target
        prediction_length ; unused here, kept for a uniform call signature
    return:
        forecasts_et with row 3 (true rank) and row 4 (predicted rank) filled
    """
    pred_idx = 5  # row holding the predicted target
    max_laps = 200
    car_numbers = list(forecasts_et.keys())
    row_of = {car: i for i, car in enumerate(car_numbers)}

    # layer 0: ground truth, layer 1: prediction; NaN marks missing laps
    gaps = np.full((2, len(car_numbers), max_laps), np.nan)
    for car in car_numbers:
        series = forecasts_et[car]
        n_laps = len(series[1, :])
        gaps[0, row_of[car], :n_laps] = series[1, :]
        gaps[1, row_of[car], :n_laps] = series[pred_idx, :]

    # double argsort converts per-lap values to rank positions
    true_rank = np.argsort(np.argsort(gaps[0], axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(gaps[1], axis=0), axis=0)

    for car in car_numbers:
        n_laps = len(forecasts_et[car][1, :])
        forecasts_et[car][3, :] = true_rank[row_of[car], :n_laps]
        forecasts_et[car][4, :] = pred_rank[row_of[car], :n_laps]
    return forecasts_et
#calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset):
    """
    Rank cars per lap by accumulating (true, predicted) lap times.

    input:
        forecasts_et ; dict carno -> array[>=5, totallen]; row 1 true lap
            times, row 2 predicted lap times
        start_offset ; DataFrame with lap-0 elapsed_time per car_number
        prediction_length ; unused here, kept for a uniform call signature
    return:
        forecasts_et with row 3 (true rank) and row 4 (predicted rank) filled
    """
    MAXLAP = 200
    cars = list(forecasts_et.keys())
    car_row = {c: i for i, c in enumerate(cars)}

    # elapsed[0] = ground truth, elapsed[1] = prediction
    elapsed = np.full((2, len(cars), MAXLAP), np.nan)
    for c in cars:
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number'] == c)].elapsed_time.values[0]
        data = forecasts_et[c]
        n = data.shape[1]
        # cumulative lap times plus the lap-0 offset give elapsed race time
        elapsed[0, car_row[c], :n] = np.cumsum(data[1, :]) + offset
        elapsed[1, car_row[c], :n] = np.cumsum(data[2, :]) + offset

    # rank = position of each car after sorting elapsed time per lap
    rank_true = np.argsort(np.argsort(elapsed[0], axis=0), axis=0)
    rank_pred = np.argsort(np.argsort(elapsed[1], axis=0), axis=0)

    for c in cars:
        n = forecasts_et[c].shape[1]
        forecasts_et[c][3, :] = rank_true[car_row[c], :n]
        forecasts_et[c][4, :] = rank_pred[car_row[c], :n]
    return forecasts_et
# In[13]:
def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
        test_event='Indy500-2018', test_cars = None,
        datamode = MODE_ORACLE,model = 'oracle'):
    """
    Run one long-term forecasting experiment on the configured test event.

    dependency: module globals _exp_id, _test_event, _run_ts, events_id, freq;
    tests on one event only.

    input:
        prediction_length ; forecast horizon in laps
        half_moving_win ; rolling-window step mode passed to longterm_predict
        train_ratio, trainid ; training split / id used to locate the model
        test_event ; kept for signature compatibility (prediction uses _test_event)
        test_cars ; optional list of car numbers to restrict the test to
        datamode ; MODE_* bit flags controlling oracle/predicted covariates
        model ; model name to load
    return:
        forecasts dict from longterm_predict
    """
    # avoid a shared mutable default argument ([] evaluated once at def time)
    if test_cars is None:
        test_cars = []

    print('exp:',inspect.stack()[0][3],'model:', model,
            'datamode:', get_modestr(datamode),'eval:', _exp_id )

    # removed dead locals (retdf, pred_ret, ds_ret, rank_result) that were
    # never read
    predictor = {model: load_model(prediction_length, model, trainid=trainid)}

    ### create test dataset and run the rolling forecast
    forecasts = longterm_predict(predictor[model],
                    events_id[_test_event], prediction_length,freq,
                    oracle_mode=datamode,
                    run_ts = _run_ts,
                    test_cars=test_cars,
                    half_moving_win= half_moving_win,
                    train_ratio=train_ratio
                    )

    #forecasts = eval_stint_rank(forecasts_et, prediction_length,
    #        global_start_offset[test_event])
    return forecasts
# In[14]:
def get_sign(diff):
    """Map *diff* to its sign: 1 for positive, -1 for negative, else 0."""
    if diff > 0:
        return 1
    if diff < 0:
        return -1
    return 0
def get_stint_acc(forecasts, trim=2, currank = False):
    """
    Build a per-stint rank-change table from per-lap rank forecasts.

    input:
        forecasts ; dict carno -> array[>=5, totallen]
            row 0: lap_status (1 on pit-stop laps)
            row 3: true rank, row 4: predicted rank
        trim ; laps before the pit lap treated as the steady end of a stint
            (skips pit in/out laps)
        currank ; if True, emulate the naive "rank stays the same" model
    output:
        DataFrame with columns carno, stintid, startrank, endrank, diff,
        sign, pred_endrank, pred_diff, pred_sign (one row per stint)
    """
    rankret = []

    def _add_row(carno, stintid, startrank, endrank, pred_endrank):
        # one finished stint -> one output row
        diff = endrank - startrank
        pred_diff = pred_endrank - startrank
        rankret.append([carno, stintid, startrank,
                        endrank, diff, get_sign(diff),
                        pred_endrank, pred_diff, get_sign(pred_diff)
                        ])

    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]
        pitpos_list = np.where(forecasts[carno][0,:] == 1)[0]

        stintid = 0
        startrank = true_rank[0]
        for pitpos in pitpos_list:
            endrank = true_rank[pitpos - trim]
            # naive model keeps the rank unchanged; otherwise use the forecast
            pred_endrank = startrank if currank else pred_rank[pitpos - trim]
            _add_row(carno, stintid, startrank, endrank, pred_endrank)
            stintid += 1
            startrank = true_rank[pitpos - trim]

        # final stint after the last pit
        # bugfix: also handle cars that never pitted — pitpos_list is empty
        # there and the original pitpos_list[-1] raised IndexError
        if len(pitpos_list) == 0 or pitpos_list[-1] < lapnum - 1:
            endrank = true_rank[-1]
            pred_endrank = startrank if currank else pred_rank[-1]
            _add_row(carno, stintid, startrank, endrank, pred_endrank)

    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'stintid', 'startrank',
                                'endrank', 'diff', 'sign',
                                'pred_endrank', 'pred_diff', 'pred_sign',
                               ])
    return df
#
# configuration (module-level defaults; overridden by CLI options in __main__)
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'   # dataset id, part of the model/save paths
_test_event = 'Indy500-2018'    # event held out for evaluation
#_test_event = 'Indy500-2019'
_train_len = 40                 # training length (laps)
_feature_mode = FEATURE_STATUS  # which covariates feed the model
_context_ratio = 0.             # context prefix ratio; 0 => fall back to 2*prediction_length
#_task_id = 'timediff' # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
#
#_task_id = 'lapstatus' # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus' #rank, laptime, laptim2rank, timediff2rank...
_task_id = 'laptime' # the trained model's task: rank / laptime / timediff
_run_ts = COL_LAPTIME # which feature row is forecast (COL_LAPTIME / COL_RANK / ...)
_exp_id='laptime2rank' # evaluation mode: rank, laptime2rank, timediff2rank...
_inlap_status = 1               # pit in-lap encoding used in the dataset id (dbid)
_force_endpit_align = False
_include_endpit = False
# In[16]:
# module-level state populated by init()
global_start_offset = {}        # event -> DataFrame of lap-0 elapsed_time per car
global_carids = {}              # car number -> global car id
laptime_data = None             # list of per-event arrays [car, feature, lap]
freq = "1min"                   # pandas frequency for the synthetic time index
decode_carids = {}              # inverse mapping of global_carids
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
def init(pitmodel = ''):
    """
    Load race data and the laptime pickle, then populate module globals.

    Fills global_start_offset (per-event lap-0 elapsed times), global_carids,
    laptime_data, decode_carids and dbid, and selects the pit model:
      - non-string pitmodel -> PitModelSimple (top8 variant when pitmodel == 0)
      - 'oracle'            -> keep the string (oracle pit info is used)
      - other string        -> PitModelMLP loaded from that model file
    """
    global global_carids, laptime_data, global_start_offset, decode_carids,_pitmodel
    global dbid, _inlap_status
    dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
    stagedata = {}
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata = stagedata[event]
        # elapsed time at lap 0 for every car; used later to turn lap times
        # back into elapsed time when ranking
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]

    # start from here
    import pickle
    #with open('laptime_rank_timediff_fulltest-oracle-%s.pickle'%year, 'rb') as f:
    laptimefile = f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
    with open(laptimefile, 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')

    decode_carids={carid:carno for carno, carid in global_carids.items()}
    print(f'init: load dataset {laptimefile} with {len(laptime_data)} races, {len(global_carids)} cars')

    if not isinstance(pitmodel, str):
        # NOTE(review): pitmodel==0 selects the top8 variant — confirm callers
        # pass an int/bool here when they want PitModelSimple
        _pitmodel = PitModelSimple(top8=(True if pitmodel==0 else False))
        print(f'init pitmodel as PitModelSimple')
    elif pitmodel=='oracle':
        _pitmodel = pitmodel
    else:
        _pitmodel = PitModelMLP(modelfile = pitmodel)
        print(f'init pitmodel as PitModelMLP(pitmodel)')
def runtest(modelname, model, datamode, naivemode, trainid= "2018"):
    """
    Run one experiment configuration and report stint-level accuracy.

    input:
        modelname ; label of the configuration (unused here, kept for callers)
        model ; model name passed to run_exp ('oracle', 'deepAR', 'zero', ...)
        datamode ; MODE_* bit flags for run_exp
        naivemode ; if True, evaluate the naive "rank stays the same" model
        trainid ; training id used to locate the trained model
    return:
        (acc, mae, rmse, r2); (0, 0, 0, 0) when _exp_id is unsupported
    """
    forecast = run_exp(2,2, train_ratio =0.1 , trainid = trainid,
            datamode=datamode, model=model)

    # convert the forecast target to per-lap ranks
    if _exp_id=='rank' or _exp_id=='timediff2rank':
        forecasts_et = eval_stint_direct(forecast, 2)
    elif _exp_id=='laptime2rank':
        forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
    else:
        print(f'Error, {_exp_id} evaluation not support yet')
        return 0,0, 0,0

    df = get_stint_acc(forecasts_et, currank = naivemode, trim= _trim)
    correct = df[df['sign']==df['pred_sign']]
    acc = len(correct)/len(df)

    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values))/len(df)
    # bugfix: sklearn metrics take (y_true, y_pred); r2_score is not
    # symmetric, so the original (pred, true) ordering reported a wrong R^2.
    # Also apply sqrt so 'rmse' really is RMSE, consistent with get_evalret.
    rmse = math.sqrt(mean_squared_error(df['diff'].values, df['pred_diff'].values))
    mae = mean_absolute_error(df['diff'].values, df['pred_diff'].values)
    r2 = r2_score(df['diff'].values, df['pred_diff'].values)
    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}')
    return acc, mae, rmse, r2
def get_evalret(df):
    """
    Summarize stint prediction quality against the naive "no change" baseline.

    input:
        df ; output of get_stint_acc, one row per (car, stint)
    return:
        2x4 array [[acc, mae, rmse, r2],
                   [acc_naive, mae_naive, rmse_naive, r2_naive]]
    """
    correct = df[df['sign']==df['pred_sign']]
    acc = len(correct)/len(df)

    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values))/len(df)
    # bugfix: sklearn metrics take (y_true, y_pred); r2_score is not
    # symmetric, so the original (pred, true) ordering reported a wrong R^2
    rmse = math.sqrt(mean_squared_error(df['diff'].values, df['pred_diff'].values))
    mae = mean_absolute_error(df['diff'].values, df['pred_diff'].values)
    r2 = r2_score(df['diff'].values, df['pred_diff'].values)

    #naive result: predict endrank == startrank
    n_correct = df[df['startrank']==df['endrank']]
    acc_naive = len(n_correct)/len(df)
    mae_naive = np.mean(np.abs(df['diff'].values))
    rmse_naive = math.sqrt(mean_squared_error(df['endrank'].values, df['startrank'].values))
    r2_naive = r2_score(df['endrank'].values, df['startrank'].values)

    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}, {mae_naive1}')
    #print(f'pred: acc={acc}, mae={mae}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, \n \
    naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2,
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
    )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
def get_evalret_shortterm(df):
    """
    Summarize short-term rank prediction quality vs the naive baseline.

    input:
        df ; per-prediction rows with startlap, startrank, endrank,
             pred_endrank, diff columns
    return:
        2x4 array [[acc, mae, rmse, r2],
                   [acc_naive, mae_naive, rmse_naive, r2_naive]]
        where acc is the precision of predicting the leader (rank 0)
    """
    maxlap = np.max(df['startlap'].values)
    minlap = np.min(df['startlap'].values)

    # leader (rank 0) precision
    # bugfix: guard the empty selections to avoid ZeroDivisionError when no
    # row predicts (or starts at) rank 0
    top1_pred = df[df['pred_endrank']==0]
    correct = top1_pred[top1_pred['pred_endrank']==top1_pred['endrank']]
    acc = len(correct)/len(top1_pred) if len(top1_pred) > 0 else 0.0

    # bugfix: sklearn metrics take (y_true, y_pred); r2_score is not
    # symmetric, so the original (pred, true) ordering reported a wrong R^2
    rmse = math.sqrt(mean_squared_error(df['endrank'].values, df['pred_endrank'].values))
    mae = mean_absolute_error(df['endrank'].values, df['pred_endrank'].values)
    r2 = r2_score(df['endrank'].values, df['pred_endrank'].values)

    mae1 = np.sum(np.abs(df['pred_endrank'].values - df['endrank'].values))
    mae1 = mae1/ (maxlap -minlap +1)

    #naive result: predict endrank == startrank
    top1_naive = df[df['startrank']==0]
    n_correct = top1_naive[top1_naive['startrank']==top1_naive['endrank']]
    acc_naive = len(n_correct)/len(top1_naive) if len(top1_naive) > 0 else 0.0

    mae_naive = np.mean(np.abs(df['diff'].values))
    mae_naive1 = np.sum(np.abs(df['diff'].values))
    mae_naive1 = mae_naive1 / (maxlap - minlap + 1)
    rmse_naive = math.sqrt(mean_squared_error(df['endrank'].values, df['startrank'].values))
    r2_naive = r2_score(df['endrank'].values, df['startrank'].values)

    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}, {mae_naive1}')
    #print(f'pred: acc={acc}, mae={mae}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, \n \
    naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2,
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
    )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
#
# In[20]:
def mytest():
    """
    Evaluate all model/mode configurations and save the results as CSV.

    Uses the module-level experiment settings (_exp_id, _dataset_id,
    _test_event, _context_ratio, _trim). If the result file already exists
    it is loaded and returned instead of re-running the experiments.

    return:
        DataFrame with columns runid, acc, mae, rmse, r2
    """
    savefile = f'stint-evluate-{_exp_id}-d{_dataset_id}-t{_test_event}-c{_context_ratio}_trim{_trim}.csv'
    if os.path.exists(savefile):
        print(f'{savefile} already exists, bye')
        # bugfix: return the cached result (the original read it into a
        # local and then returned None)
        return pd.read_csv(savefile)

    # configuration name -> [model, datamode flags, naivemode]
    config = {'fulloracle':['oracle',MODE_ORACLE,False],
              'laponly':['oracle',MODE_ORACLE_LAPONLY,False],
              'notracklap':['oracle',MODE_NOTRACK + MODE_NOLAP,False],
              'fullpred':['oracle',MODE_PREDTRACK + MODE_PREDPIT,False],
              'curtrack':['oracle',MODE_TESTCURTRACK,False],
              'zerotrack':['oracle',MODE_TESTZERO,False],
              'predtrack':['oracle',MODE_PREDTRACK + MODE_ORACLE_TRACKONLY,False],
              'predpit':['oracle',MODE_PREDPIT + MODE_ORACLE_LAPONLY,False],
              'deepAR':['deepAR',MODE_ORACLE,False],
              'naive':['zero',MODE_ORACLE, True],
             }

    cols = ['runid','acc','mae', 'rmse', 'r2']
    result = []
    for modelname in config.keys():
        acc, mae, rmse, r2 = runtest(modelname, config[modelname][0],
                config[modelname][1],config[modelname][2])
        result.append([modelname, acc, mae, rmse, r2])

    retd = pd.DataFrame(result,columns=cols)
    # bugfix: write to the same path checked above; the original saved to a
    # name without the _trim suffix, so the cache check could never hit
    retd.to_csv(savefile, float_format='%.3f')
    return retd
# In[ ]:
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)

    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))

    # cmd argument parser
    # NOTE(review): the usage string names 'stint_predictor_fastrun.py' —
    # confirm it matches this script's actual filename
    usage = 'stint_predictor_fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    parser.add_option("--contextratio", dest="contextratio", default=0.)
    parser.add_option("--trim", dest="trim", type=int, default=0)
    opt, args = parser.parse_args()

    #set global parameters
    _dataset_id = opt.datasetid
    _test_event = opt.testevent
    _trim = opt.trim
    # the task decides which feature row is forecast (_run_ts) and how the
    # forecast is converted to ranks for evaluation (_exp_id)
    if opt.taskid == 'laptime':
        _task_id = 'laptime' # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
        _exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff' # rank,laptime, the trained model's task
        _run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
        _exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'rank':
        _task_id = 'rank' # rank,laptime, the trained model's task
        _run_ts = COL_RANK #COL_LAPTIME,COL_RANK
        _exp_id='rank' #rank, laptime, laptim2rank, timediff2rank...
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)

    if _dataset_id=='' or _test_event=='':
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)

    # dataset ids containing 'pitage' carry the pit-age covariate
    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE

    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
            _task_id)

    init()
    mytest()
| 149,702 | 36.670609 | 310 | py |
rankpredictor | rankpredictor-master/src/indycar/model/quicktest_simulator_sota.py | #!/usr/bin/env python
# coding: utf-8
# ### stint simulator
# based on: stint_simulator_shortterm_pitmodel.py
#
#
# long term predictor by continuously regressive forecasting at each pitstop
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random, math
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.model.deepvar import DeepVAREstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from indycar.model.pitmodel import PitModelSimple, PitModelMLP
from indycar.model.deeparw import DeepARWeightEstimator
#from gluonts.model.deepar import DeepAREstimator
from indycar.model.deepar import DeepAREstimator
import indycar.model.global_variables as gvar
from indycar.model.ListDataSetX import ListDatasetX
import os
random.seed()   # seed the RNG from OS entropy / current time (no fixed seed)
os.getcwd()     # NOTE(review): result discarded — leftover from notebook export
#GPUID = 1
# ### global constants
# In[3]:
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# column layout of laptime_data records mat[car, feature, lap] — each COL_*
# constant names one feature row
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2          # nonzero on caution laps (code treats it as 0/1)
COL_LAPSTATUS=3            # 1 on pit-stop laps
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8
# NOTE(review): COL_TARGET_PREDICTED shares row 8 with COL_LAP2NEXTPIT —
# presumably mutually exclusive uses; confirm
COL_TARGET_PREDICTED = 8

# added new features
COL_LEADER_PITCNT = 9
COL_TOTAL_PITCNT = 10
COL_SHIFT_TRACKSTATUS = 11
COL_SHIFT_LAPSTATUS = 12
COL_SHIFT_LEADER_PITCNT = 13
COL_SHIFT_TOTAL_PITCNT = 14
COL_LASTFEATURE = 14

# dynamically extended space in simulation (scratch copies of the originals)
COL_TRACKSTATUS_SAVE = COL_LASTFEATURE+1
COL_LAPSTATUS_SAVE = COL_LASTFEATURE+2
COL_CAUTION_LAPS_INSTINT_SAVE = COL_LASTFEATURE+3
COL_LAPS_INSTINT_SAVE= COL_LASTFEATURE+4
COL_ENDPOS = COL_LASTFEATURE+5

# feature-selection bit flags; combine with | and query with test_flag()
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
FEATURE_LEADER_PITCNT = 8
FEATURE_TOTAL_PITCNT = 16
FEATURE_SHIFT_TRACKSTATUS = 32
FEATURE_SHIFT_LAPSTATUS = 64
FEATURE_SHIFT_LEADER_PITCNT = 128
FEATURE_SHIFT_TOTAL_PITCNT = 256

# flag -> (long name, one-char signature) used by decode_feature_mode
_feature2str= {
        FEATURE_STATUS : ("FEATURE_STATUS",'S'),
        FEATURE_PITAGE : ("FEATURE_PITAGE",'A'),
        FEATURE_LEADER_PITCNT : ("FEATURE_LEADER_PITCNT",'L'),
        FEATURE_TOTAL_PITCNT : ("FEATURE_TOTAL_PITCNT",'T'),
        FEATURE_SHIFT_TRACKSTATUS : ("FEATURE_SHIFT_TRACKSTATUS",'Y'),
        FEATURE_SHIFT_LAPSTATUS : ("FEATURE_SHIFT_LAPSTATUS",'P'),
        FEATURE_SHIFT_LEADER_PITCNT : ("FEATURE_SHIFT_LEADER_PITCNT",'L'),
        FEATURE_SHIFT_TOTAL_PITCNT : ("FEATURE_SHIFT_TOTAL_PITCNT",'T')
        }

# oracle_mode bit flags
# oracle mode
MODE_ORACLE = 1024 # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2

# oracle mode for training
MODE_NOLAP = 1
MODE_NOTRACK = 2

# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32

# disturbance analysis modes
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256

# flag -> readable name, for logging
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
        MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
        MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
        MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
        MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
        MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
def nan_helper(y):
    """Locate NaNs in a 1-d array.

    Returns a boolean mask of NaN positions together with a converter that
    maps a logical mask to integer indices, e.g. for np.interp-based infill:

        >>> nans, index = nan_helper(y)
        >>> y[nans] = np.interp(index(nans), index(~nans), y[~nans])
    """
    mask = np.isnan(y)

    def to_indices(logical):
        # turn a boolean mask into the positions where it is True
        return logical.nonzero()[0]

    return mask, to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#_feature_mode = FEATURE_STATUS
def decode_feature_mode(feature_mode):
    """Pretty-print the feature flags enabled in *feature_mode*.

    Prints the long names of all enabled features and returns a compact
    signature string with one character per known flag ('0' when disabled).
    """
    long_names = []
    signature = []
    for flag, (long_name, short_name) in _feature2str.items():
        if test_flag(feature_mode, flag):
            long_names.append(long_name)
            signature.append(short_name)
        else:
            signature.append('0')
    print(' '.join(long_names))
    return ''.join(signature)
def add_leader_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS, shift_len = 0,
                   dest_col = COL_LEADER_PITCNT,
                   verbose = False):
    """
    Add a "pits by cars ahead" feature to mat(car, feature, lap).

    For every lap, each car receives the count of pit stops taken on that lap
    by cars ranked ahead of it (optionally reading pit status shifted left by
    shift_len laps).

    input:
        selmat : laptime_data array [car, feature, lap]
        rank_col : feature row holding the per-lap rank
        pit_col : feature row holding the per-lap pit status (0/1)
        shift_len : left shift applied when reading pit status
        dest_col : feature row to write into; -1 appends a new row
        verbose : print intermediate matrices for debugging
    returns:
        selmat updated in place (or a widened copy when dest_col == -1)
    """
    dim1, dim2, dim3 = selmat.shape

    # per-lap car order, best rank first
    idx = np.argsort(selmat[:, rank_col,:], axis=0)
    # bugfix: removed the dead statement
    #   true_rank = np.argsort(idx, axis=0).astype(np.float)
    # — its result was never used, and np.float was removed in NumPy 1.24,
    # so it crashed on modern NumPy.

    # pit stops reordered by rank (optionally shifted in time)
    pits = np.zeros((dim1,dim3))
    for lap in range(shift_len, dim3):
        col = idx[:, lap-shift_len]
        pits[:, lap] = selmat[col, pit_col, lap]

    # cumulative pits minus own pit = pits taken by strictly-better-ranked cars
    leaderCnt = np.nancumsum(pits, axis=0) - pits

    if verbose:
        print('pits:\n')
        print(pits[:,190:])
        print('leaderCnt raw:\n')
        print(leaderCnt[:,190:])

    #remove nans
    nanidx = np.isnan(leaderCnt)
    leaderCnt[nanidx] = 0

    if verbose:
        print('leaderCnt after remove nan:\n')
        print(leaderCnt[:,190:])

    if dest_col == -1:
        # append a fresh feature row
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        # update mode: write into the existing row
        newmat = selmat

    # scatter back from rank order to car order
    for lap in range(dim3):
        col = idx[:, lap]
        newmat[col, dest_col, lap] = leaderCnt[:, lap]

    # sync trailing-NaN length of the new feature to rank_col
    for rec in newmat:
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            rec[dest_col, -nan_count:] = np.nan

    return newmat
def add_allpit_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS,
                   dest_col = COL_TOTAL_PITCNT,verbose = False):
    """
    add a new feature into mat(car, feature, lap)

    total pits in a lap: the per-lap pit-stop count summed over all cars,
    broadcast to every car's ``dest_col`` row.

    input:
        selmat   ; laptime_data array [car, feature, lap]
        rank_col ; feature column whose nan tail defines the valid ts length
        pit_col  ; feature column holding the pit-stop flag
        dest_col ; destination feature column; -1 appends a new column
        verbose  ; dump intermediate vectors (laps 190+) for debugging
    return:
        newmat   ; a fresh, wider array when dest_col == -1, otherwise
                   selmat updated in place
    """
    dim1, dim2, dim3 = selmat.shape

    # per-lap total pit count over all cars
    totalCnt = np.nansum(selmat[:, pit_col, :], axis=0).reshape((-1))

    if verbose:
        # bugfix: this branch used to print an undefined name ``pits``
        # (copy-paste from add_leader_cnt) and raised NameError
        print('pits:\n')
        print(selmat[:, pit_col, 190:])
        print('totalCnt raw:\n')
        print(totalCnt[190:])

    #remove nans
    nanidx = np.isnan(totalCnt)
    totalCnt[nanidx] = 0

    if verbose:
        print('totalCnt after remove nan:\n')
        print(totalCnt[190:])

    if dest_col == -1:
        #create a new data block with one extra feature column
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: write into the existing column in place
        newmat = selmat

    # the total is the same for every car
    for car in range(dim1):
        newmat[car, dest_col, :] = totalCnt

    # sync length to the rank column: pad the new feature with nan wherever
    # the rank series itself is nan (short time-series tails)
    for rec in newmat:
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            rec[dest_col, -nan_count:] = np.nan

    return newmat
def add_shift_feature(selmat, rank_col=COL_RANK, shift_col=COL_LAPSTATUS, shift_len = 2,
                      dest_col = -1,verbose = False):
    """
    add a new feature into mat(car, feature, lap)

    shift features left in a lap: ``dest_col[t] = shift_col[t + shift_len]``
    over each car's valid (non-nan) lap range; positions with no future value
    stay 0, positions past the valid range stay nan.

    warning: these are oracle features, be careful not to let future rank
    positions leaking

    input:
        selmat    ; laptime_data array [car, feature, lap]
        rank_col  ; feature column whose nan tail defines the valid ts length
        shift_col ; feature column to shift
        shift_len ; how many laps to shift left
        dest_col  ; destination feature column; -1 appends a new column
    return:
        newmat    ; a fresh, wider array when dest_col == -1, otherwise
                    selmat updated in place
    """
    dim1, dim2, dim3 = selmat.shape

    if dest_col == -1:
        #create a new data block with one extra feature column
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: write into the existing column in place
        newmat = selmat

    for car in range(dim1):
        # set empty status by default
        newmat[car, dest_col, :] = np.nan

        # valid laps = positions where the rank column is not nan
        rec = selmat[car]
        recnnz = rec[shift_col, ~np.isnan(rec[rank_col,:])]
        reclen = len(recnnz)

        # zero-fill the valid range, then copy the left-shifted values
        newmat[car, dest_col, :reclen] = 0
        newmat[car, dest_col, :reclen-shift_len] = recnnz[shift_len:]

    return newmat
def update_laptimedata(prediction_length, freq,
                   test_event = 'Indy500-2018',
                   train_ratio=0.8,
                   context_ratio = 0.,
                   shift_len = -1,
                   #target_pred = None,
                   rank_col = COL_RANK,
                   verbose = False):
    """
    update the features in laptime data
    3. create new features

    Rebuilds the derived feature columns (leader pit count, total pit count,
    and their left-shifted variants) for the test event's entry inside the
    global ``laptime_data`` and updates it in place.

    NOTE(review): the ``test_event`` parameter appears unused -- the lookup
    below compares against the module-level ``_test_event``; confirm before
    relying on the parameter.  ``freq``, ``train_ratio`` and
    ``context_ratio`` are also not referenced in this body.

    input:
        laptime_data ; global var
    output:
        data ; new representation of laptime_data
    """
    global laptime_data

    #inplace update
    #_laptime_data = laptime_data.copy()
    _laptime_data = laptime_data

    #get test event (index into the global laptime_data)
    test_idx = -1
    for idx, _data in enumerate(laptime_data):
        if gvar.events[_data[0]] == _test_event:
            test_idx = idx
            break

    # check shift len: default shift equals the prediction horizon
    if shift_len < 0:
        shift_len = prediction_length

    if verbose:
        print('update_laptimedata shift len:', shift_len, test_idx)

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    new_data = []
    if test_idx >= 0:
        _data = laptime_data[test_idx]

        # use to check the dimension of features: if the array does not yet
        # include the derived columns, new columns are appended (dest_col=-1);
        # otherwise the existing columns are overwritten
        input_feature_cnt = _data[2].shape[1]
        if verbose:
            if input_feature_cnt < COL_LASTFEATURE + 1:
                print('create new features mode, feature_cnt:', input_feature_cnt)
            else:
                print('update features mode, feature_cnt:', input_feature_cnt)

        # add new features
        # add leaderPitCnt
        verbose = False

        #
        # be careful on leader_cnt for the future rank leaking
        #
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_LEADER_PITCNT
        data2_intermediate = add_leader_cnt(_data[2], shift_len = shift_len,
                rank_col = rank_col,
                dest_col=dest_col, verbose = verbose)

        # add totalPit
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_TOTAL_PITCNT
        data2_intermediate = add_allpit_cnt(data2_intermediate, dest_col=dest_col)

        #
        # add shift features, a fixed order, see the MACROS
        #COL_SHIFT_TRACKSTATUS   = 11
        #COL_SHIFT_LAPSTATUS     = 12
        #COL_SHIFT_LEADER_PITCNT  = 13
        #COL_SHIFT_TOTAL_PITCNT  = 14
        #
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TRACKSTATUS
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_TRACKSTATUS, shift_len = shift_len)
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LAPSTATUS
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_LAPSTATUS, shift_len = shift_len)
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LEADER_PITCNT
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_LEADER_PITCNT, shift_len = shift_len)
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TOTAL_PITCNT
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_TOTAL_PITCNT, shift_len = shift_len)

        # final: store back into the global structure
        data2_newfeature = data2_intermediate
        #new_data.append([_data[0], _data[1], data2_newfeature])
        laptime_data[test_idx][2] = data2_newfeature

    return laptime_data
def get_real_features(feature_mode, rec, endpos):
    """
    construct the real value feature vector from feature_mode

    Returns a list of 1-d arrays (each truncated to ``endpos`` laps), one per
    feature flag set in ``feature_mode``.  The order of the returned list is
    fixed by the flag checks below -- consumers rely on that ordering.
    ``endpos <= 0`` means "use the full lap range".

    legacy code:
            real_features = {
                FEATURE_STATUS:[rec[COL_TRACKSTATUS,:],rec[COL_LAPSTATUS,:]],
                FEATURE_PITAGE:[rec[COL_TRACKSTATUS,:],rec[COL_LAPSTATUS,:],rec[COL_LAPS_INSTINT,:]],
                FEATURE_LEADERPITCNT:[rec[COL_TRACKSTATUS,:],rec[COL_LAPSTATUS,:],rec[COL_LEADER_PITCNT,:]],
                FEATURE_TOTALPITCNT:[rec[COL_TRACKSTATUS,:],rec[COL_LAPSTATUS,:],rec[COL_TOTAL_PITCNT,:]]
                }
            real_features[feature_mode]

    COL_LEADER_PITCNT = 9
    COL_TOTAL_PITCNT = 10
    COL_SHIFT_TRACKSTATUS   = 11
    COL_SHIFT_LAPSTATUS     = 12
    COL_SHIFT_LEADER_PITCNT  = 13
    COL_SHIFT_TOTAL_PITCNT  = 14
    FEATURE_STATUS = 2
    FEATURE_PITAGE = 4
    FEATURE_LEADER_PITCNT = 8
    FEATURE_TOTAL_PITCNT = 16
    FEATURE_SHIFT_TRACKSTATUS   = 32
    FEATURE_SHIFT_LAPSTATUS    = 64
    FEATURE_SHIFT_LEADER_PITCNT  = 128
    FEATURE_SHIFT_TOTAL_PITCNT  = 256
    """
    features = []

    #check endpos: non-positive means take the whole series
    if endpos <=0 :
        endpos = rec.shape[1]

    # FEATURE_STATUS contributes two arrays (track + lap); all others one
    if test_flag(feature_mode, FEATURE_STATUS):
        features.append(rec[COL_TRACKSTATUS,:endpos])
        features.append(rec[COL_LAPSTATUS,:endpos])
    if test_flag(feature_mode, FEATURE_PITAGE):
        features.append(rec[COL_LAPS_INSTINT,:endpos])
    if test_flag(feature_mode, FEATURE_LEADER_PITCNT):
        features.append(rec[COL_LEADER_PITCNT,:endpos])
    if test_flag(feature_mode, FEATURE_TOTAL_PITCNT):
        features.append(rec[COL_TOTAL_PITCNT,:endpos])
    if test_flag(feature_mode, FEATURE_SHIFT_TRACKSTATUS):
        features.append(rec[COL_SHIFT_TRACKSTATUS,:endpos])
    if test_flag(feature_mode, FEATURE_SHIFT_LAPSTATUS):
        features.append(rec[COL_SHIFT_LAPSTATUS,:endpos])
    if test_flag(feature_mode, FEATURE_SHIFT_LEADER_PITCNT):
        features.append(rec[COL_SHIFT_LEADER_PITCNT,:endpos])
    if test_flag(feature_mode, FEATURE_SHIFT_TOTAL_PITCNT):
        features.append(rec[COL_SHIFT_TOTAL_PITCNT,:endpos])

    return features
def get_real_features(feature_mode, rec, endpos):
    """
    construct the real value feature vector from feature_mode

    NOTE(review): this is an exact duplicate of the ``get_real_features``
    defined immediately above; being defined later, this copy shadows the
    first.  Behavior is identical, but the duplication should be removed
    once callers are confirmed.

    Returns a list of 1-d arrays (each truncated to ``endpos`` laps), one per
    feature flag set in ``feature_mode``.  ``endpos <= 0`` means the full
    lap range.

    COL_LEADER_PITCNT = 9
    COL_TOTAL_PITCNT = 10
    COL_SHIFT_TRACKSTATUS   = 11
    COL_SHIFT_LAPSTATUS     = 12
    COL_SHIFT_LEADER_PITCNT  = 13
    COL_SHIFT_TOTAL_PITCNT  = 14
    FEATURE_STATUS = 2
    FEATURE_PITAGE = 4
    FEATURE_LEADER_PITCNT = 8
    FEATURE_TOTAL_PITCNT = 16
    FEATURE_SHIFT_TRACKSTATUS   = 32
    FEATURE_SHIFT_LAPSTATUS    = 64
    FEATURE_SHIFT_LEADER_PITCNT  = 128
    FEATURE_SHIFT_TOTAL_PITCNT  = 256
    """
    features = []

    #check endpos: non-positive means take the whole series
    if endpos <=0 :
        endpos = rec.shape[1]

    if test_flag(feature_mode, FEATURE_STATUS):
        features.append(rec[COL_TRACKSTATUS,:endpos])
        features.append(rec[COL_LAPSTATUS,:endpos])
    if test_flag(feature_mode, FEATURE_PITAGE):
        features.append(rec[COL_LAPS_INSTINT,:endpos])
    if test_flag(feature_mode, FEATURE_LEADER_PITCNT):
        features.append(rec[COL_LEADER_PITCNT,:endpos])
    if test_flag(feature_mode, FEATURE_TOTAL_PITCNT):
        features.append(rec[COL_TOTAL_PITCNT,:endpos])
    if test_flag(feature_mode, FEATURE_SHIFT_TRACKSTATUS):
        features.append(rec[COL_SHIFT_TRACKSTATUS,:endpos])
    if test_flag(feature_mode, FEATURE_SHIFT_LAPSTATUS):
        features.append(rec[COL_SHIFT_LAPSTATUS,:endpos])
    if test_flag(feature_mode, FEATURE_SHIFT_LEADER_PITCNT):
        features.append(rec[COL_SHIFT_LEADER_PITCNT,:endpos])
    if test_flag(feature_mode, FEATURE_SHIFT_TOTAL_PITCNT):
        features.append(rec[COL_SHIFT_TOTAL_PITCNT,:endpos])

    return features
#
# interface with QuickTest
#
def set_laptimedata(newdata):
    """Replace the global ``laptime_data`` with *newdata*.

    Logs the shape of the test event's entry when it can be located in the
    current global data (looked up via the module-level ``_test_event``).
    """
    global laptime_data

    #locate the test event in the current global data
    test_idx = next((i for i, rec in enumerate(laptime_data)
                     if gvar.events[rec[0]] == _test_event), -1)

    if test_idx >= 0:
        print('Set a new global laptime_data, test_event=%s, cnt=%d, shape=%s'%(_test_event, len(newdata), newdata[test_idx][2].shape))
    else:
        print('Error, test event not found in laptimedata', _test_event)

    laptime_data = newdata
#
#
#
def load_data(event, year=0):
    """Load one race's lap-by-lap csv and derive the completed-laps frames.

    input:
        event ; race name used to locate ``../data/final/C_<event>[-<year>].csv``
        year  ; optional year suffix for the file name (0 = no suffix)
    return:
        alldata  ; the raw DataFrame as read from disk
        rankdata ; alldata de-duplicated to one row per (car, lap), ordered by
                   elapsed time
        acldata  ; completed-laps frame built from alldata (see make_cl_data)
    """
    #inputfile = '../data/final/C_'+ event +'-' + year + '-final.csv'
    if year>0:
        # bugfix: ``year`` is an int; concatenating it directly to a str
        # raised TypeError -- convert explicitly
        inputfile = '../data/final/C_'+ event +'-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_'+ event +'.csv'

    dataset = pd.read_csv(inputfile)

    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1

    # get records for the cars that finish the race
    completed_car_numbers= dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)

    #make a copy, then restrict ``dataset`` to finishers only
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]

    # one row per (car, lap), first record by elapsed time (index breaks ties)
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # note: the finishers-only cl frame (make_cl_data(dataset)) was computed
    # here but never used, so it has been removed
    acldata = make_cl_data(alldata)

    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-(car, lap) completed-laps frame.

    input:
        dataset ; raw lap records with at least the columns referenced below
    return:
        df ; one row per (car_number, completed_laps) with rank/time deltas:
             car_number, completed_laps, rank, elapsed_time, rank_diff,
             time_diff, current_status, track_status, lap_status
    """
    # pick up data with valid rank: order by elapsed time (original row order
    # breaks ties) and keep the first record per (car, lap)
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
                          "last_pitted_lap","start_position","laps_led"], axis=1)

    # per-car lap-over-lap deltas; zero out each car's first row
    # (bugfix: the chained assignment ``uni_ds['x'][mask] = 0`` is unreliable
    # under modern pandas copy-on-write -- use .loc instead)
    first_of_car = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[first_of_car, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[first_of_car, 'time_diff'] = 0

    df = uni_ds[['car_number','completed_laps','rank','elapsed_time',
                 'rank_diff','time_diff',"current_status", "track_status", "lap_status"]]
    return df
# In[5]:
def nan_helper(y):
    """Helper to handle indices and logical indices of NaNs.

    NOTE(review): exact duplicate of the ``nan_helper`` defined earlier in
    this module; this later definition shadows the first.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, logical indices of NaNs
        - index, a function, with signature indices= index(logical_indices),
          to convert logical indices of NaNs to 'equivalent' indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x= nan_helper(y)
        >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
    """
    return np.isnan(y), lambda z: z.nonzero()[0]
def test_flag(a, bitflag):
    """Return True when all bits of *bitflag* are set in *a*.

    NOTE(review): duplicate of the ``test_flag`` defined earlier in this
    module; this later definition shadows the first.
    """
    return (a & bitflag) == bitflag
# pit model is separate for each car
def load_model(prediction_length, model_name,trainid,epochs=1000, exproot='../models/remote'):
if int(gvar.gpuid) < 0:
#ctx = "cpu"
ctx = mx.cpu()
else:
#ctx = "gpu(%s)"%gpuid
ctx = mx.gpu(gvar.gpuid)
modeldir = ''
pred_ret = []
#rootdir = f'../models/{exproot}/{_dataset_id}/{_task_id}-{trainid}/'
rootdir = f'{exproot}/{_dataset_id}/{_task_id}-{trainid}/'
# deepAR-Oracle
if model_name == 'deepAR-Oracle' or model_name == 'deepAR-MLP':
model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
#deeparw-oracle
elif model_name == 'weighted-oracle' or model_name == 'deepARW-Oracle' or model_name == 'deepARW-MLP':
model=f'deepARW-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
elif model_name == 'oracle' or (model_name.find('pitmodel') == 0):
model=f'deepARW-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
elif model_name == 'deepAR-multi':
model=f'deepAR-multi-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
elif model_name == 'joint' or model_name == 'deepARW-multi' or model_name == 'RankNet-Joint':
model=f'deepARW-multi-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
# transformer
elif model_name == 'transformer' or model_name == 'Transformer':
model=f'Transformer-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
elif model_name == 'Transformer-MLP' or model_name == 'Transformer-Oracle':
model=f'Transformer-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
elif model_name == 'TransformerW-MLP' or model_name == 'TransformerW-Oracle':
model=f'TransformerW-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
elif model_name == 'TransformerF-MLP' or model_name == 'TransformerF-Oracle':
model=f'TransformerF-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
elif model_name == 'TransformerWF-MLP' or model_name == 'TransformerWF-Oracle':
model=f'TransformerWF-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
elif model_name == 'TransformerWFM-MLP' or model_name == 'TransformerWFM-Oracle':
model=f'TransformerWFM-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
# deepAR
elif model_name == 'deepAR' or model_name == 'standard':
model=f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
#model=f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}'
modeldir = rootdir + model
# deepFactor
elif model_name == 'deepFactor' or model_name == 'deepState' or model_name == 'nbeats' or model_name == 'deepFactorX' or model_name == 'deepVAR':
model=f'{model_name}-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
modeldir = rootdir + model
# naive
elif model_name == 'naive':
print(f'predicting model={model_name}, plen={prediction_length}')
predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
# zero, zero keeps the rank unchange
elif model_name == 'zero':
print(f'predicting model={model_name}, plen={prediction_length}')
predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
# arima
elif model_name == 'arima':
print(f'predicting model={model_name}, plen={prediction_length}')
predictor = RForecastPredictor(method_name='arima',freq= freq,
prediction_length = prediction_length,trunc_length=gvar.context_length)
else:
print(f'error: model {model_name} not support yet!')
if modeldir:
print(f'predicting model={model_name}, plen={prediction_length}')
predictor = Predictor.deserialize(Path(modeldir), ctx=ctx)
print(f'loading model...done!, ctx:{predictor.ctx}')
return predictor
#
# simulator
#
def get_pitlaps(verbose = True, prediction_length=2):
    """
    collect pitlaps info from COL_LAPSTATUS

    input:
        laptime_data ;  (global)
        _test_event  ;  (global) event whose data is scanned
        events
        _train_len   ; minimum laps for a ts(otherwise, discard)
        global_carids ; carno-> carid mapping
    return:
        ret_pitlaps ; sorted, de-duplicated list of laps on which some car pits
        all_pitlaps ; {carno -> list of that car's pit laps}
        max_lap     ; longest ts length seen for the event
    """
    run_ts = _run_ts
    #all_pitlaps = []   # carno -> pitstops
    all_pitlaps = {}  # carno -> pitstops
    max_lap = 0

    for _data in laptime_data:
        # only the configured test event is scanned
        if gvar.events[_data[0]] != _test_event:
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < _train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]

            #first, get target a copy
            # target can be COL_XXSTATUS
            lap_status = rec[COL_LAPSTATUS, :]
            pitstops = np.where(lap_status == 1)[0]

            # filter out inlaps (when _inlap_status > 0)
            # NOTE(review): this assumes pit flags come in adjacent
            # (inlap, pitlap) pairs -- every other entry is dropped
            if _inlap_status !=0:
                if _inlap_status == 1:
                    #remove inlaps in previous of pit stops
                    pitstops_tmp = [pitstops[x] for x in range(1, len(pitstops), 2)]
                    pitstops = pitstops_tmp
                elif _inlap_status == 2:
                    #remove inlaps in next lap of pit stops
                    pitstops_tmp = [pitstops[x] for x in range(0, len(pitstops), 2)]
                    pitstops = pitstops_tmp

            #all_pitlaps.append(list(pitstops))
            all_pitlaps[carno] = list(pitstops)

            # append the end lap
            if _include_endpit:
                all_pitlaps[carno].append(totallen-1)

    #return: union of all cars' pit laps, sorted and de-duplicated
    allset = []
    for l in all_pitlaps.keys():
        allset.extend(all_pitlaps[l])
    ret_pitlaps = sorted(list(set(allset)))

    return ret_pitlaps, all_pitlaps, max_lap
def get_nextpit(pitlaps, startlap):
    """
    input:
        pitlaps  ; {carno -> list of pit laps} for all the cars
        startlap ; lap number to search from
    return:
        nextpit_map ; {carno -> first pit lap strictly after startlap};
                      cars with no later pit are absent from the map
        maxpit      ; latest "next pit" among the cars that pit exactly at
                      startlap, or -1 when no such car has a later pit
    """
    # cars that have a pit stop exactly at startlap
    hit_cars = [carno for carno, laps in pitlaps.items() if startlap in laps]

    # first pit strictly after startlap, per car
    # (note: the original also built an unused ``nextpit`` list; removed)
    nextpit_map = {}
    for carno, laps in pitlaps.items():
        for lap in laps:
            if lap > startlap:
                nextpit_map[carno] = lap
                break

    # furthest next-pit among the cars pitting at startlap
    maxpit = -1
    for carno in hit_cars:
        if carno in nextpit_map:
            maxpit = max(nextpit_map[carno], maxpit)

    return nextpit_map, maxpit
def sim_init():
    """
    extend laptime data space to COL_ENDPOS
    save the lapstatus in laptime_data

    Widens the test event's feature matrix to COL_ENDPOS columns when needed
    and snapshots the pit-model-related features into the *_SAVE columns so a
    simulation run can later restore the ground truth.  Mutates the global
    ``laptime_data`` in place.
    """
    global laptime_data

    #get test event (index into the global laptime_data)
    test_idx = -1
    for idx, _data in enumerate(laptime_data):
        if gvar.events[_data[0]] == _test_event:
            test_idx = idx
            break

    print('sim_init: input laptime_data, shape=', len(laptime_data), laptime_data[test_idx][2].shape, test_idx)

    #update this laptime record
    if test_idx >= 0:
        _data = laptime_data[test_idx][2]
        dim1, dim2, dim3 = _data.shape

        if dim2 < COL_ENDPOS:
            #create a new data block wide enough for the save columns
            newmat = np.zeros((dim1, COL_ENDPOS, dim3))
            newmat[:,:dim2,:] = _data.copy()
        else:
            newmat = _data

        #save pit model related features (ground-truth snapshot)
        newmat[:,COL_TRACKSTATUS_SAVE,:] = newmat[:,COL_TRACKSTATUS, :]
        newmat[:,COL_LAPSTATUS_SAVE,:] = newmat[:,COL_LAPSTATUS, :]
        newmat[:,COL_CAUTION_LAPS_INSTINT_SAVE,:] = newmat[:,COL_CAUTION_LAPS_INSTINT, :]
        newmat[:,COL_LAPS_INSTINT_SAVE, :] = newmat[:,COL_LAPS_INSTINT, :]

        # reset: store the widened matrix back only when a new one was created
        if dim2 < COL_ENDPOS:
            laptime_data[test_idx][2] = newmat

    print('sim_init: after laptime_data, shape=', len(laptime_data), laptime_data[test_idx][2].shape)
def update_lapstatus(startlap):
    """
    update the whole lapstatus data

    Re-simulates the pit-stop status of every car in the test event from
    ``startlap`` onwards (see update_onets).  Pit intervals are rescaled when
    the pit model was trained on a race of a different length.
    """
    #check the test_event, the same as the training event?
    # pitscale = (training race length) / (test race length); 1.0 when equal
    pitmodel_trainevent = gvar.trainrace
    eid = _test_event.split('-')[0]
    pitscale = gvar.events_info[pitmodel_trainevent][1] *1.0 / gvar.events_info[eid][1]

    run_ts = _run_ts
    for _data in laptime_data:
        if gvar.events[_data[0]] != _test_event:
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))

        # process for each ts (rec is updated in place by update_onets)
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
            carno = _data[1][rowid]
            update_onets(rec, startlap, carno, pitscale = pitscale)
# pit-stop prediction model instance; expected to be set externally before
# the simulation calls update_onets / debug_pitmodel
_pitmodel = None
def update_onets(rec, startlap, carno, pitscale = 1.):
    """
    update lapstatus after startlap basedon tsrec by pit prediction model

    Restores the ground-truth status up to ``startlap`` from the *_SAVE
    columns, then repeatedly asks ``_pitmodel`` for the next pit lap and
    writes simulated pit flags / stint counters forward until the end of the
    series.  Mutates ``rec`` in place.

    input:
        rec      ; a ts with multiple features COL_XXX (modified in place)
        startlap ; last lap whose real status is kept
        carno    ; car number (used only for debug reporting)
        pitscale ; race-length ratio used to rescale stint lengths between the
                   pit model's training race and the current race
    return:
        None (rec is updated for COL_LAPSTATUS, COL_CAUTION_LAPS_INSTINT,
        COL_LAPS_INSTINT)
    """
    # valid (non-nan) length of this car's series
    nans, x= nan_helper(rec[_run_ts,:])
    nan_count = np.sum(nans)
    recx = rec[:, ~np.isnan(rec[_run_ts,:])]
    totallen = recx.shape[1]
    if startlap >= totallen:
        # nothing to simulate for this car
        return

    #reset status :startlap + 1 -- zero everything, then restore the saved
    #ground truth up to and including startlap
    endpos = startlap + 1
    rec[COL_TRACKSTATUS,:] = 0
    rec[COL_LAPSTATUS,:] = 0
    rec[COL_TRACKSTATUS,:endpos] = rec[COL_TRACKSTATUS_SAVE, :endpos]
    rec[COL_LAPSTATUS,:endpos] = rec[COL_LAPSTATUS_SAVE, :endpos]
    rec[COL_CAUTION_LAPS_INSTINT,:endpos] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :endpos]
    rec[COL_LAPS_INSTINT, :endpos] = rec[COL_LAPS_INSTINT_SAVE, :endpos]

    debug_report('start update_onets', rec[COL_LAPSTATUS], startlap, carno)

    #
    #loop on predict nextpit pos
    #
    curpos = startlap
    while True:
        caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, curpos])
        laps_instint = int(rec[COL_LAPS_INSTINT, curpos])

        #scale the stint counters into the training race's lap scale
        if pitscale != 1.0:
            caution_laps_instint = int(caution_laps_instint / pitscale)
            laps_instint = int(laps_instint / pitscale)

        pred_pit_laps = _pitmodel.predict(caution_laps_instint, laps_instint) + _pitmodel_bias

        #update by pitscale: map the predicted stint length back to this race
        pred_pit_laps = int(pred_pit_laps * pitscale)

        nextpos = curpos + pred_pit_laps - laps_instint

        if nextpos >= totallen:
            # predicted pit falls past the end of the race: fill the stint
            # counters to the last lap and stop
            nextpos = totallen - 1

            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos+1] = caution_laps_instint
            for _pos in range(curpos+1, nextpos+1):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1

            break
        else:
            # a valid pit: flag it, optionally flag the inlap, and reset the
            # stint counters at the pit lap
            rec[COL_LAPSTATUS, nextpos] = 1
            if _inlap_status != 0:
                #inlap is 'P'
                if _inlap_status == 1 :
                    rec[COL_LAPSTATUS, nextpos-1] = 1
                else:
                    #todo: no boudary check
                    rec[COL_LAPSTATUS, nextpos+1] = 1

            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos] = caution_laps_instint
            rec[COL_CAUTION_LAPS_INSTINT, nextpos] = 0
            for _pos in range(curpos+1, nextpos):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            rec[COL_LAPS_INSTINT, nextpos] = 0

            #go forward
            curpos = nextpos

    debug_report('after update_onets', rec[COL_LAPSTATUS], startlap, carno)
    return
def debug_pitmodel(startlap, carno, laps_instint, caution_laps_instint, samplecnt=1000):
    """
    test the pitmodel

    Samples the pit model ``samplecnt`` times from the same stint state and
    maps each prediction to an absolute next-pit lap.  ``carno`` is accepted
    for call-site symmetry but not used.

    ret:
        list of predictions of nextpit
    """
    return [
        startlap + _pitmodel.predict(caution_laps_instint, laps_instint) - laps_instint
        for _ in range(samplecnt)
    ]
#debug tracking status
#status matrix : laps x ( endCol x 5 features)
#features: target, lapstatus, lap_instint, caution_instint, trackstatus
_status_mat = {}  # stepid -> status matrix
def debug_report_mat(startlap, maxnext):
    """
    output the status of the simulation

    NOTE(review): this function appears unfinished -- it iterates the test
    event's records and binds ``rec`` but never prints or returns anything;
    ``startlap``/``maxnext`` and the layout constants below are unused.
    Confirm intent before relying on it.
    """
    fixedWidth = 5
    endCol = 4

    run_ts = _run_ts
    for _data in laptime_data:
        if gvar.events[_data[0]] != _test_event:
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))

        #header carno | lap#...
        #fixed width

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
# car numbers to emit debug traces for; empty list disables all debug output
_debug_carlist = []
#_debug_carlist = [12]
def debug_report_ts(msg, rec, startlap, carno, col= COL_LAPSTATUS):
    """Print feature row *col* of *rec* split at *startlap*, for debugged cars only."""
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        head = rec[col, : startlap + 1]
        tail = rec[col, startlap + 1:]
        print(head)
        print('='*10)
        print(tail)
def debug_report(msg, rec, startlap, carno):
    """Print the 1-d array *rec* split at *startlap*, for debugged cars only."""
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        head = rec[: startlap + 1]
        tail = rec[startlap + 1:]
        print(head)
        print('='*10)
        print(tail)
def debug_print(msg):
    """Print *msg* only when at least one car is being debugged."""
    if _debug_carlist:
        print(msg)
# works on predicted lap status
def sim_onestep_pred(predictor, prediction_length, freq,
startlap, endlap,
oracle_mode = MODE_ORACLE,
sample_cnt = 100,
verbose = False
):
"""
input:
parameters ; same as longterm_predict, make_dataset_byevent
startlap
endlap
return:
forecast ; {}, carno -> 5 x totallen matrix
0,: -> lapstatus
1,: -> true target
2,: -> pred target
3, -> placeholder
4, -> placeholder
forecast_samples; save the samples, the farest samples
{}, carno -> samplecnt of the target
"""
global laptime_data
run_ts= _run_ts
test_event = _test_event
feature_mode = _feature_mode
context_ratio = _context_ratio
train_len = _train_len
start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
test_set = []
forecasts_et = {}
forecasts_samples = {}
#_laptime_data = laptime_data.copy()
_laptime_data = laptime_data
endpos = startlap + prediction_length + 1
#while(endpos <= endlap + prediction_length + 1):
while(endpos <= endlap + prediction_length):
#make the testset
#_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
carno2rowid = {}
_test = []
_tsid = 0
for _data in _laptime_data:
if gvar.events[_data[0]] != test_event:
#jump out
continue
#statistics on the ts length
ts_len = [ _entry.shape[1] for _entry in _data[2]]
max_len = int(np.max(ts_len))
#ipdb.set_trace()
if verbose:
print(f'{endpos} {endlap} {_data[2].shape} ====event:{gvar.events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}, cars={_data[2].shape[0]}')
# process for each ts
for rowid in range(_data[2].shape[0]):
# rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
rec = _data[2][rowid].copy()
rec_raw = _data[2][rowid].copy()
#remove nan(only tails)
nans, x= nan_helper(rec[run_ts,:])
nan_count = np.sum(nans)
rec = rec[:, ~np.isnan(rec[run_ts,:])]
# remove short ts
totallen = rec.shape[1]
if ( totallen < train_len + prediction_length):
if verbose:
print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
continue
if endpos > totallen:
continue
carno = _data[1][rowid]
carid = global_carids[_data[1][rowid]]
static_cat = [carid]
if gvar.static_cat_type == 2:
static_cat = [_tsid]
#save to carno2rowid map
if carno not in carno2rowid:
carno2rowid[carno] = rowid
#first, get target a copy
# target can be COL_XXSTATUS
#target_val = rec[run_ts,:].copy().astype(np.float32)
lap_status = rec[COL_LAPSTATUS, :].copy()
track_status = rec[COL_TRACKSTATUS, :].copy()
pitage_status = rec[COL_LAPS_INSTINT,:].copy()
# <3, totallen>
if carno not in forecasts_et:
forecasts_et[carno] = np.zeros((5, totallen))
forecasts_et[carno][:,:] = np.nan
forecasts_et[carno][0,:] = rec[COL_LAPSTATUS_SAVE, :].copy()
forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
# for p-risk
forecasts_samples[carno] = np.zeros((sample_cnt))
# prepare TARGET_PREDICTED in laptime
_data[2][rowid][COL_TARGET_PREDICTED, :] = np.nan
_data[2][rowid][COL_TARGET_PREDICTED, :totallen] = rec[run_ts,:].copy().astype(np.float32)
# forecasts_et will be updated by forecasts
target_val = forecasts_et[carno][2,:]
# selection of features
if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
rec[COL_TRACKSTATUS, :] = 0
if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
rec[COL_LAPSTATUS, :] = 0
test_rec_cnt = 0
# RUN Prediction for single record
track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
# test mode
if test_flag(oracle_mode, MODE_TESTCURTRACK):
# since nan does not work, use cur-val instead
track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
#track_rec[-prediction_length:] = random.randint(0,1)
#lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
lap_rec[-prediction_length:] = 0
#for pitage, just assume there is no pit
start_pitage = pitage_rec[-prediction_length - 1]
pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
elif test_flag(oracle_mode, MODE_TESTZERO):
#set prediction part as nan
#track_rec[-prediction_length:] = np.nan
#lap_rec[-prediction_length:] = np.nan
track_rec[-prediction_length:] = 0
lap_rec[-prediction_length:] = 0
#for pitage, just assume there is no pit
start_pitage = pitage_rec[-prediction_length - 1]
pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
if test_flag(oracle_mode, MODE_PREDPIT):
#todo
#lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
# laps_instint,prediction_length)
#for pitage, use the predicted lap info to update pitage
start_pitage = pitage_rec[-prediction_length - 1]
for pos in range(prediction_length):
if lap_rec[-prediction_length + pos]==0:
pitage_rec[-prediction_length + pos] = start_pitage+1
else:
#new pit
start_pitage = 0
pitage_rec[-prediction_length + pos] = start_pitage
# add to test set
#train real features
real_features = get_real_features(feature_mode, rec, endpos)
if _joint_train:
# ground truth in forecasts_et, (RANK only)
#target_cols = [run_ts, COL_LAPSTATUS]
target_cols = [2, 0]
#target_val = rec[target_cols].copy().astype(np.float32)
target_val = forecasts_et[carno][target_cols,:endpos].astype(np.float)
_test.append({'target': target_val,
'start': start,
'feat_static_cat': static_cat,
'feat_dynamic_real': real_features
}
)
else:
_test.append({'target': target_val[:endpos].astype(np.float32),
'start': start,
'feat_static_cat': static_cat,
'feat_dynamic_real': real_features
}
)
test_rec_cnt += 1
#debug
#debug_report('simu_onestep', rec, startlap, carno, col= _run_ts)
#debug_report(f'simu_onestep: {startlap}-{endlap}, endpos={endpos}', target_val[:endpos], startlap, carno)
#jump out
# keep _data as current
_tsid += 1
break
# end of for each ts
#if not _test:
# #error in dataset
# print('Error: empty _test')
# import pdb
# pdb.set_trace()
# break
# RUN Prediction here
#test_ds = ListDataset(_test, freq=freq,one_dim_target= False if _joint_train else True)
test_ds = ListDatasetX(_test, freq=freq,one_dim_target= False if _joint_train else True)
forecast_it, ts_it = make_evaluation_predictions(
dataset=test_ds, # test dataset
predictor=predictor, # predictor
num_samples=sample_cnt, # number of sample paths we want for evaluation
)
forecasts = list(forecast_it)
tss = list(ts_it)
#save the forecast results
ds_iter = iter(test_ds)
for idx in range(len(test_ds)):
test_rec = next(ds_iter)
#global carid
carno = decode_carids[test_rec['feat_static_cat'][0]]
if _joint_train:
#
# joint train , multi dimensional target
# samples – Array of size (num_samples, prediction_length) (1D case) or (num_samples, prediction_length, target_dim)
#
if _use_mean:
forecast_laptime_mean = np.mean(forecasts[idx].samples[:,:,0], axis=0).reshape((prediction_length))
else:
forecast_laptime_mean = np.median(forecasts[idx].samples[:,:,0], axis=0).reshape((prediction_length))
forecasts_furtherest_samples = forecasts[idx].samples[:,-1,0].reshape(-1)
else:
# 1 dimensional target
if _use_mean:
forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
else:
forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_length))
forecasts_furtherest_samples = forecasts[idx].samples[:,-1].reshape(-1)
#update the forecasts , ready to use in the next prediction(regresive forecasting)
forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
# update laptime_data
rowid = carno2rowid[carno]
_data[2][rowid][COL_TARGET_PREDICTED,len(tss[idx]) - prediction_length:len(tss[idx])] = forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])]
#debug
if False:
#if carno==13:
#print('samples shape:', forecasts[idx].samples.shape)
print('tss shape:', tss[idx].shape, 'endpos:', endpos)
print('forecast mean:', forecast_laptime_mean, len(tss[idx]) - prediction_length)
print('target true:', forecasts_et[carno][1, len(tss[idx]) - prediction_length:len(tss[idx])])
print('target pred:', forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])])
#save the samples, the farest samples
#forecasts_samples[carno][:] = forecasts[idx].samples[:,-1].reshape(-1)
forecasts_samples[carno][:] = forecasts_furtherest_samples
#update laptimedata by new predictions
#save predictions into laptime data
# update featues inlaptime data
laptime_data = update_laptimedata(prediction_length, freq,
test_event = _test_event,
train_ratio=0, context_ratio = 0.,shift_len = prediction_length,
rank_col = COL_TARGET_PREDICTED
)
#go forward
endpos += prediction_length
return forecasts_et, forecasts_samples
# pred pit differs to true pit
def get_acc_onestint_pred(forecasts, startlap, nextpit, nextpit_pred, trim=2, currank = False):
    """
    Collect per-car rank-change rows for the stint starting at `startlap`,
    evaluating the prediction at the *predicted* next pit lap.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; eval for the stint start from startlap only
        nextpit ; dict of true next pitstop lap per car
        nextpit_pred ; dict of predicted next pitstop lap per car
        currank ; True -> use the start rank as the prediction (baseline)
    output:
        rows [carno, startlap, startrank, endrank, diff, sign,
              pred_endrank, pred_diff, pred_sign, pitpos, pitpos_pred]
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]
        # lap status condition: car must pit at startlap; _inlap_status widens
        # the check to the previous (1) or next (2) lap as well.
        # NOTE(review): lapstatus_cont is unbound for any other _inlap_status
        # value and the `if` below would raise NameError — confirm the switch
        # is always one of {0,1,2}.
        if _inlap_status == 0:
            lapstatus_cont = (forecasts[carno][0, startlap] == 1)
        elif _inlap_status == 1:
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap-1] == 1))
        elif _inlap_status == 2:
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap+1] == 1))
        if carno in _debug_carlist:
            _debug_msg = 'startlap=%d, total=%d, pitstop status = %s, nextpit=%s, nextpit_pred=%s'%(startlap, lapnum, lapstatus_cont,
                    'none' if (carno not in nextpit) else nextpit[carno],
                    'none' if (carno not in nextpit_pred) else nextpit_pred[carno],
                    )
            debug_print(_debug_msg)
        # check the lap status
        #if ((startlap < lapnum) and (forecasts[carno][0, startlap] == 1)):
        if ((startlap < lapnum) and (lapstatus_cont == True)):
            startrank = true_rank[startlap-trim]
            if not carno in nextpit:
                continue
            pitpos = nextpit[carno]
            if np.isnan(pitpos):
                continue
            #todo, use the true prediction that longer than maxlap
            if _force_endpit_align:
                # lenient mode: a missing/NaN predicted pit falls back to the
                # true pit lap instead of dropping the car
                if not carno in nextpit_pred:
                    #continue
                    pitpos_pred = pitpos
                else:
                    pitpos_pred = nextpit_pred[carno]
                    if np.isnan(pitpos_pred):
                        pitpos_pred = pitpos
            else:
                # strict mode: drop cars without a usable pit prediction
                if not carno in nextpit_pred:
                    continue
                pitpos_pred = nextpit_pred[carno]
                if np.isnan(pitpos_pred):
                    #set prediction to the end
                    continue
            endrank = true_rank[pitpos-trim]
            #endrank_pred = true_rank[pitpos_pred-trim]
            diff = endrank - startrank
            sign = get_sign(diff)
            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                # prediction is read at the *predicted* pit lap
                pred_endrank = pred_rank[pitpos_pred-trim]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            rankret.append([carno, startlap, startrank,
                    endrank, diff, sign,
                    pred_endrank, pred_diff, pred_sign,
                    pitpos, pitpos_pred
                    ])
    return rankret
# pred pit differs to true pit
def get_acc_onestep_shortterm(forecasts, startlap, endlap, trim=0, currank = False):
    """
    Collect per-car rank-change rows for the short-term window
    [startlap, endlap].

    input:
        forecasts ; carno -> [5,totallen] matrix
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; first lap of the window
        endlap ; last lap of the window (cars skipped when NaN)
        trim ; steady-lap offset subtracted from both lap indices
        currank ; True -> use the start rank as the prediction (baseline)
    output:
        rows [carno, startlap, startrank, endrank, diff, sign,
              pred_endrank, pred_diff, pred_sign]
    """
    records = []
    for car, mat in forecasts.items():
        total_laps = len(mat[1, :])
        # the window start must exist for this car
        if not (startlap < total_laps):
            continue
        if np.isnan(endlap):
            continue
        rank_true = mat[3, :]
        rank_pred = mat[4, :]
        rank_begin = rank_true[startlap - trim]
        rank_end = rank_true[endlap - trim]
        delta = rank_end - rank_begin
        if currank:
            # baseline: predict "no change from the start rank"
            rank_end_hat = rank_begin
        else:
            rank_end_hat = rank_pred[endlap - trim]
        delta_hat = rank_end_hat - rank_begin
        records.append([car, startlap, rank_begin,
                        rank_end, delta, get_sign(delta),
                        rank_end_hat, delta_hat, get_sign(delta_hat)])
    return records
# works when pred pitstop == true pitstop
def get_acc_onestint(forecasts, startlap, nextpit, trim=2, currank = False):
    """
    Collect per-car rank-change rows for the stint starting at `startlap`,
    reading the prediction at the *true* next pit lap.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts ; carno -> [5,totallen] matrix
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; eval for the stint start from startlap only
        nextpit ; dict of true next pitstop lap per car
        currank ; True -> use the start rank as the prediction (baseline)
    output:
        rows [carno, startlap, startrank, endrank, diff, sign,
              pred_endrank, pred_diff, pred_sign]
    """
    records = []
    for car, mat in forecasts.items():
        total_laps = len(mat[1, :])
        # only cars that actually pit at startlap are evaluated
        if not (startlap < total_laps and mat[0, startlap] == 1):
            continue
        if car not in nextpit:
            continue
        pitpos = nextpit[car]
        if np.isnan(pitpos):
            continue
        rank_true = mat[3, :]
        rank_pred = mat[4, :]
        rank_begin = rank_true[startlap - trim]
        rank_end = rank_true[pitpos - trim]
        delta = rank_end - rank_begin
        # currank baseline predicts "no change"; otherwise read the model's
        # rank at the true pit lap
        rank_end_hat = rank_begin if currank else rank_pred[pitpos - trim]
        delta_hat = rank_end_hat - rank_begin
        records.append([car, startlap, rank_begin,
                        rank_end, delta, get_sign(delta),
                        rank_end_hat, delta_hat, get_sign(delta_hat)])
    return records
#
# simulation
#
def run_simulation_stint(predictor, prediction_length, freq,
        carno, stintid, loopcnt,
        datamode = MODE_ORACLE):
    """
    simulation for one car at specific stint, repeated loopcnt times

    input:
        carno ;
        stintid ; index into this car's pit-lap list
        loopcnt ; number of repeated simulation runs
        datamode ; MODE_* bitmask forwarded to sim_onestep_pred
    step:
    1. init the lap status model
    2. loop loopcnt times on the selected pit lap
        1. onestep simulation
        2. eval stint performance
    return:
        df ; accuracy rows from get_acc_onestint_pred
        full_samples, full_tss ; returned but never filled here (the
            eval_full_samples call is commented out), so they stay empty
        maxnext_pred ; predicted next pit lap of the last run
        NOTE(review): the unsupported-_exp_id branch returns bare None
        instead of the 4-tuple; callers unpacking the result would fail.
    """
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    #init samples array
    full_samples = {}
    full_tss = {}
    #here, test only one stint for carno and stintid
    pitlap = pitmat[carno][stintid]
    for runid in range(loopcnt):
        #for pitlap in allpits:
        #1. update lap status
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        #2. get maxnext
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        #only for one car: override with this car's next pit laps
        maxnext = nextpit[carno]
        maxnext_pred = nextpit_pred[carno]
        #debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                #print('nextpit:', nextpit[12], nextpit_pred[12], 'maxnext:', maxnext, maxnext_pred)
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)
        #run one step sim from pitlap to maxnext
        #to get the forecast_sample, set max = mexnext_pred only,
        #rather than max(maxnext,maxnext_pred)
        #
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, maxnext_pred,
                oracle_mode = datamode,
                sample_cnt = 100
                )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            return
        ## evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        #add endlap
        #_ = [x.append(maxnext_pred) for x in ret]
        rankret.extend(ret)
        ## add to full_samples
        #eval_full_samples(maxnext_pred,
        #        forecast_samples, forecast,
        #        full_samples, full_tss)
    #add to df; 11 columns match the 11-element rows of get_acc_onestint_pred
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
            'endrank', 'diff', 'sign',
            'pred_endrank', 'pred_diff', 'pred_sign',
            'endlap','pred_endlap'
            ])
    return df, full_samples, full_tss, maxnext_pred
def run_simulation_pred(predictor, prediction_length, freq,
        datamode = MODE_ORACLE, verbose = False):
    """
    Long-term simulation over all ground-truth pit laps.

    step:
    1. init the lap status model
    2. loop on each pit lap
        1. onestep simulation
        2. eval stint performance

    input:
        predictor ; trained forecaster used by sim_onestep_pred
        prediction_length, freq ; forecasting horizon / series frequency
        datamode ; MODE_* bitmask forwarded to sim_onestep_pred
        verbose ;
    return:
        df ; accuracy rows from get_acc_onestint_pred, one per (car, stint)
    """
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    for pitlap in allpits:
        #1. update lap status
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        #update the features derived from the (possibly predicted) lap status
        # NOTE(review): this binds a LOCAL laptime_data (no `global` statement
        # here, unlike run_simulation_shortterm) — confirm update_laptimedata
        # mutates module state itself, otherwise this result is unused
        laptime_data = update_laptimedata(prediction_length, freq,
                test_event = _test_event,
                train_ratio=0, context_ratio = 0.,shift_len = prediction_length)
        debug_print(f'update lapstatus done.')
        #2. get maxnext
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        #debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                #print('nextpit:', nextpit[12], nextpit_pred[12], 'maxnext:', maxnext, maxnext_pred)
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)
        #run one step sim from pitlap up to the farther of the true and
        #predicted next pit laps
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, max(maxnext, maxnext_pred),
                oracle_mode = datamode,
                sample_cnt = 100,
                verbose = verbose
                )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        rankret.extend(ret)
    #add to df; 11 columns match the 11-element rows of get_acc_onestint_pred
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
            'endrank', 'diff', 'sign',
            'pred_endrank', 'pred_diff', 'pred_sign',
            'endlap','pred_endlap'
            ])
    return df
#prediction of shortterm + pred pit model
def run_simulation_shortterm(predictor, prediction_length, freq,
        datamode = MODE_ORACLE,
        sample_cnt = 100,
        verbose = False
        ):
    """
    Short-term simulation: slide a prediction_length window from lap 10 to
    the end of the race, re-predicting lap status before every window.

    step:
    1. init the lap status model
    2. loop on each lap
        1. onestep simulation
        2. eval short-term performance
    return:
        df ; accuracy rows from get_acc_onestep_shortterm
        full_samples ; carno -> (sample_cnt, maxlap) forecast samples
        full_tss ; carno -> true series aligned with full_samples
    """
    global laptime_data
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    #init samples array
    full_samples = {}
    full_tss = {}
    for pitlap in range(10, maxlap-prediction_length):
        #1. update lap status
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        #update the features derived from the (possibly predicted) lap status
        laptime_data = update_laptimedata(prediction_length, freq,
                test_event = _test_event,
                train_ratio=0, context_ratio = 0.,shift_len = prediction_length)
        #if pitlap == 124:
        #    import pdb
        #    pdb.set_trace()
        debug_print(f'update lapstatus done.')
        #run one step sim over the window [pitlap, pitlap + prediction_length)
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, pitlap + prediction_length,
                oracle_mode = datamode,
                sample_cnt = sample_cnt,
                verbose = verbose
                )
        #pocono-2019
        #end with nan, totallen < expected endpos -> stop the slide early
        if not forecast:
            break
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, prediction_length)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, prediction_length, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        #debug joint
        #if True:
        #    xmat = forecasts_et[13][:, pitlap:pitlap+prediction_length]
        #    print('debug forecasts_et at ', pitlap)
        #    print(xmat)
        # evaluate for this window
        #ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred)
        ret = get_acc_onestep_shortterm(forecasts_et, pitlap, pitlap+prediction_length)
        rankret.extend(ret)
        # add to full_samples; rank experiments compare by rank, plain
        # laptime compares raw values
        evalbyrank = False if _exp_id == 'laptime2rank' else True
        eval_full_samples(pitlap + prediction_length,
                forecast_samples, forecast,
                full_samples, full_tss, evalbyrank=evalbyrank)
        print('evalbyrank:', evalbyrank)
    #add to df; 9 columns match the 9-element rows of get_acc_onestep_shortterm
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
            'endrank', 'diff', 'sign',
            'pred_endrank', 'pred_diff', 'pred_sign',
            ])
    return df, full_samples, full_tss
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset):
    """
    evaluate stint rank by laptime forecasting: cumulative laptimes (plus the
    per-car start offset) give elapsed time, whose per-lap ordering is the rank
    input:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder (overwritten with true rank)
            4, -> placeholder (overwritten with pred rank)
        start_offset ; DataFrame of per-car elapsed time at lap 0
            NOTE(review): if start_offset is not a DataFrame, `offset` is
            never bound and the first loop iteration raises NameError —
            confirm callers always pass the DataFrame
        prediction_length ; unused in this function
    return:
        forecasts_et ; the same dict, rows 3/4 updated in place
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    maxlap = gvar.maxlap
    # channel 0: true elapsed time, channel 1: predicted elapsed time
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan
    for carno in forecasts_et.keys():
        #start_offset is global var
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        lapnum = len(forecasts_et[carno][1,:])
        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed
        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed
        #maxlap = max(maxlap, len(forecasts_et[carno][1,:]))
    #calculate rank, support nan (nan sorts to the end, i.e. worst rank)
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]
    return forecasts_et
#
#
def eval_full_samples(lap, forecast_samples, forecast, full_samples, full_tss, evalbyrank = True):
    """
    Accumulate one prediction window's result into the race-long sample and
    truth arrays (both updated in place).

    input:
        lap ; lap number (the last lap of the current prediction window)
        forecast_samples; {} carno -> samples of pred target
        forecast ; {}, carno -> 5 x totallen matrix
            1,: -> true target
            2,: -> pred target
        evalbyrank ; True -> convert target/samples to per-lap ranks first,
            False -> store the raw target values
    return: (via in-place update)
        full_samples ; carno -> (samplecnt, maxlap), column `lap` filled
        full_tss ; carno -> (maxlap,), filled up to and including `lap`
    """
    #get car list for this lap
    carlist = list(forecast.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    samplecnt = len(forecast_samples[carlist[0]])
    #diff_time = np.zeros((len(carlist), 1))
    diff_time = np.zeros((len(carlist), gvar.maxlap))
    diff_time_hat = np.zeros((len(carlist), samplecnt))
    diff_time[:,:] = np.nan
    diff_time_hat[:,:] = np.nan
    for carno in carlist:
        #diff_time[caridmap[carno],0] = forecast[carno][1, lap]
        maxlen = len(forecast[carno][1, :])
        diff_time[caridmap[carno],:maxlen] = forecast[carno][1, :]
        diff_time_hat[caridmap[carno],:] = forecast_samples[carno]
    if evalbyrank == True:
        #calculate rank, support nan (double argsort -> 0-based ranks)
        idx = np.argsort(diff_time, axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(diff_time_hat, axis=0)
        pred_rank = np.argsort(idx, axis=0)
    else:
        true_rank = diff_time
        pred_rank = diff_time_hat
    # save the rank back
    for carno in carlist:
        if carno not in full_tss:
            #init to NaN on first sight of this car
            full_tss[carno] = np.zeros((gvar.maxlap))
            full_samples[carno] = np.zeros((samplecnt, gvar.maxlap))
            full_tss[carno][:] = np.nan
            full_samples[carno][:,:] = np.nan
        full_tss[carno][:lap] = true_rank[caridmap[carno]][:lap]
        full_tss[carno][lap] = true_rank[caridmap[carno]][lap]
        full_samples[carno][:, lap] = pred_rank[caridmap[carno],:]
    return
def eval_stint_direct(forecasts_et, prediction_length):
    """
    Evaluate rank directly from a rank-like target (e.g. timediff): the
    per-lap ordering of the target values is the rank.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3,: -> overwritten with true rank
            4,: -> overwritten with pred rank
        prediction_length ; unused here, kept for interface symmetry
    return:
        forecasts_et ; the same dict, rows 3/4 updated in place
    """
    cars = list(forecasts_et.keys())
    slot = {car: i for i, car in enumerate(cars)}
    maxlap = gvar.maxlap
    # channel 0: true target, channel 1: predicted target; NaN pads the tail
    scores = np.full((2, len(cars), maxlap), np.nan)
    for car in cars:
        n = len(forecasts_et[car][1, :])
        scores[0, slot[car], :n] = forecasts_et[car][1, :]
        scores[1, slot[car], :n] = forecasts_et[car][2, :]
    # double argsort converts scores into 0-based per-lap ranks (NaN sorts last)
    true_rank = np.argsort(np.argsort(scores[0], axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(scores[1], axis=0), axis=0)
    # write the ranks back into the placeholder rows
    for car in cars:
        n = len(forecasts_et[car][1, :])
        forecasts_et[car][3, :] = true_rank[slot[car], :n]
        forecasts_et[car][4, :] = pred_rank[slot[car], :n]
    return forecasts_et
#calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting
    NOTE(review): this body is identical to eval_stint_bylaptime except for
    the docstring; consider delegating to it to remove the duplication.
    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            1,: -> true laptime, 2,: -> pred laptime,
            3/4,: -> overwritten with true/pred rank
        start_offset ; DataFrame of per-car elapsed time at lap 0
            NOTE(review): `offset` is unbound if start_offset is not a
            DataFrame — the first loop iteration would raise NameError
        prediction_length ; unused in this function
    return:
        forecasts_et ; updated in place
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    maxlap = gvar.maxlap
    # channel 0: true elapsed time, channel 1: predicted elapsed time
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan
    for carno in forecasts_et.keys():
        #start_offset is global var
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        lapnum = len(forecasts_et[carno][1,:])
        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed
        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed
        #maxlap = max(maxlap, len(forecasts_et[carno][1,:]))
    #calculate rank, support nan (nan sorts to the end, i.e. worst rank)
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]
    return forecasts_et
# In[13]:
def get_sign(diff):
    """Map *diff* to its sign: 1 if positive, -1 if negative, 0 otherwise
    (anything that compares False both ways, e.g. 0, yields 0)."""
    if diff > 0:
        return 1
    if diff < 0:
        return -1
    return 0
def init(laptimefile, pitmodel = '', pitmodel_bias = 0):
    """
    Load race data and the laptime pickle, and configure the pit model.

    input:
        laptimefile   ; pickle containing (global_carids, laptime_data)
        pitmodel      ; non-str -> PitModelSimple (0 selects top8=True),
                        'oracle' -> keep the string marker (callers test for
                        it explicitly), any other str -> path to a
                        PitModelMLP model file
        pitmodel_bias ; bias recorded alongside the MLP pit model
    side effects:
        sets the module globals global_carids, laptime_data,
        global_start_offset, decode_carids, _pitmodel and _pitmodel_bias
    """
    global global_carids, laptime_data, global_start_offset, decode_carids, _pitmodel
    # bugfix: _pitmodel_bias must be declared global here — without it the
    # assignment in the MLP branch below only created a dead local and the
    # module-level _pitmodel_bias silently stayed at its default
    global _inlap_status, _pitmodel_bias
    stagedata = {}
    for event in gvar.events:
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata = stagedata[event]
        # elapsed time at lap 0 is the per-car start offset for this event
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]
    # start from here
    import pickle
    with open(laptimefile, 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    decode_carids = {carid:carno for carno, carid in global_carids.items()}
    print(f'init: load dataset {laptimefile} with {len(laptime_data)} races, {len(global_carids)} cars')
    if not isinstance(pitmodel, str):
        # numeric selector -> the simple statistical pit model
        _pitmodel = PitModelSimple(top8=(True if pitmodel==0 else False))
        print(f'init pitmodel as PitModelSimple')
    elif pitmodel=='oracle':
        _pitmodel = pitmodel
    else:
        _pitmodel_bias = pitmodel_bias
        _pitmodel = PitModelMLP(modelfile = pitmodel)
        print(f'init pitmodel as PitModelMLP(pitmodel)')
def get_evalret(df):
    """
    Evaluate stint-level rank-change forecasts in *df* against the naive
    "rank stays the same" baseline, printing a summary.

    input:
        df ; DataFrame with columns startrank, endrank, diff, sign,
             pred_diff, pred_sign
    return:
        2x4 array [[acc, mae, rmse, r2],
                   [acc_naive, mae_naive, rmse_naive, r2_naive]]
    """
    # accuracy of predicting the direction of the rank change
    correct = df[df['sign']==df['pred_sign']]
    acc = len(correct)/len(df)
    # NOTE(review): mae1 is computed but never used or returned
    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values))/len(df)
    rmse = math.sqrt(mean_squared_error(df['pred_diff'].values , df['diff'].values))
    mae = mean_absolute_error(df['pred_diff'].values , df['diff'].values)
    r2 = r2_score(df['pred_diff'].values , df['diff'].values)
    #naive result: predict endrank == startrank
    n_correct = df[df['startrank']==df['endrank']]
    acc_naive = len(n_correct)/len(df)
    mae_naive = np.mean(np.abs(df['diff'].values))
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)
    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}, {mae_naive1}')
    #print(f'pred: acc={acc}, mae={mae}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, {%d}\n \
naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2, len(df),
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
    )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    #return acc, mae, rmse, r2
def get_evalret_shortterm(df):
    """
    Evaluate short-term rank forecasts in *df* against the naive
    "rank stays the same" baseline, printing a summary.

    acc here is the precision of predicting the leader (rank 0): among rows
    predicted to end at rank 0, the fraction that truly ended at rank 0.

    input:
        df ; DataFrame with columns startlap, startrank, endrank, diff,
             sign, pred_endrank, pred_diff, pred_sign
    return:
        2x5 array [[acc, mae, rmse, r2, signacc],
                   [acc_naive, mae_naive, rmse_naive, r2_naive, naive_signacc]]
    """
    maxlap = np.max(df['startlap'].values)
    minlap = np.min(df['startlap'].values)
    top1 = df[df['endrank']==0]
    top1_pred = df[df['pred_endrank']==0]
    correct = top1_pred[top1_pred['pred_endrank']==top1_pred['endrank']]
    # epsilon keeps the ratio defined when no leader was predicted at all
    acc = len(correct)/(len(top1_pred) + 1e-10)
    rmse = math.sqrt(mean_squared_error(df['pred_endrank'].values , df['endrank'].values))
    mae = mean_absolute_error(df['pred_endrank'].values , df['endrank'].values)
    r2 = r2_score(df['pred_endrank'].values , df['endrank'].values)
    # NOTE(review): mae1 is computed but never used or returned
    mae1 = np.sum(np.abs(df['pred_endrank'].values - df['endrank'].values))
    mae1 = mae1/ (maxlap -minlap +1)
    #naive result: predict endrank == startrank
    top1_naive = df[df['startrank']==0]
    n_correct = top1_naive[top1_naive['startrank']==top1_naive['endrank']]
    # bugfix: guard against an empty top1_naive the same way acc is guarded
    # above; the unguarded division raised ZeroDivisionError whenever no row
    # started at rank 0 (e.g. on filtered subsets)
    acc_naive = len(n_correct)/(len(top1_naive) + 1e-10)
    mae_naive = np.mean(np.abs(df['diff'].values))
    mae_naive1 = np.sum(np.abs(df['diff'].values))
    mae_naive1 = mae_naive1 / (maxlap - minlap + 1)
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)
    # sign accuracy: direction of the rank change (naive always predicts 0)
    correct = df[df['sign']==df['pred_sign']]
    signacc = len(correct)/len(df)
    naive_signcorrect = df[df['sign'] == 0]
    naive_signacc = len(naive_signcorrect) / len(df)
    print('testset size:', len(df), 'minlap:', minlap, 'maxlap:', maxlap)
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, top1_pred: {%d}, top1_naive: {%d}\n \
naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, top1: {%d}'%(
        acc, mae, rmse, r2, len(top1_pred), len(top1_naive),
        acc_naive, mae_naive, rmse_naive, r2_naive, len(top1)
        )
    )
    return np.array([[acc, mae, rmse, r2, signacc],[acc_naive, mae_naive, rmse_naive, r2_naive, naive_signacc]])
#
# configuration
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'
_test_event = 'Indy500-2018'
#_test_event = 'Indy500-2019'
_train_len = 40
_test_train_len = 40
_feature_mode = FEATURE_STATUS
_context_ratio = 0.
# task/experiment selection: exactly one (task_id, run_ts, exp_id) group
# below should be active at a time
#_task_id = 'timediff' # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
#
#_task_id = 'lapstatus' # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus' #rank, laptime, laptim2rank, timediff2rank...
_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
# in-lap check width used by get_acc_onestint_pred: 0 = pit lap only,
# 1 = also require the previous lap, 2 = also require the next lap
_inlap_status = 1
# fall back to the true pit lap when the predicted one is missing/NaN
_force_endpit_align = False
_include_endpit = False
#_use_mean = False # mean or median to get prediction from samples
_use_mean = True # mean or median to get prediction from samples
# joint train the target of (rank, lapstatus)
_joint_train = False
_pitmodel_bias = 0
# In[16]:
# mutable module state, populated by init()
global_start_offset = {}
global_carids = {}
laptime_data = None
freq = "1min"
decode_carids = {}
_trim = 0
# turn to use gvar
#years = ['2013','2014','2015','2016','2017','2018','2019']
#events = [f'Indy500-{x}' for x in years]
#events_id={key:idx for idx, key in enumerate(events)}
#dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
_lags_seq = [1]
| 84,046 | 33.403193 | 230 | py |
rankpredictor | rankpredictor-master/src/indycar/model/stint_simulator_shortterm_pitmodel.py | #!/usr/bin/env python
# coding: utf-8
# ### stint simulator
# based on: Stint-Predictor-Fastrun
#
#
# long term predictor by continuously regressive forecasting at each pitstop
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random, math
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from indycar.model.pitmodel import PitModelSimple, PitModelMLP
from indycar.model.deeparw import DeepARWeightEstimator
# In[2]:
import os
random.seed()
os.getcwd()
#GPUID = 1
# ### global constants
# In[3]:
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# --- row indices into each car's per-lap data matrix ---
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8
# share the memory
#COL_LAPSTATUS_PRED = 8 # for dynamic lapstatus predictions
#LAPSTATUS SAVED in forecast_et
COL_LAPSTATUS_SAVE = 0 #laptime no use
COL_CAUTION_LAPS_INSTINT_SAVE=7
COL_LAPS_INSTINT_SAVE= 8
# added new features
COL_LEADER_PITCNT = 9
COL_TOTAL_PITCNT = 10
COL_SHIFT_TRACKSTATUS = 11
COL_SHIFT_LAPSTATUS = 12
COL_SHIFT_LEADER_PITCNT = 13
COL_SHIFT_TOTAL_PITCNT = 14
# --- feature-selection bitmask flags (combined into feature_mode) ---
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
FEATURE_LEADER_PITCNT = 8
FEATURE_TOTAL_PITCNT = 16
FEATURE_SHIFT_TRACKSTATUS = 32
FEATURE_SHIFT_LAPSTATUS = 64
FEATURE_SHIFT_LEADER_PITCNT = 128
FEATURE_SHIFT_TOTAL_PITCNT = 256
# flag -> (long name, one-letter tag)
_feature2str= {
        FEATURE_STATUS : ("FEATURE_STATUS",'S'),
        FEATURE_PITAGE : ("FEATURE_PITAGE",'P'),
        FEATURE_LEADER_PITCNT : ("FEATURE_LEADER_PITCNT",'L'),
        FEATURE_TOTAL_PITCNT : ("FEATURE_TOTAL_PITCNT",'T'),
        FEATURE_SHIFT_TRACKSTATUS : ("FEATURE_SHIFT_TRACKSTATUS",'S'),
        FEATURE_SHIFT_LAPSTATUS : ("FEATURE_SHIFT_LAPSTATUS",'P'),
        FEATURE_SHIFT_LEADER_PITCNT : ("FEATURE_SHIFT_LEADER_PITCNT",'L'),
        # bugfix: a missing comma made this entry the single concatenated
        # string "FEATURE_SHIFT_TOTAL_PITCNTT" instead of a (name, tag)
        # tuple like every other entry
        FEATURE_SHIFT_TOTAL_PITCNT : ("FEATURE_SHIFT_TOTAL_PITCNT",'T')
        }
# oracle mode (bitmask flags combined into oracle_mode / datamode)
MODE_ORACLE = 1024 # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2
# oracle mode for training
# NOTE(review): these reuse the bit values 1 and 2 from the oracle flags
# above — the training and oracle flag sets appear to be separate
# namespaces; confirm callers never mix them in one mask
MODE_NOLAP = 1
MODE_NOTRACK = 2
# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32
# disturbance analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256
# mode flag -> printable name (training-only flags are intentionally absent)
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
        MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
        MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
        MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
        MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
        MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
def get_real_features(feature_mode, rec, endpos):
    """Assemble the real-valued covariate rows selected by *feature_mode*.

    Args:
        feature_mode: OR-combination of the FEATURE_* bit flags.
        rec: 2-D record array indexed as rec[COL_*, lap].
        endpos: exclusive lap cutoff; values <= 0 mean "use the whole series".

    Returns:
        List of 1-D row slices, in the fixed order the models expect
        (status rows first, then pitage, pit counters, shifted variants).
    """
    if endpos <= 0:
        endpos = rec.shape[1]
    # (flag, column rows to append) — order matters and must stay stable
    selection = [
        (FEATURE_STATUS, (COL_TRACKSTATUS, COL_LAPSTATUS)),
        (FEATURE_PITAGE, (COL_LAPS_INSTINT,)),
        (FEATURE_LEADER_PITCNT, (COL_LEADER_PITCNT,)),
        (FEATURE_TOTAL_PITCNT, (COL_TOTAL_PITCNT,)),
        (FEATURE_SHIFT_TRACKSTATUS, (COL_SHIFT_TRACKSTATUS,)),
        (FEATURE_SHIFT_LAPSTATUS, (COL_SHIFT_LAPSTATUS,)),
        (FEATURE_SHIFT_LEADER_PITCNT, (COL_SHIFT_LEADER_PITCNT,)),
        (FEATURE_SHIFT_TOTAL_PITCNT, (COL_SHIFT_TOTAL_PITCNT,)),
    ]
    features = []
    for flag, cols in selection:
        if test_flag(feature_mode, flag):
            features.extend(rec[col, :endpos] for col in cols)
    return features
#
# interface with QuickTest
#
def set_laptimedata(newdata):
    """Install *newdata* as the module-wide laptime_data used by the dataset builders."""
    global laptime_data
    laptime_data = newdata
    print('Set a new global laptime_data')
#
#
#
def load_data(event, year=0):
    """Load one race's lap records from ../data/final/C_<event>[-<year>].csv.

    Args:
        event: event name, e.g. 'Indy500'.
        year: optional season; 0 means the file name carries no year suffix.

    Returns:
        (alldata, rankdata, acldata):
            alldata  - raw csv as a DataFrame;
            rankdata - first record per (car_number, completed_laps),
                       ordered by elapsed_time (ties broken by file order);
            acldata  - completed-lap table from make_cl_data(alldata).
    """
    if year > 0:
        # bug fix: year is an int, the original concatenated it to a str
        # directly and raised TypeError; convert explicitly
        inputfile = '../data/final/C_' + event + '-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'
    dataset = pd.read_csv(inputfile)
    alldata = dataset.copy()
    # earliest timestamp wins for each (car, lap); MyIdx keeps csv order stable
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    acldata = make_cl_data(alldata)
    # note: the original also built an unused completed-cars-only table
    # (cldata); that dead computation has been removed
    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-car completed-lap table.

    Keeps the first (earliest elapsed_time) record per (car_number,
    completed_laps), then adds per-car diffs of rank and elapsed_time,
    with the first lap of every car reset to 0.

    Returns:
        DataFrame with columns [car_number, completed_laps, rank,
        elapsed_time, rank_diff, time_diff, current_status, track_status,
        lap_status], sorted by (car_number, completed_laps).
    """
    # pick up data with valid rank: earliest record per (car, lap)
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade", "laps_behind_prec", "overall_rank", "pit_stop_count",
                          "last_pitted_lap", "start_position", "laps_led"], axis=1)
    # rows already grouped per car; a car_number change marks each car's first lap
    first_of_car = uni_ds.car_number != uni_ds.car_number.shift(1)
    # bug fix: the original used chained assignment
    # (uni_ds['rank_diff'][mask] = 0) which triggers SettingWithCopyWarning
    # and silently stops working under pandas copy-on-write; use .loc
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[first_of_car, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[first_of_car, 'time_diff'] = 0
    df = uni_ds[['car_number', 'completed_laps', 'rank', 'elapsed_time',
                 'rank_diff', 'time_diff', "current_status", "track_status", "lap_status"]]
    return df
# In[5]:
def nan_helper(y):
    """Locate NaNs in a 1-D array.

    Returns:
        (nan_mask, to_indices) where nan_mask is a boolean array marking the
        NaN positions and to_indices converts any logical mask to integer
        indices, e.g. for linear interpolation:

        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    nan_mask = np.isnan(y)

    def to_indices(logical):
        return np.nonzero(logical)[0]

    return nan_mask, to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
def get_modestr(a):
    """Render the mode bits set in *a* as a comma-terminated string of mode names."""
    names = ['%s,' % _mode_map[flag] for flag in _mode_map if test_flag(a, flag)]
    return ''.join(names)
# endpos -> vector of prediction_length
_track_pred = {}
_track_true = {}
def init_track_model():
    """Drop all cached track-status predictions and truths (start of a fresh run)."""
    global _track_pred, _track_true
    _track_pred, _track_true = {}, {}
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """
    return the predicted track status

    Samples a caution-period length and fills the start of the prediction
    window with 1s for however many caution laps remain.  Results are cached
    per endpos in _track_pred so every car at the same split point sees the
    same sampled track future; the true window is kept in _track_true.
    input:
        track_rec ; track-status vector (1 = caution lap), length >= endpos
        endpos ; split position, used only as the cache key
        prediction_length ; size of the forecast window at the tail
        context_len ; how many laps before the window to scan for an ongoing caution
    """
    global _track_pred,_track_true

    # this is the perfect track model for Indy500 2018
    # (empirical caution lengths, sampled uniformly via random.choice)
    track_model = [6,4,4,5,6,6,4]

    if endpos in _track_pred:
        return _track_pred[endpos]
    else:
        #get yflag lap count from the start pred point
        # count consecutive caution laps immediately before the window
        yflaplen = 0
        for i in range(1, context_len):
            if track_rec[- prediction_length - i] == 1:
                yflaplen += 1
            else:
                break

        #laps remain, fill into the future
        trackpred = np.array([0 for x in range(prediction_length)])
        yflap_pred = random.choice(track_model)
        # only if a caution is ongoing and the sampled length exceeds what
        # has already elapsed do any predicted caution laps remain
        if yflaplen > 0 and yflap_pred > yflaplen:
            trackpred[:(yflap_pred - yflaplen)] = 1

        _track_pred[endpos] = trackpred
        _track_true[endpos] = track_rec[- prediction_length:].copy()

        return trackpred
# endpos -> vector of prediction_length
_track_adjust = {}
def init_adjust_track_model():
    """Reset the per-endpos cache of disturbed track-status vectors."""
    global _track_adjust
    _track_adjust = {}

def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """Randomly perturb the end of the caution period in the prediction window.

    Draws -1 (shorten by one lap), 0 (keep), or +1 (lengthen by one lap)
    uniformly; results are cached per endpos so every car at the same split
    point gets the same disturbance.

    Args:
        track_rec: track-status vector (1 = caution lap), length >= prediction_length.
        endpos: split position, used only as the cache key.
        prediction_length: size of the forecast window at the tail.
        tailpos: negative index (within the window) of the LAST caution lap; must be != 0.

    Returns:
        Disturbed copy of track_rec[-prediction_length:].
    """
    global _track_adjust

    # shorten / keep / lengthen the caution by one lap, uniformly
    track_model = [-1, 0, 1]

    if endpos in _track_adjust:
        return _track_adjust[endpos]

    yflap_adjust = random.choice(track_model)
    trackadjust = track_rec[-prediction_length:].copy()
    if yflap_adjust == -1:
        # shorten: clear the last caution lap
        trackadjust[tailpos] = 0
    elif yflap_adjust == 1:
        # lengthen: add one caution lap after the tail, if it is still inside
        # the window. bug fix: the original also cleared tailpos here, which
        # SHIFTED the caution end instead of lengthening it (and shortened the
        # caution when the tail was the window's last lap).
        if (tailpos + 1) <= -1:
            trackadjust[tailpos + 1] = 1

    _track_adjust[endpos] = trackadjust
    return trackadjust
# carno -> lap_status
_lap_adjust = {}
_empirical_model = {}
def init_adjust_pitmodel():
global _lap_adjust
_lap_adjust = {}
_empirical_model = {}
def get_adjust_lapstatus(carno, lapstatus, force = True):
    """
    Return a randomly perturbed copy of a car's pit-lap vector, computed once
    per car and cached in _lap_adjust so repeated calls are consistent.

    Each pit lap (value 1) is moved by an offset drawn from _adjust_model via
    get_random_choice(); draws falling outside the vector are retried.  Every
    applied offset is tallied into the module-level _empirical_model dict.

    input:
        carno; car number, used as the cache key
        lapstatus ; the ground-truth pit vector (1 = pit lap)
        force ; when False, give up after a single draw instead of retrying
                out-of-range offsets (NOTE(review): the break also fires
                after a SUCCESSFUL move, so force=False caps work at one
                draw per pit lap — confirm this is intended)
    """
    if carno not in _lap_adjust:
        #adjust it
        lapadjust = lapstatus.copy()
        for pos in range(0, len(lapstatus)):
            if lapadjust[pos] == 1:
                success = False
                while(not success):
                    # adjust this pit lap position
                    pos_adjust = get_random_choice(_adjust_model)

                    new_pos = pos + pos_adjust

                    if new_pos >= 0 and new_pos < len(lapstatus):
                        #valid
                        lapadjust[pos] = 0
                        lapadjust[new_pos] = 1
                        success = True

                        #add statistics
                        # (module-level dict mutated in place, no global needed)
                        if pos_adjust not in _empirical_model:
                            _empirical_model[pos_adjust] = 1
                        else:
                            _empirical_model[pos_adjust] += 1

                    if force==False:
                        break

        _lap_adjust[carno] = lapadjust

    return _lap_adjust[carno]
def build_random_model(modeldict):
    """Turn a {value: weight} mapping into a cumulative-distribution table.

    Weights need not sum to 1; the cdf column is normalized at the end.

    Args:
        modeldict: {value: weight} (weights proportional to probability).

    Returns:
        ndarray of shape (n, 2): column 0 holds the values sorted ascending,
        column 1 the normalized cumulative probabilities (last entry == 1.0).
    """
    values = sorted(modeldict.keys())
    table = np.zeros((len(values), 2))
    running = 0
    for row, val in enumerate(values):
        running = running + modeldict[val]
        table[row, 0] = val
        table[row, 1] = running
    # normalize the cdf so the final entry is exactly 1
    table[:, 1] = table[:, 1] / running
    return table
def print_model(model, iscdf=True):
    """Pretty-print a (value, probability) table as 'val:prob' strings.

    Args:
        model: ndarray (n, 2); column 1 is a cdf when iscdf is True,
               otherwise raw (possibly unnormalized) probabilities.
    """
    ordered = model[np.argsort(model[:, 0])]
    if iscdf:
        # recover per-value probabilities from the cumulative column
        probs = np.diff(ordered[:, 1], prepend=0.)
    else:
        probs = ordered[:, 1] / np.sum(ordered[:, 1])
    ret = list(zip(ordered[:, 0], probs))
    #output
    print(['%d:%.3f'%(x[0],x[1]) for x in ret])
def get_random_choice(model):
    """Draw one value from a cdf table built by build_random_model().

    Args:
        model: ndarray (n, 2) of (value, cumulative probability), cdf ascending.

    Returns:
        int: a value sampled according to its probability.
    """
    draw = np.random.rand()
    # first row whose cdf >= draw == count of cdf entries strictly below draw
    pick = np.searchsorted(model[:, 1], draw, side='left')
    return int(model[pick, 0])
#_modeldict={-2:0.1,-1:0.2,0:0.4, 1:0.2, 2:0.1 }
# Pit-lap offset distribution used by the ADJUSTPIT disturbance: weights are
# normalized by build_random_model, so they need not sum to 1.  The 0-offset
# weight is deliberately far below the commented-out variant above —
# presumably to force most pit laps to actually move; confirm with the
# disturbance-analysis experiments.
_modeldict={-2:0.1,-1:0.2,0:0.05, 1:0.2, 2:0.1 }
_adjust_model = build_random_model(_modeldict)
def adjust_pit_model(lap_rec, prediction_length, force=True):
    """
    Randomly move each pit lap inside the prediction window by an offset
    drawn from _adjust_model (uncached variant of get_adjust_lapstatus,
    restricted to the last prediction_length laps).

    input:
        lap_rec ; pit-status vector (1 = pit lap)
        prediction_length ; size of the window at the tail to perturb
        force ; when False, stop after one draw per pit lap instead of
                retrying out-of-range offsets (NOTE(review): the break also
                fires after a successful move — same caveat as
                get_adjust_lapstatus)
    return: perturbed copy of lap_rec[-prediction_length:]
    """
    #laps remain, fill into the future
    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(0, prediction_length):
        if lapadjust[pos] == 1:
            success = False
            while(not success):
                # adjust this pit lap position
                pos_adjust = get_random_choice(_adjust_model)

                new_pos = pos + pos_adjust

                if new_pos >= 0 and new_pos < prediction_length:
                    #valid
                    lapadjust[pos] = 0
                    lapadjust[new_pos] = 1
                    success = True

                if force==False:
                    break

    return lapadjust
def adjust_pit_model_fix(lap_rec, endpos, prediction_length):
    """Shift each pit lap in the prediction window by -1, 0 or +1 (uniform).

    Moves that would leave the window are dropped.  NOTE(review): *endpos* is
    unused, and the first random.choice below is also unused — both are kept
    byte-compatible so the consumed random stream matches the original.

    Args:
        lap_rec: pit-status vector (1 = pit lap).
        endpos: unused.
        prediction_length: size of the tail window to perturb.

    Returns:
        Perturbed copy of lap_rec[-prediction_length:].
    """
    adjust_model = [-1, 0, 1]
    lap_adjust = random.choice(adjust_model)  # unused; preserves the RNG sequence

    lapadjust = lap_rec[-prediction_length:].copy()
    for pos in range(prediction_length):
        if lapadjust[pos] != 1:
            continue
        offset = random.choice(adjust_model)
        target = pos + offset
        # a pit moved right is revisited at its new slot (original behavior)
        if offset != 0 and 0 <= target < prediction_length:
            lapadjust[pos] = 0
            lapadjust[target] = 1
    return lapadjust
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
    """Sample a predicted pit lap inside the prediction window.

    Draws a stint length from the Indy500-2018 empirical model and, when the
    implied pit lap falls inside the window, marks it with a 1.

    Args:
        cuation_laps_instint: caution laps so far in the stint; > 10 selects
            the caution-heavy (shorter-stint) sample list.
        laps_instint: laps already completed in the current stint.
        prediction_length: size of the forecast window.

    Returns:
        int ndarray of length prediction_length with at most one 1.
    """
    # this is the perfect empirical pit model for Indy500 2018:
    # [0] = caution-heavy stints (~30-40 laps), [1] = green stints (~41-49)
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
                     [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    # alternative model restricted to the top-8 finishers (kept, unused)
    pit_model_top8 = [[33, 32, 35, 33, 36, 33, 36, 33, 37, 35, 36, 33, 37, 34],
                      [46, 45, 43, 48, 46, 45, 45, 43]]
    pit_model = pit_model_all

    # caution-heavy stints pit earlier; pick the matching sample list
    stint_samples = pit_model[0] if cuation_laps_instint > 10 else pit_model[1]
    pred_pit_laps = random.choice(stint_samples)

    pitpred = np.array([0] * prediction_length)
    if laps_instint < pred_pit_laps <= laps_instint + prediction_length:
        pitpred[pred_pit_laps - laps_instint - 1] = 1
    return pitpred
def make_dataset_byevent(runs, prediction_length, freq,
                       useeid = False,
                       run_ts= COL_LAPTIME,
                       test_event = 'Indy500-2018',
                       test_cars = [],
                       use_global_dict = True,
                       oracle_mode = MODE_ORACLE,
                       half_moving_win = 0,
                       train_ratio=0.8,
                       log_transform = False,
                       context_ratio = 0.,
                       verbose = False
                ):
    """
    split the ts to train and test part by the ratio

    Builds GluonTS ListDatasets from the module-level laptime_data.  For the
    test event, every split position (endpos) of every car becomes one test
    record whose last prediction_length covariates are fabricated/perturbed
    according to oracle_mode (MODE_* flags).

    input:
        runs ; index into laptime_data, or <0 for all events
        oracle_mode: false to simulate prediction in real by
                set the covariates of track and lap status as nan in the testset
        half_moving_win ; rolling-window step: 0 -> -1, 1 -> -plen/2, 2 -> -plen
    return:
        train_ds, test_ds (ListDataset), train_set, test_set (raw dicts)

    NOTE(review): the run_ts/test_event parameters are immediately overridden
    by the module globals below, and non-test events are skipped entirely
    (the `continue` in the event loop), so train_set is always empty here —
    confirm this is intended before reusing this builder for training.
    NOTE(review): test_cars=[] is a mutable default argument (harmless while
    it is never mutated, but fragile).
    """
    # parameters overridden by module-level experiment configuration
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode

    # reset the per-endpos caches of the track/disturbance models
    init_track_model()
    init_adjust_track_model()

    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []

    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()

    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []

        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)

        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            # untouched copy kept to measure the disturbance MAE below
            rec_raw = _data[2][rowid].copy()

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            #check carno in test_cars
            if len(test_cars)>0 and carno not in test_cars:
                continue

            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)

            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)

            # selection of features: zero out the covariate rows the mode excludes
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': target_val,
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                       rec[COL_LAPSTATUS,:]]
                              }
                              )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len

                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1

                #bug fix, fixed the split point for all cars/ts
                # walk the split point backwards from the longest series so
                # every car shares the same endpos grid
                for endpos in range(max_len, context_len+prediction_length,
                                    step):

                    #check if enough for this ts
                    if endpos > totallen:
                        continue

                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    # stint state at the last observed lap before the window
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                    # test mode
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                                                     laps_instint,prediction_length)
                        #for pitage, use the predicted lap info to update pitage
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec

                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec

                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec
                    # accumulate how far the fabricated covariates drift from truth
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))

                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                              )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                              )
                    test_rec_cnt += 1

            #add one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

        train_set.extend(_train)
        test_set.extend(_test)

    print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)

    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile, freq, prediction_length, cardinality, train_ds, test_ds):
    """Pickle the dataset bundle to *datafile*.

    The saved object is the list
    [freq, prediction_length, cardinality, train_ds, test_ds].
    """
    bundle = [freq, prediction_length, cardinality, train_ds, test_ds]
    with open(datafile, 'wb') as f:
        pickle.dump(bundle, f, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[6]:
def predict(test_ds, predictor):
    """Run GluonTS evaluation predictions over *test_ds*.

    Returns:
        (tss, forecasts): the ground-truth series and the sampled forecasts,
        both fully materialized as lists (same order as test_ds).
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,       # test dataset
        predictor=predictor,   # predictor
        num_samples=100,       # number of sample paths we want for evaluation
    )
    # materialize forecasts first, then the truth series (original order)
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name, trainid):
    """Load the predictor selected by *model_name* and forecast *test_ds*.

    Serialized deepAR variants are loaded from
    ../models/remote/{_dataset_id}/{_task_id}-{trainid}/; 'naive', 'zero'
    and 'arima' predictors are constructed on the fly.

    Returns:
        [tss, forecasts] on success, [] for an unknown model name.
    """
    with mx.Context(mx.gpu(7)):
        pred_ret = []
        rootdir = f'../models/remote/{_dataset_id}/{_task_id}-{trainid}/'

        # All serialized variants share the load/predict path; only the
        # directory name differs (this table replaces six duplicated branches).
        p = prediction_length
        model_dirs = {
            'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{p}-e1000-r1_curtrack_t{p}',
            'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{p}-e1000-r1_zerotrack_t{p}',
            'oracle': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{p}-e1000-r1_oracle_t{p}',
            'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{p}-e1000-r1_oracle-laponly_t{p}',
            'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{p}-e1000-r1_oracle-trackonly_t{p}',
            'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{p}-e1000-r1_deepar_t{p}',
        }

        predictor = None
        if model_name in model_dirs:
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(rootdir + model_dirs[model_name]))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq=freq, prediction_length=prediction_length)
        elif model_name == 'zero':
            # zero keeps the rank unchange
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq=freq, prediction_length=prediction_length)
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq=freq,
                                           prediction_length=prediction_length, trunc_length=60)
        else:
            print(f'error: model {model_name} not support yet!')

        if predictor is not None:
            tss, forecasts = predict(test_ds, predictor)
            pred_ret = [tss, forecasts]
        return pred_ret
def load_model(prediction_length, model_name, trainid, epochs=1000, exproot='../models/remote'):
    """Load (or construct) the predictor selected by *model_name*.

    Serialized deepAR variants are deserialized from
    {exproot}/{_dataset_id}/{_task_id}-{trainid}/; 'naive', 'zero' and
    'arima' predictors are constructed directly.  *epochs* only affects
    the 'oracle' (deepARW) directory name.

    Returns:
        The predictor, or None for an unknown model name (bug fix: the
        original returned an unbound local, raising UnboundLocalError).
    """
    with mx.Context(mx.gpu(7)):
        #rootdir = f'../models/{exproot}/{_dataset_id}/{_task_id}-{trainid}/'
        rootdir = f'{exproot}/{_dataset_id}/{_task_id}-{trainid}/'

        # name -> serialized model directory (replaces seven duplicated
        # deserialize branches). 'oracle' deliberately points at the
        # weighted (deepARW) model and honors the epochs argument.
        p = prediction_length
        model_dirs = {
            'curtrack': f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{p}-e1000-r1_curtrack_t{p}',
            'zerotrack': f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{p}-e1000-r1_zerotrack_t{p}',
            'weighted-oracle': f'deepARW-Oracle-{_task_id}-all-indy-f1min-t{p}-e1000-r1_oracle_t{p}',
            'oracle': f'deepARW-Oracle-{_task_id}-all-indy-f1min-t{p}-e{epochs}-r1_oracle_t{p}',
            'oracle-laponly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{p}-e1000-r1_oracle-laponly_t{p}',
            'oracle-trackonly': f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{p}-e1000-r1_oracle-trackonly_t{p}',
            'deepAR': f'deepAR-{_task_id}-all-indy-f1min-t{p}-e1000-r1_deepar_t{p}',
        }

        predictor = None
        if model_name in model_dirs:
            model = model_dirs[model_name]
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(rootdir + model))
            print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq=freq, prediction_length=prediction_length)
        elif model_name == 'zero':
            # zero keeps the rank unchange
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq=freq, prediction_length=prediction_length)
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima', freq=freq,
                                           prediction_length=prediction_length, trunc_length=60)
        else:
            print(f'error: model {model_name} not support yet!')

        return predictor
# In[7]:
#calc rank
def eval_rank_bytimediff(test_ds, tss, forecasts, prediction_length):
    """
    timediff models
    works for one event only

    Groups the forecasts by split point (completed_laps), stacks every car's
    true/predicted timediff window and derives ranks by argsort.

    input:
        test_ds ; the test dataset (to recover the car numbers via decode_carids)
        tss, forecasts ; output of predict()
        prediction_length ; expected forecast window size
    return:
        rank_ret ; list of [lap, time_diff(2,ncar,plen), true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [true_window, pred_window]}}
        (NOTE: a window-size mismatch returns a bare [] instead of the pair)
    """
    carlist = []

    # carno-lap# -> elapsed_time[] array
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        #global carid
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: this was a plain string, so the braces printed literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []

        # per-lap point forecast (median across sample paths)
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))

        timediff_array = tss[idx].values.copy()

        #save the prediction
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                                               forecast_laptime_mean.copy()]

    # calc rank
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}

        #fill in data: [0]=true, [1]=pred
        # (prediction_len deliberately reused from the loop above)
        time_diff = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        #calculate rank: double argsort turns values into 0-based ranks
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)

        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)

        rank_ret.append([lap, time_diff, true_rank, pred_rank])

    return rank_ret, forecasts_et
#calc rank
def eval_laptime(test_ds, tss, forecasts, prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event
                (NOTE(review): currently unused — the elapsed-time path is
                commented out and raw lap times are compared instead)
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret ; [lap, lap_time(2,ncar,plen), true_laptime, pred_laptime]
        forecasts_et ; {completed_laps: {carno: [true_window, pred_window]}}
        (NOTE: a window-size mismatch returns a bare [] instead of the pair)
    """
    carlist = []

    # carno-lap# -> elapsed_time[] array
    forecasts_et = dict()

    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        #global carid
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)

        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bug fix: this was a plain string, so the braces printed literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []

        # per-lap point forecast (mean across sample paths)
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))

        laptime_array = tss[idx].values.copy()
        # predicted series: truth with the forecast window substituted
        laptime_array_hat = tss[idx].values.copy()
        laptime_array_hat[-prediction_len:] = forecast_laptime_mean

        #save the prediction
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [laptime_array[-prediction_len:].copy(),
                                               laptime_array_hat[-prediction_len:].copy()]

    # calc rank
    rank_ret = []
    for lap in forecasts_et.keys():
        #get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}

        #fill in data: [0]=true, [1]=pred
        # (prediction_len deliberately reused from the loop above)
        lap_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            lap_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            lap_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))

        true_laptime = lap_time[0]
        pred_laptime = lap_time[1]

        rank_ret.append([lap, lap_time, true_laptime, pred_laptime])

    return rank_ret, forecasts_et
#calc rank
def eval_rank(test_ds, tss, forecasts, prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting

    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                  contain features to identify the eventid
        tss, forecasts ; forecast result (from make_evaluation_predictions)
        prediction_length ; expected forecast horizon
        start_offset ; elapsed time for lap0 for one specific event; when it is a
                  pd.DataFrame the lap times are accumulated into elapsed time
                  ("laptime2rank" mode), otherwise the target is ranked directly
    return:
        rank_ret ; list of [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: (elapsed_time, elapsed_time_pred)}}
    """
    carlist = []
    # completed_laps -> {carno: [truth tail, prediction tail]}
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the global car id back to the car number
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number'] == carno)].elapsed_time.values[0]
        # calc elapsed time
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            # bugfix: the message was a plain string missing the f-prefix,
            # so the placeholders were printed literally
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            # bugfix: keep the 2-tuple return shape so callers can always unpack
            return [], {}
        if _use_mean:
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))
        else:
            forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len, 1))
        if isinstance(start_offset, pd.core.frame.DataFrame):
            # laptime2rank: accumulate lap times into elapsed time
            laptime_array = tss[idx].values.copy()
            elapsed_time = np.cumsum(laptime_array) + offset
            laptime_array = tss[idx].values.copy()
            laptime_array[-prediction_len:] = forecast_laptime_mean
            elapsed_time_hat = np.cumsum(laptime_array) + offset
        else:
            # rank-direct: rank on the raw target values
            elapsed_time = tss[idx].values.copy()
            elapsed_time_hat = tss[idx].values.copy()
            elapsed_time_hat[-prediction_len:] = forecast_laptime_mean
        # save the prediction, keyed by the lap where the forecast starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                                               elapsed_time_hat[-prediction_len:].copy()]
    # calc rank
    rank_ret = []
    for lap in forecasts_et.keys():
        # get car list for this lap
        carlist = list(forecasts_et[lap].keys())
        caridmap = {key: idx for idx, key in enumerate(carlist)}
        # fill in data: [truth/pred, car, step]
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        # argsort twice turns values into dense rank positions (0 = leader)
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])
    return rank_ret, forecasts_et
def get_acc(rank_ret, prediction_length, verbose=False):
    """
    Aggregate rank-accuracy metrics over a list of rank records.

    input:
        rank_ret: list of [lap, elapsed_time, true_rank, pred_rank];
                  only columns [2] and [3] are used
    return:
        ((metrics...), (record counts...)) — the counts allow the caller to
        compute micro/macro averages across multiple calls
    """
    # running totals over all records
    top1 = top1_far = top5 = top5_far = 0
    tau_total = 0
    mse_total = 0.
    abs_total = 0.
    for entry in rank_ret:
        gt = entry[2]
        pr = entry[3]
        # rank 0 is the leader; count positions predicted correctly
        top1 += np.sum((gt == 0) & (pr == 0))
        top1_far += np.sum((gt[:, -1] == 0) & (pr[:, -1] == 0))
        # top-5 membership agreement
        top5 += np.sum((gt < 5) & (pr < 5))
        top5_far += np.sum((gt[:, -1] < 5) & (pr[:, -1] < 5))
        # rank correlation
        ktau, _ = stats.kendalltau(gt, pr)
        tau_total += ktau
        mse_total += mean_squared_error(pr, gt)
        abs_total += np.sum(np.abs(pr - gt))
    recnt = len(rank_ret)
    if recnt > 0:
        top1 = top1 * 1.0 / (recnt * prediction_length)
        top1_far = top1_far * 1.0 / recnt
        top5 = top5 * 1.0 / (5 * recnt * prediction_length)
        top5_far = top5_far * 1.0 / (5 * recnt)
        tau_total = tau_total / recnt
        mse_total = mse_total / recnt
        abs_total = abs_total / recnt
    # debug only: when ranking lap status directly, report mae in the tau slot
    if _run_ts == COL_LAPSTATUS:
        tau_total = abs_total
    if verbose:
        print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
        print('top1acc=', top1,
              'top1acc_farmost=', top1_far,
              'top5acc=', top5,
              'top5acc_farmost=', top5_far,
              )
        print('tau = ', tau_total,
              'rmse = ', mse_total,
              'mae = ', abs_total)
    return ((top1, top1_far, top5, top5_far, tau_total, mse_total),
            (recnt * prediction_length, recnt, 5 * recnt * prediction_length, 5 * recnt, recnt, recnt))
# In[ ]:
# In[8]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc, datamode='', models=None):
    """
    Run the configured experiment `runs` times and average the metrics.

    input:
        runs ; number of repetitions
        plens ; prediction lengths, e.g. [2, 5, 10]
        half ; half_moving_win modes, e.g. [True, False]
        trainids ; e.g. ["r0.5"] or ["indy500-r0.2", "indy500-r0.4", "indy500"]
        train_ratio ;
        testfunc ; a callable (run_exp_predpit, run_exp_predtrack, run_exp, ...),
                   or a string, in which case run_exp is invoked with
                   datamode/models (both must then be provided)
        models ; model names, only used when testfunc is a string
    raises:
        ValueError ; on empty settings, or a string testfunc without
                     datamode/models.  (The original printed an error and
                     returned None, which made callers fail later with an
                     opaque unpack TypeError.)
    return:
        dfret ; average result of multiple runs
            dataframe['model', 'prediction_length', 'halfmode', 'datamode', 'trainid',
                      'top1acc', 'top1acc_farmost', 'top5acc', 'top5acc_farmost', 'tau', 'rmse',
                      'top1acc_std', ..., 'rmse_std']
        alldata_ret ; for debug
            [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
    """
    # avoid a shared mutable default argument
    if models is None:
        models = []
    if plens == [] or half == [] or trainids == []:
        raise ValueError('empty settings: plens/half/trainids must be non-empty')
    # a string testfunc means "use run_exp", which needs datamode and models
    if isinstance(testfunc, str) and (datamode == '' or models == []):
        raise ValueError('string testfunc requires non-empty datamode and models')
    allret = []
    alldata_ret = []
    for runid in range(runs):
        exp_data = []
        exp_result = []
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('=' * 10)
                    if not isinstance(testfunc, str):
                        pred_ret, test_ds, rank_ret, metric_ret = testfunc(
                            plen, halfmode,
                            train_ratio=train_ratio,
                            trainid=trainid)
                    else:
                        pred_ret, test_ds, rank_ret, metric_ret = run_exp(
                            plen, halfmode,
                            train_ratio=train_ratio,
                            trainid=trainid,
                            datamode=datamode,
                            models=models)
                    # save this configuration's raw output and metrics
                    exp_data.append((pred_ret, test_ds, rank_ret))
                    exp_result.extend(metric_ret)
        # save the result of this run
        result = pd.DataFrame(exp_result, columns=['model', 'prediction_length', 'halfmode',
                                                   'datamode', 'trainid',
                                                   'top1acc', 'top1acc_farmost', 'top5acc',
                                                   'top5acc_farmost', 'tau', 'rmse'])
        allret.append(result)
        alldata_ret.append(exp_data)
    # average / std of the numeric metrics across runs
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :, :] = ret[['top1acc', 'top1acc_farmost', 'top5acc',
                                    'top5acc_farmost', 'tau', 'rmse']].values
    averagemat = np.mean(metrics[:, :, :], axis=0)
    stdmat = np.std(metrics[:, :, :], axis=0)
    dfhead = allret[0][['model', 'prediction_length', 'halfmode', 'datamode', 'trainid']]
    dfaverage = pd.DataFrame(averagemat, columns=['top1acc', 'top1acc_farmost', 'top5acc',
                                                  'top5acc_farmost', 'tau', 'rmse'])
    dfstd = pd.DataFrame(stdmat, columns=['top1acc_std', 'top1acc_farmost_std', 'top5acc_std',
                                          'top5acc_farmost_std', 'tau_std', 'rmse_std'])
    dfret = pd.concat([dfhead, dfaverage, dfstd], axis=1)
    return dfret, alldata_ret
def checkret_status(dataret, runid=0, idx=0, model='oracle'):
    """
    Print the total yellow-flag and pit counts inside the prediction windows
    of one (runid, idx, model) slot of a run_test debug result.

    dataret layout: [runid][halfmode,plen,trainid] -> (pred_ret, test_ds, rank_ret)
        pred_ret[model] -> [tss, forecasts]
        test_ds[model] -> test_ds
    """
    run_slot = dataret[runid][idx]
    _, plen = run_slot[0][model][1][0].samples.shape
    test_ds = run_slot[1][model]
    yellow_total = 0
    pit_total = 0
    for rec in test_ds:
        # lookup kept to validate the car id mapping
        _ = decode_carids[rec['feat_static_cat'][0]]
        track_part, lap_part = rec['feat_dynamic_real']
        yellow_total += np.sum(track_part[-plen:])
        pit_total += np.sum(lap_part[-plen:])
    print('yfcnt:', yellow_total, 'pitcnt:', pit_total)
def get_ref_oracle_testds(plens, halfs, train_ratio=0.8, test_cars=None):
    """
    Build the reference oracle test datasets for every
    (prediction_length, half_moving_win) combination.

    input:
        plens ; prediction lengths
        halfs ; half_moving_win modes
        train_ratio ;
        test_cars ; optional car filter passed through to make_dataset_byevent
    return:
        testset ; {'<plen>-<halfmode>': test_ds}
    """
    # fix: avoid a shared mutable default argument (was test_cars=[])
    if test_cars is None:
        test_cars = []
    testset = {}
    for prediction_length in plens:
        for half_moving_win in halfs:
            train_ds, test_ds, _, _ = make_dataset_byevent(events_id[_test_event], prediction_length, freq,
                                                           oracle_mode=MODE_ORACLE,
                                                           run_ts=_run_ts,
                                                           test_cars=test_cars,
                                                           half_moving_win=half_moving_win,
                                                           train_ratio=train_ratio)
            # key combines the two loop dimensions
            key = '%d-%d' % (prediction_length, half_moving_win)
            testset[key] = test_ds
    return testset
def checkret_confusionmat(dataret, ref_testset, runid= 0, testid = '', model='oracle'):
    """
    output the 4x4 confusion matrix split by track and lap status

    Splits the rank records of one run by whether the prediction window
    contains yellow flags and/or pit stops (types '00','10','01','11',
    first char = track/yellow, second = lap/pit), computes get_acc for
    each split plus an 'aa' row over everything, and stacks the results.

    input:
        dataret ; run_test debug output
            [runid][idx] -> (pred_ret, test_ds, rank_ret)
        ref_testset ; {'<plen>-0': oracle test ds} from get_ref_oracle_testds;
            the oracle ds supplies the true track/lap features
        runid ; which repetition of the run to analyze
        testid ; label copied into the output rows
    return:
        dfacc ; DataFrame['testid','plen','type','reccnt', get_acc metrics...]
    """
    plen_length = len(dataret[runid])
    dflist = []
    for idx in range(plen_length):
        # recover the prediction length from the forecast samples
        _, plen = dataret[runid][idx][0][model][1][0].samples.shape
        test_ds = dataret[runid][idx][1][model]
        rank_ret = dataret[runid][idx][2][model][0]
        key = '%d-%d'%(plen,0)
        if key not in ref_testset:
            print(f'error, {key} not found in ref_testset')
            continue
        ref_oracle_testds = ref_testset[key]
        if len(ref_oracle_testds) != len(test_ds):
            print('error, size of testds mismatch', len(ref_oracle_testds), len(test_ds))
            continue
        # confusion matrix for <trackstatus, lapstatus> type: 00,01,10,11
        # lap(start lap of prediction) -> type
        lapmap = {}
        ds_iter = iter(ref_oracle_testds)
        for recid in range(len(ref_oracle_testds)):
            test_rec = next(ds_iter)
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            track_rec,lap_rec = test_rec['feat_dynamic_real']
            # counts inside the prediction window only
            yfcnt = np.sum(track_rec[-plen:])
            pitcnt = np.sum(lap_rec[-plen:])
            #laptype = ('0' if yfcnt==0 else '1') + ('0' if pitcnt==0 else '1')
            lap = len(track_rec) - plen + 1
            if lap not in lapmap:
                #lapmap[lap] = laptype
                lapmap[lap] = (yfcnt, pitcnt)
            else:
                # aggregate over all cars starting at the same lap
                oldtype = lapmap[lap]
                lapmap[lap] = (yfcnt + oldtype[0], pitcnt + oldtype[1])
        #split the rank_ret by laptype
        types=['00','10','01','11']
        acc_ret = []
        for laptype in types:
            check_ret = []
            for item in rank_ret:
                typecnt = lapmap[item[0]]
                thetype = ('0' if typecnt[0]==0 else '1') + ('0' if typecnt[1]==0 else '1')
                if thetype == laptype:
                    check_ret.append(item)
            # get acc
            metrics = get_acc(check_ret,plen)
            recret = [testid, plen, laptype, len(check_ret)]
            recret.extend(metrics[0])
            acc_ret.append(recret)
        #add all test ('aa' = all records, regardless of type)
        metrics = get_acc(rank_ret,plen)
        recret = [testid, plen, 'aa', len(rank_ret)]
        recret.extend(metrics[0])
        acc_ret.append(recret)
        _dfacc = pd.DataFrame(acc_ret, columns = ['testid','plen',
                'type','reccnt','top1acc','top1acc_farmost','top5acc',
                'top5acc_farmost','tau','rmse'])
        dflist.append(_dfacc)
    # NOTE(review): raises if dflist is empty (all keys missing) — confirm callers
    dfacc = pd.concat(dflist, axis=0)
    return dfacc
# In[9]:
def check_testds(datamode, test_cars=[]):
    """
    report mae, etc

    Builds the dataset for every (prediction_length, half_moving_win)
    combination under the given oracle mode; the reporting itself happens
    inside make_dataset_byevent, so the returned datasets are discarded.
    Reads module globals: plens, half, train_ratio, events_id, _test_event,
    freq, _run_ts.
    """
    for plen in plens:
        for win_mode in half:
            _, _, _, _ = make_dataset_byevent(events_id[_test_event], plen, freq,
                                              oracle_mode=datamode,
                                              run_ts=_run_ts,
                                              test_cars=test_cars,
                                              half_moving_win=win_mode,
                                              train_ratio=train_ratio)
def dotest(config):
    """
    Run every (model, testfunc -> datamode) pair in *config* through run_test
    and checkret_confusionmat, then stack the per-pair frames.

    input:
        config ; {model: {testfunc_name: datamode}}
    return:
        (dfret, dfacc) ; concatenated run_test averages and confusion-split accuracies
    """
    result_frames = []
    acc_frames = []
    for model, conf in config.items():
        for teststr, datamode in conf.items():
            df, dataret = run_test(runs, plens, half, trainids,
                                   train_ratio, teststr, datamode=datamode, models=[model])
            acc = checkret_confusionmat(dataret, ref_testset,
                                        testid=teststr, model=model)
            result_frames.append(df)
            acc_frames.append(acc)
    dfret = pd.concat(result_frames, axis=0)
    dfacc = pd.concat(acc_frames, axis=0)
    return dfret, dfacc
#
# simulator
#
def get_pitlaps(verbose=True, prediction_length=2):
    """
    Collect pit-stop lap info from COL_LAPSTATUS for the test event.

    reads module globals: laptime_data, _test_event, events, _train_len,
    global_carids, _run_ts, _inlap_status, _include_endpit
    return:
        ret_pitlaps ; sorted unique laps at which some car pits
        all_pitlaps ; carno -> list of pit laps (plus the final lap when
                      _include_endpit is set)
        max_lap ; longest ts length in the event
    """
    run_ts = _run_ts
    all_pitlaps = {}  # carno -> pit laps
    max_lap = 0
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        # statistics on the ts lengths
        lengths = [entry.shape[1] for entry in _data[2]]
        max_lap = int(np.max(lengths))
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber]
            rec = _data[2][rowid].copy()
            # drop the nan tail of the target row
            nans, _ = nan_helper(rec[run_ts, :])
            rec = rec[:, ~np.isnan(rec[run_ts, :])]
            totallen = rec.shape[1]
            # discard time series that are too short to train/predict on
            if totallen < _train_len + prediction_length:
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[carno]  # kept: validates the carno mapping
            static_cat = [carid]
            # laps flagged as pit stops in the ground truth
            pitstops = np.where(rec[COL_LAPSTATUS, :] == 1)[0]
            # filter out inlaps when they are encoded in COL_LAPSTATUS
            if _inlap_status == 1:
                # inlap precedes the stop: keep every second entry from index 1
                pitstops = [pitstops[x] for x in range(1, len(pitstops), 2)]
            elif _inlap_status == 2:
                # inlap follows the stop: keep every second entry from index 0
                pitstops = [pitstops[x] for x in range(0, len(pitstops), 2)]
            all_pitlaps[carno] = list(pitstops)
            # optionally treat the final lap as a pit boundary
            if _include_endpit:
                all_pitlaps[carno].append(totallen - 1)
    merged = []
    for laps in all_pitlaps.values():
        merged.extend(laps)
    ret_pitlaps = sorted(set(merged))
    return ret_pitlaps, all_pitlaps, max_lap
def get_nextpit_raw(pitlaps, startlap):
    """
    Find, for each car, its first pit stop strictly after `startlap`.

    input:
        pitlaps ; {carno: [pit laps]} for all the cars
        startlap ;
    return:
        nextpit_map ; {carno: next pit lap} — cars with no later pit are omitted
        maxnext ; the latest "next pit" over all cars; -1 when no car has a
                  later pit.  (Robustness fix: the original called max() on an
                  empty list when `pitlaps` was empty, raising ValueError.)
    """
    nextpit = []
    nextpit_map = {}
    for carno, rec in pitlaps.items():
        # first lap in this car's list that lies after startlap
        for lap in rec:
            if lap > startlap:
                nextpit.append(lap)
                nextpit_map[carno] = lap
                break
        else:
            # no later pit for this car; -1 keeps the max() well-defined
            nextpit.append(-1)
    # guard against an empty pitlaps dict
    maxnext = max(nextpit) if nextpit else -1
    return nextpit_map, maxnext
def get_nextpit(pitlaps, startlap):
    """
    Find each car's first pit stop strictly after `startlap`.

    input:
        pitlaps ; {carno: [pit laps]} for all the cars
        startlap ;
    return:
        nextpit_map ; {carno: next pit lap}, cars with no later pit omitted
        maxpit ; latest next pit among cars that pit exactly AT `startlap`
                 ("hit" cars); -1 when there are none
    """
    nextpit_map = {}
    nextpit = []
    # cars whose pit list contains startlap itself
    hit_cars = [carno for carno, laps in pitlaps.items() if startlap in laps]
    # first upcoming pit for every car
    for carno, laps in pitlaps.items():
        upcoming = [lap for lap in laps if lap > startlap]
        if upcoming:
            nextpit_map[carno] = upcoming[0]
            nextpit.append(upcoming[0])
        else:
            nextpit.append(-1)
    # farthest next pit restricted to the hit cars
    maxpit = -1
    for carno in hit_cars:
        if carno in nextpit_map:
            maxpit = max(nextpit_map[carno], maxpit)
    return nextpit_map, maxpit
def sim_init():
    """
    Back up the pit-model related rows (lap status, caution laps in stint,
    laps in stint) of every record in the test event into their *_SAVE rows,
    so a simulation can later restore/overwrite them.
    """
    run_ts = _run_ts  # kept for parity with the other sim_* helpers
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        # statistics on the ts lengths (mirrors the other sim_* loops)
        lengths = [entry.shape[1] for entry in _data[2]]
        max_lap = int(np.max(lengths))
        # snapshot the mutable features row by row (views, so in-place)
        for rec in _data[2]:
            rec[COL_LAPSTATUS_SAVE, :] = rec[COL_LAPSTATUS, :]
            rec[COL_CAUTION_LAPS_INSTINT_SAVE, :] = rec[COL_CAUTION_LAPS_INSTINT, :]
            rec[COL_LAPS_INSTINT_SAVE, :] = rec[COL_LAPS_INSTINT, :]
def update_lapstatus(startlap):
    """
    Re-generate the predicted lap status for every car in the test event
    from `startlap` onward, delegating the per-record work to update_onets.

    Mutates the records of laptime_data in place.
    """
    # cleanup: the original also computed run_ts, ts_len and max_lap here,
    # none of which were used
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        # process each car's time series
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status, timediff]
            rec = _data[2][rowid]
            carno = _data[1][rowid]
            update_onets(rec, startlap, carno)
# pit-stop prediction model used by update_onets(); must be assigned by the
# caller (e.g. a trained pit model instance) before running a simulation
_pitmodel = None
def update_onets(rec, startlap, carno):
    """
    Update the lap status of one record after `startlap` using the pit
    prediction model (_pitmodel).

    input:
        rec ; one ts with multiple feature rows (COL_XXX); modified IN PLACE
        startlap ; last lap whose ground-truth status is kept
        carno ; car number, used only for debug reporting
    effect:
        rec is updated for COL_LAPSTATUS, COL_CAUTION_LAPS_INSTINT and
        COL_LAPS_INSTINT; the *_SAVE rows provide the ground truth up to
        startlap, everything after is re-simulated.
    """
    # loop from startlap
    nans, x= nan_helper(rec[_run_ts,:])
    nan_count = np.sum(nans)
    # recx is only used to measure the valid (non-nan) length of the target
    recx = rec[:, ~np.isnan(rec[_run_ts,:])]
    # remove short ts
    totallen = recx.shape[1]
    if startlap >= totallen:
        return
    #totallen = tsrec.shape[1]
    #ipdb.set_trace()
    #reset status :startlap + 1 — restore ground truth up to and including startlap
    endpos = startlap + 1
    rec[COL_LAPSTATUS,:] = 0
    rec[COL_LAPSTATUS,:endpos] = rec[COL_LAPSTATUS_SAVE, :endpos]
    rec[COL_CAUTION_LAPS_INSTINT,:endpos] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :endpos]
    rec[COL_LAPS_INSTINT, :endpos] = rec[COL_LAPS_INSTINT_SAVE, :endpos]
    #rec[COL_LAPSTATUS,:] = rec[COL_LAPSTATUS_SAVE, :]
    #rec[COL_CAUTION_LAPS_INSTINT,:] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :]
    #rec[COL_LAPS_INSTINT, :] = rec[COL_LAPS_INSTINT_SAVE, :]
    debug_report('start update_onets', rec[COL_LAPSTATUS], startlap, carno)
    # loop: predict the next pit position repeatedly until past the end
    curpos = startlap
    while True:
        caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, curpos])
        laps_instint = int(rec[COL_LAPS_INSTINT, curpos])
        # the model predicts the stint length; convert it to an absolute lap
        pred_pit_laps = _pitmodel.predict(caution_laps_instint, laps_instint)
        nextpos = curpos + pred_pit_laps - laps_instint
        #debug
        #if carno == 12:
        #    print('pitmodel: startlap={}, laps_instint={}, cuation_laps={}, \
        #            nextpos={}'.format(curpos, laps_instint, caution_laps_instint, nextpos))
        if nextpos >= totallen:
            # predicted pit falls beyond the race: fill the tail and stop
            nextpos = totallen - 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos+1] = caution_laps_instint
            for _pos in range(curpos+1, nextpos+1):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            break
        else:
            #if (pred_pit_laps > laps_instint) and (nextpos < totallen):
            # a valid pit
            rec[COL_LAPSTATUS, nextpos] = 1
            if _inlap_status != 0:
                #inlap is 'P'
                if _inlap_status == 1 :
                    #rec[COL_LAPSTATUS, nextpos-1] = _inlap_status
                    rec[COL_LAPSTATUS, nextpos-1] = 1
                else:
                    #todo: no boudary check
                    #rec[COL_LAPSTATUS, nextpos+1] = _inlap_status
                    rec[COL_LAPSTATUS, nextpos+1] = 1
            # stint counters: carry the caution count, reset both at the pit
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos] = caution_laps_instint
            rec[COL_CAUTION_LAPS_INSTINT, nextpos] = 0
            for _pos in range(curpos+1, nextpos):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            rec[COL_LAPS_INSTINT, nextpos] = 0
            #go forward
            curpos = nextpos
    debug_report('after update_onets', rec[COL_LAPSTATUS], startlap, carno)
    return
def debug_pitmodel(startlap, carno, laps_instint, caution_laps_instint, samplecnt=1000):
    """
    Sample the pit model `samplecnt` times from the given stint state.

    return:
        list of predicted next-pit lap numbers (absolute laps)
    """
    # the model predicts a stint length; convert each draw to an absolute lap
    return [startlap + _pitmodel.predict(caution_laps_instint, laps_instint) - laps_instint
            for _ in range(samplecnt)]
# debug tracking of the simulation status
# status matrix : laps x (endCol x 5 features)
# features: target, lapstatus, lap_instint, caution_instint, trackstatus
_status_mat = {}  # stepid -> status matrix
def debug_report_mat(startlap, maxnext):
    """
    output the status of the simulation

    NOTE(review): this helper looks unfinished — it walks the test event's
    records but never formats or prints anything; the locals below
    (fixedWidth, endCol, run_ts, rec, ...) are currently unused.
    """
    fixedWidth = 5
    endCol = 4
    run_ts = _run_ts
    for _data in laptime_data:
        if events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        #header carno | lap#...
        #fixed width
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
# car numbers to trace in the debug_* helpers; an empty list disables tracing
_debug_carlist = []
#_debug_carlist = [12]
def debug_report_ts(msg, rec, startlap, carno, col=COL_LAPSTATUS):
    """Print one feature row of *rec*, split at *startlap*, for traced cars."""
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[col, :startlap + 1])
        print('=' * 10)
        print(rec[col, startlap + 1:])
def debug_report(msg, rec, startlap, carno):
    """Print a 1-D array split at *startlap*, only for cars being traced."""
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[:startlap + 1])
        print('=' * 10)
        print(rec[startlap + 1:])
def debug_print(msg):
    """Print *msg* only when debug car tracing is enabled."""
    if _debug_carlist:
        print(msg)
# works on predicted lap status
def sim_onestep_pred(predictor, prediction_length, freq,
                     startlap, endlap,
                     oracle_mode = MODE_ORACLE,
                     sample_cnt = 100,
                     verbose = False
                     ):
    """
    One simulation sweep from startlap to endlap with autoregressive
    forecasting: each prediction_length window is predicted and written back
    into the target row, so the next window conditions on predicted values.
    Works on the lap status as produced by the pit-prediction model
    (COL_LAPSTATUS after update_lapstatus); compare sim_onestep_ex, which
    uses the ground-truth lap status.

    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
        forecast_samples; save the samples, the farest samples
            {}, carno -> samplecnt of the target
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    test_set = []
    forecasts_et = {}
    forecasts_samples = {}
    _laptime_data = laptime_data.copy()
    # first window ends one step past startlap's context
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + prediction_length + 1):
    while(endpos <= endlap + prediction_length):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            #ipdb.set_trace()
            if verbose:
                print(f'after ====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}, cars={_data[2].shape[0]}')
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # skip cars whose race already ended before this window
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # <3, totallen> — first time this car is seen: init its forecast matrix
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS_SAVE, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                    # for p-risk
                    forecasts_samples[carno] = np.zeros((sample_cnt))
                # forecasts_et will be updated by forecasts (autoregression)
                target_val = forecasts_et[carno][2,:]
                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test mode
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #        laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set
                #train real features
                real_features = get_real_features(feature_mode, rec, endpos)
                _test.append({'target': target_val[:endpos].astype(np.float32),
                              'start': start,
                              'feat_static_cat': static_cat,
                              'feat_dynamic_real': real_features
                             }
                            )
                test_rec_cnt += 1
                #debug
                #debug_report('simu_onestep', rec, startlap, carno, col= _run_ts)
                #debug_report(f'simu_onestep: {startlap}-{endlap}, endpos={endpos}', target_val[:endpos], startlap, carno)
            # end of for each ts
        # RUN Prediction here
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=sample_cnt,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            if _use_mean:
                forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            else:
                forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_length))
            #update the forecasts , ready to use in the next prediction(regresive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
            #save the samples, the farest samples
            forecasts_samples[carno][:] = forecasts[idx].samples[:,-1].reshape(-1)
        #go forward
        endpos += prediction_length
    return forecasts_et, forecasts_samples
# works on lapstatus ground truth
def sim_onestep_ex(predictor, prediction_length, freq,
                   startlap, endlap,
                   oracle_mode = MODE_ORACLE,
                   verbose = False
                   ):
    """
    One simulation sweep from startlap to endlap with autoregressive
    forecasting, working on the GROUND-TRUTH lap status (compare
    sim_onestep_pred, which uses the pit-model prediction).

    input:
        parameters ; same as longterm_predict, make_dataset_byevent
        startlap
        endlap
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder
    """
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    test_set = []
    forecasts_et = {}
    _laptime_data = laptime_data.copy()
    # first window ends one step past startlap's context
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + 1):
    while(endpos <= endlap + prediction_length + 1):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if events[_data[0]] != test_event:
                #jump out
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                # skip cars whose race already ended before this window
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # <3, totallen> — first time this car is seen: init its forecast matrix
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                    #forecasts_et[carno][2,:endpos] = rec[run_ts,:endpos].copy().astype(np.float32)
                # forecasts_et will be updated by forecasts (autoregression)
                target_val = forecasts_et[carno][2,:]
                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test mode
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #        laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set; feature layout depends on the feature mode
                if feature_mode == FEATURE_STATUS:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec]
                                 }
                                )
                elif feature_mode == FEATURE_PITAGE:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                 }
                                )
                test_rec_cnt += 1
            # end of for each ts
        # RUN Prediction here
        test_ds = ListDataset(_test, freq=freq)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=100,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        #save the forecast results
        ds_iter = iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
            #update the forecasts , ready to use in the next prediction(regresive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
        #go forward
        endpos += prediction_length
    #clear the unpred part
    for carno in forecasts_et.keys():
        forecasts_et[carno][2, endlap+1:] = np.nan
    return forecasts_et
def sim_onestep(predictor, prediction_length, freq,
                startlap, endlap,
                oracle_mode = MODE_ORACLE,
                verbose = False
                ):
    """
    Autoregressive one-step simulation over [startlap, endlap] for the test event.

    Starting from lap `startlap`, repeatedly forecast `prediction_length` laps
    ahead and feed the forecast back into the target series, so each window is
    predicted on top of the previous predictions (regressive forecasting).

    input:
        predictor ; trained GluonTS predictor
        prediction_length ; forecast horizon per step (laps)
        freq ; pandas frequency string for the synthetic timestamps
        startlap ; first lap whose forecast window ends at startlap+prediction_length
        endlap ; stop when the window end reaches this lap (exclusive)
        oracle_mode ; bit flags controlling which covariates are oracle/zeroed/predicted
        verbose ; print per-ts diagnostics
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target (overwritten window-by-window)
            3, -> placeholder
            4, -> placeholder

    NOTE(review): relies on module globals (_run_ts, _test_event, _feature_mode,
    _context_ratio, _train_len, laptime_data, events, global_carids).
    """
    # snapshot the module-level configuration for this run
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len

    # synthetic start timestamp; can be different for each time series
    start = pd.Timestamp("01-01-2019", freq=freq)

    test_set = []        # NOTE(review): unused in this function
    forecasts_et = {}    # result: carno -> 5 x totallen matrix

    # work on a copy so the global laptime_data is not mutated
    _laptime_data = laptime_data.copy()

    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]          # NOTE(review): accumulated nowhere in this function

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _test = []

        # only simulate the configured test event
        if events[_data[0]] != test_event:
            #jump out
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]

            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            # keep pristine copies of the status covariates (rec is overwritten below)
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()

            # <3, totallen> -- rows 3,4 stay nan (placeholders for ranks)
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)

            # selection of features: zero out covariates excluded by the mode flags
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if True:
                #bug fix, fixed the split point for all cars/ts
                #for endpos in range(max_len, context_len+prediction_length,step):
                #step = prediction_length
                #for endpos in range(startlap + prediction_length, endlap, step):
                # slide the window end forward by prediction_length each step
                endpos = startlap + prediction_length
                while(endpos < endlap and endpos < totallen):
                    # RUN Prediction for single record
                    _test = []

                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    # stint statistics at the last observed lap (just before the window)
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                    # test mode: overwrite the covariates inside the forecast window
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0

                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0

                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #todo
                        #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                        #                laps_instint,prediction_length)

                        #for pitage, use the predicted lap info to update pitage
                        # pitage counts up each lap and resets to 0 at a pit lap
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    # build the single-record test set; target already contains
                    # earlier forecasts (autoregressive feed-back)
                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec]
                                      }
                                     )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                      }
                                     )

                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)

                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )

                    forecasts = list(forecast_it)
                    tss = list(ts_it)

                    #get prediction result: mean over sample paths
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))

                    #update target_val so the next step conditions on this forecast
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    # write the (possibly overwritten) covariates back into rec
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]

                    #save forecast
                    #save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    #print('car no:', carno, 'completed_laps:', completed_laps)
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()

                    test_rec_cnt += 1

                    #go forward
                    endpos += prediction_length

            #one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

    return forecasts_et
# pred pit differs to true pit
def get_acc_onestint_pred(forecasts, startlap, nextpit, nextpit_pred, trim=2, currank = False):
    """
    Evaluate per-car rank change for the stint starting at `startlap`,
    scoring the predicted rank at the *predicted* next pit lap against the
    true rank at the *true* next pit lap.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; eval for the stint start from startlap only
        nextpit ; {carno: true next pit lap}
        nextpit_pred ; {carno: predicted next pit lap}
        currank ; if True, use the current-rank baseline (prediction = start rank)
    output:
        list of rows:
        [carno, startlap, startrank, endrank, diff, sign,
         pred_endrank, pred_diff, pred_sign, pitpos, pitpos_pred]
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]

        # lap status condition: is there a pit stop marking a stint start at
        # startlap? _inlap_status selects the in-lap encoding:
        #   0: pit lap only; 1: pit lap and the previous lap; 2: pit lap and the next lap.
        # Bug fix: the original indexed forecasts[carno][0, startlap] before the
        # startlap < lapnum guard (IndexError for short series), could read one
        # past the end for _inlap_status == 2, and left lapstatus_cont unbound
        # for any other _inlap_status value. All of those now yield False.
        lapstatus_cont = False
        if startlap < lapnum:
            if _inlap_status == 0:
                lapstatus_cont = (forecasts[carno][0, startlap] == 1)
            elif _inlap_status == 1:
                # NOTE(review): startlap-1 wraps to the last lap when startlap==0 -- confirm intent
                lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap-1] == 1))
            elif _inlap_status == 2:
                lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (startlap + 1 < lapnum)
                        and (forecasts[carno][0, startlap+1] == 1))

        if carno in _debug_carlist:
            _debug_msg = 'startlap=%d, total=%d, pitstop status = %s, nextpit=%s, nextpit_pred=%s'%(startlap, lapnum, lapstatus_cont,
                    'none' if (carno not in nextpit) else nextpit[carno],
                    'none' if (carno not in nextpit_pred) else nextpit_pred[carno],
                    )
            debug_print(_debug_msg)

        # check the lap status
        #if ((startlap < lapnum) and (forecasts[carno][0, startlap] == 1)):
        if ((startlap < lapnum) and (lapstatus_cont == True)):
            startrank = true_rank[startlap-trim]

            if not carno in nextpit:
                continue
            pitpos = nextpit[carno]
            if np.isnan(pitpos):
                continue

            #todo, use the true prediction that longer than maxlap
            if _force_endpit_align:
                # fall back to the true pit position when no prediction exists
                if not carno in nextpit_pred:
                    #continue
                    pitpos_pred = pitpos
                else:
                    pitpos_pred = nextpit_pred[carno]
                    if np.isnan(pitpos_pred):
                        pitpos_pred = pitpos
            else:
                if not carno in nextpit_pred:
                    continue
                pitpos_pred = nextpit_pred[carno]
                if np.isnan(pitpos_pred):
                    #set prediction to the end
                    continue

            endrank = true_rank[pitpos-trim]
            #endrank_pred = true_rank[pitpos_pred-trim]
            diff = endrank - startrank
            sign = get_sign(diff)

            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                pred_endrank = pred_rank[pitpos_pred-trim]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)

            rankret.append([carno, startlap, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign,
                            pitpos, pitpos_pred
                            ])
    return rankret
# pred pit differs to true pit
def get_acc_onestep_shortterm(forecasts, startlap, endlap, trim=0, currank = False):
    """
    Collect per-car short-term rank-change rows for the window [startlap, endlap].

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; window start lap
        endlap ; window end lap (skipped when nan)
        currank ; if True, use the current-rank baseline (prediction = start rank)
    output:
        list of rows:
        [carno, startlap, startrank, endrank, diff, sign,
         pred_endrank, pred_diff, pred_sign]
    """
    rows = []
    for carno, car_mat in forecasts.items():
        total_laps = len(car_mat[1, :])
        # skip cars whose series ended before the window starts
        if startlap >= total_laps:
            continue
        if np.isnan(endlap):
            continue

        rank_true = car_mat[3, :]
        rank_pred = car_mat[4, :]

        rank_at_start = rank_true[startlap - trim]
        rank_at_end = rank_true[endlap - trim]
        true_change = rank_at_end - rank_at_start

        # baseline model predicts "no change"; otherwise read the forecast rank
        forecast_end = rank_at_start if currank else rank_pred[endlap - trim]
        forecast_change = forecast_end - rank_at_start

        rows.append([carno, startlap, rank_at_start,
                     rank_at_end, true_change, get_sign(true_change),
                     forecast_end, forecast_change, get_sign(forecast_change)
                     ])
    return rows
# works when pred pitstop == true pitstop
def get_acc_onestint(forecasts, startlap, nextpit, trim=2, currank = False):
    """
    Evaluate per-car rank change for the stint starting at `startlap`,
    assuming the predicted pit stop equals the true pit stop.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; eval for the stint start from startlap only
        nextpit ; {carno: next pit lap}
        currank ; if True, use the current-rank baseline (prediction = start rank)
    output:
        list of rows:
        [carno, startlap, startrank, endrank, diff, sign,
         pred_endrank, pred_diff, pred_sign]
    """
    results = []
    for carno, car_mat in forecasts.items():
        n_laps = len(car_mat[1, :])

        # require a pit stop (stint start) at startlap, inside the series
        if not (startlap < n_laps and car_mat[0, startlap] == 1):
            continue
        if carno not in nextpit:
            continue
        pitpos = nextpit[carno]
        if np.isnan(pitpos):
            continue

        rank_true = car_mat[3, :]
        rank_pred = car_mat[4, :]

        startrank = rank_true[startlap - trim]
        endrank = rank_true[pitpos - trim]
        diff = endrank - startrank

        # baseline model predicts "no change"; otherwise read the forecast rank
        pred_endrank = startrank if currank else rank_pred[pitpos - trim]
        pred_diff = pred_endrank - startrank

        results.append([carno, startlap, startrank,
                        endrank, diff, get_sign(diff),
                        pred_endrank, pred_diff, get_sign(pred_diff)
                        ])
    return results
#
# simulation
#
def run_simulation_stint(predictor, prediction_length, freq,
                         carno, stintid, loopcnt,
                         datamode = MODE_ORACLE):
    """
    Simulation for one car at a specific stint, repeated `loopcnt` times.

    input:
        predictor, prediction_length, freq ; same as longterm_predict / make_dataset_byevent
        carno ; car to evaluate
        stintid ; index into this car's pit matrix, selects the stint start lap
        loopcnt ; number of independent simulation runs
        datamode ; oracle-mode bit flags forwarded to sim_onestep_pred
    return:
        (df, full_samples, full_tss, maxnext_pred)
        df ; DataFrame of per-run stint accuracy rows
        full_samples, full_tss ; always empty here (sample collection is commented out)
        maxnext_pred ; predicted next-pit lap from the last run
                       NOTE(review): unbound (NameError) when loopcnt <= 0

    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation
            2. eval stint performance
    """
    rankret = []

    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()

    # reset the simulation state (lap status model etc.)
    sim_init()

    #init samples array
    full_samples = {}
    full_tss = {}

    #here, test only one stint for carno and stintid
    pitlap = pitmat[carno][stintid]

    for runid in range(loopcnt):
    #for pitlap in allpits:
        #1. update lap status (skip when the pit model is the oracle)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')

        #2. get maxnext: true and predicted next-pit maps from pitlap
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)

        #only for one car: narrow the horizon to this car's next pit
        maxnext = nextpit[carno]
        maxnext_pred = nextpit_pred[carno]

        #debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                #print('nextpit:', nextpit[12], nextpit_pred[12], 'maxnext:', maxnext, maxnext_pred)
                #debugstr = f'nextpit: {nextpit[]}, {nextpit_pred[12]}, maxnext: {maxnext}, {maxnext_pred}'
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)

        #run one step sim from pitlap to maxnext
        #to get the forecast_sample, set max = mexnext_pred only,
        #rather than max(maxnext,maxnext_pred)
        #
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, maxnext_pred,
                oracle_mode = datamode,
                sample_cnt = 100
                )
        debug_print(f'simulation done: {len(forecast)}')

        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            # NOTE(review): bare return -> caller receives None instead of the 4-tuple
            return

        ## evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)

        #add endlap
        #_ = [x.append(maxnext_pred) for x in ret]

        rankret.extend(ret)

        ## add to full_samples
        #eval_full_samples(maxnext_pred,
        #        forecast_samples, forecast,
        #        full_samples, full_tss)

    #add to df; 11 columns match the rows produced by get_acc_onestint_pred
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                           'endrank', 'diff', 'sign',
                           'pred_endrank', 'pred_diff', 'pred_sign',
                           'endlap','pred_endlap'
                           ])
    return df, full_samples, full_tss, maxnext_pred
def run_simulation_pred(predictor, prediction_length, freq,
                        datamode = MODE_ORACLE, verbose = False):
    """
    Simulation with a predicted pit model over all true pit laps.

    input:
        predictor, prediction_length, freq ; same as longterm_predict
        datamode ; oracle-mode bit flags forwarded to sim_onestep_pred
        verbose ; forwarded to sim_onestep_pred
    return:
        DataFrame of per-stint accuracy rows (see get_acc_onestint_pred)

    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation
            2. eval stint performance
    """
    rankret = []

    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()

    # reset the simulation state (lap status model etc.)
    sim_init()

    for pitlap in allpits:
        #1. update lap status (skip when the pit model is the oracle)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')

        #2. get maxnext: true and predicted next-pit maps from pitlap
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)

        #debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                #print('nextpit:', nextpit[12], nextpit_pred[12], 'maxnext:', maxnext, maxnext_pred)
                #debugstr = f'nextpit: {nextpit[]}, {nextpit_pred[12]}, maxnext: {maxnext}, {maxnext_pred}'
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)

        #run one step sim from pitlap to maxnext (cover both horizons)
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, max(maxnext, maxnext_pred),
                oracle_mode = datamode,
                sample_cnt = 100,
                verbose = verbose
                )
        debug_print(f'simulation done: {len(forecast)}')

        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break

        # evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        rankret.extend(ret)

    #add to df; 11 columns match the rows produced by get_acc_onestint_pred
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                           'endrank', 'diff', 'sign',
                           'pred_endrank', 'pred_diff', 'pred_sign',
                           'endlap','pred_endlap'
                           ])
    return df
#prediction of shorterm + pred pit model
def run_simulation_shortterm(predictor, prediction_length, freq,
                             datamode = MODE_ORACLE,
                             sample_cnt = 100):
    """
    Short-term simulation: slide a fixed prediction_length window over the race
    (starting at lap 10) with the predicted pit model, and collect both
    per-window accuracy rows and the full forecast sample distributions.

    input:
        predictor, prediction_length, freq ; same as longterm_predict
        datamode ; oracle-mode bit flags forwarded to sim_onestep_pred
        sample_cnt ; number of sample paths per forecast
    return:
        (df, full_samples, full_tss)
        df ; DataFrame of per-window accuracy rows (see get_acc_onestep_shortterm)
        full_samples ; carno -> (sample_cnt, maxlap) forecast samples
        full_tss ; carno -> (maxlap,) true values

    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation
            2. eval stint performance
    """
    rankret = []

    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()

    # reset the simulation state (lap status model etc.)
    sim_init()

    #init samples array
    full_samples = {}
    full_tss = {}

    # slide the window start over [10, maxlap - prediction_length)
    for pitlap in range(10, maxlap-prediction_length):
        #1. update lap status (skip when the pit model is the oracle)
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')

        #run one step sim from pitlap to maxnext
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, pitlap + prediction_length,
                oracle_mode = datamode,
                sample_cnt = sample_cnt,
                verbose = True
                )
        debug_print(f'simulation done: {len(forecast)}')

        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, prediction_length)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, prediction_length, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break

        # evaluate for this stint
        #ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred)
        ret = get_acc_onestep_shortterm(forecasts_et, pitlap, pitlap+prediction_length)
        rankret.extend(ret)

        # add to full_samples; laptime experiments are scored on raw values,
        # everything else on ranks
        evalbyrank = False if _exp_id == 'laptime2rank' else True
        eval_full_samples(pitlap + prediction_length,
                forecast_samples, forecast,
                full_samples, full_tss, evalbyrank=evalbyrank)

        print('evalbyrank:', evalbyrank)

    #add to df; 9 columns match the rows produced by get_acc_onestep_shortterm
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                           'endrank', 'diff', 'sign',
                           'pred_endrank', 'pred_diff', 'pred_sign',
                           ])
    return df, full_samples, full_tss
# oracle sim
def run_simulation(predictor, prediction_length, freq,
                   datamode = MODE_ORACLE):
    """
    Oracle simulation driver: for every true pit lap, simulate forward to the
    next pit stop and score the per-stint rank changes.

    input:
        predictor, prediction_length, freq ; same as longterm_predict
        datamode ; oracle-mode bit flags forwarded to sim_onestep_ex
    return:
        DataFrame of per-stint accuracy rows (see get_acc_onestint)
    """
    all_rows = []
    pit_laps, pit_matrix, max_lap = get_pitlaps()

    for current_pit in pit_laps:
        print(f'start pitlap: {current_pit}')
        next_pits, horizon = get_nextpit(pit_matrix, current_pit)

        # one-step simulation from this pit lap up to the furthest next pit
        forecast = sim_onestep_ex(predictor, prediction_length, freq,
                current_pit, horizon,
                oracle_mode = datamode
                )
        print(f'simulation done: {len(forecast)}')

        # turn the forecast into per-lap ranks, depending on the experiment
        if _exp_id in ('rank', 'timediff2rank'):
            stint_eval = eval_stint_direct(forecast, 2)
        elif _exp_id == 'laptime2rank':
            stint_eval = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break

        # score this stint and accumulate
        all_rows.extend(get_acc_onestint(stint_eval, current_pit, next_pits))

    return pd.DataFrame(all_rows, columns =['carno', 'startlap', 'startrank',
                           'endrank', 'diff', 'sign',
                           'pred_endrank', 'pred_diff', 'pred_sign',
                           ])
#
# ------------
#
def longterm_predict(predictor, runs, prediction_length, freq,
                     useeid = False,
                     run_ts= COL_LAPTIME,
                     test_event = 'Indy500-2018',
                     test_cars = [],
                     use_global_dict = True,
                     oracle_mode = MODE_ORACLE,
                     half_moving_win = 0,
                     train_ratio=0.8,
                     log_transform = False,
                     verbose = False
                     ):
    """
    Rolling-window long-term prediction over the test event.

    split the ts to train and test part by the ratio

    input:
        predictor ; trained GluonTS predictor
        runs ; index into laptime_data to select a single event, or <0 for all
        oracle_mode: false to simulate prediction in real by
            set the covariates of track and lap status as nan in the testset
        half_moving_win ; rolling-window step: 0 -> 1 lap, 1 -> prediction_length/2,
            2 -> prediction_length
        train_ratio ; fraction of max ts length used as the minimum-length cutoff
        log_transform ; apply log(x+1) to the target
            NOTE(review): target_val is rebuilt untransformed inside the test
            loop, so the transform only affects the (unreachable) train branch
        verbose ; print per-event / per-ts diagnostics
    return:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder
            4, -> placeholder

    NOTE(review): run_ts, test_event, test_cars parameters are overridden below
    by the module-level _run_ts/_test_event settings; test_cars is unused (and a
    mutable default).
    """
    # module-level settings take precedence over the parameters
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio

    # initialize the track-status prediction/disturbance models
    init_track_model()
    init_adjust_track_model()

    # synthetic start timestamp; can be different for each time series
    start = pd.Timestamp("01-01-2019", freq=freq)

    train_set = []
    test_set = []
    forecasts_et = {}    # result: carno -> 5 x totallen matrix

    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()

    #add statistics for adjust test
    # trackstatus, lapstatus
    mae = [0,0]

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []

        # only the test event survives this loop; every other event is skipped,
        # so test_mode is effectively always True below
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
            #jump out
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)

        # context_len: where the rolling test windows start
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        if verbose:
            #print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
            print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            rec_raw = _data[2][rowid].copy()

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            # pristine copies of the covariates; rec is overwritten per window
            lap_status = rec[COL_LAPSTATUS, :].copy()
            track_status = rec[COL_TRACKSTATUS, :].copy()
            pitage_status = rec[COL_LAPS_INSTINT,:].copy()

            # <3, totallen> -- rows 3,4 stay nan (placeholders for ranks)
            forecasts_et[carno] = np.zeros((5, totallen))
            forecasts_et[carno][:,:] = np.nan
            forecasts_et[carno][0,:] = rec[COL_LAPSTATUS, :].copy()
            forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
            forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)

            if log_transform:
                target_val = np.log(target_val + 1.0)

            # adjust for disturbance analysis
            if test_mode and test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                lap_status = rec[COL_LAPSTATUS, :].copy()
                rec[COL_LAPSTATUS, :] = get_adjust_lapstatus(carno, lap_status)

            # selection of features: zero out covariates excluded by the mode flags
            if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                # NOTE(review): unreachable -- non-test events were skipped above
                _train.append({'target': target_val,
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                                     rec[COL_LAPSTATUS,:]]
                               }
                              )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                #context_len = train_len

                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = int(prediction_length/2)
                elif half_moving_win == 2:
                    step = prediction_length
                else:
                    step = 1

                #bug fix, fixed the split point for all cars/ts
                #for endpos in range(max_len, context_len+prediction_length,step):
                for endpos in range(context_len+prediction_length, max_len, step):
                    #check if enough for this ts
                    if endpos > totallen:
                        break

                    # RUN Prediction for single record
                    _test = []

                    # check pitstop(stint) in the last prediction
                    # use ground truth of target before the last pitstop
                    if np.sum(lap_status[endpos-2*prediction_length:endpos-prediction_length]) > 0:
                        # pit found
                        # adjust endpos so the window starts right after the pit
                        # NOTE(review): reassigning endpos does not change the
                        # range() iteration -- only this window is shifted
                        pitpos = np.where(lap_status[endpos-2*prediction_length:endpos-prediction_length] == 1)
                        endpos = endpos-2*prediction_length + pitpos[0][0] + prediction_length + 1
                        #print('endpos:',endpos,pitpos)
                        #check if enough for this ts
                        if endpos > totallen:
                            break

                    #reset target, status to ground truth for this window
                    target_val = rec[run_ts,:].copy().astype(np.float32)
                    rec[COL_LAPSTATUS, :] = lap_status
                    rec[COL_TRACKSTATUS, :] = track_status
                    rec[COL_LAPS_INSTINT, :] = pitage_status

                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    # stint statistics at the last observed lap before the window
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])

                    # test mode: overwrite the covariates inside the forecast window
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0

                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0

                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])

                    # predicting with status model
                    if test_flag(oracle_mode, MODE_PREDTRACK):
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        #lap_rec[-prediction_length:] = 0
                    if test_flag(oracle_mode, MODE_PREDPIT):
                        #predrec = get_track_model(track_rec, endpos, prediction_length)
                        #track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                        laps_instint,prediction_length)

                        #for pitage, use the predicted lap info to update pitage
                        # pitage counts up each lap and resets to 0 at a pit lap
                        start_pitage = pitage_rec[-prediction_length - 1]
                        for pos in range(prediction_length):
                            if lap_rec[-prediction_length + pos]==0:
                                pitage_rec[-prediction_length + pos] = start_pitage+1
                            else:
                                #new pit
                                start_pitage = 0
                                pitage_rec[-prediction_length + pos] = start_pitage

                    # disturbe analysis
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec

                    #if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                    #    # adjust the position of pit
                    #    if np.sum(lap_rec[-prediction_length:]) > 0:
                    #        adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                    #        lap_rec[-prediction_length:] = adjustrec

                    #okay, end of adjustments, test difference here
                    # rec_raw .vs. track_rec, lap_rec -- accumulate covariate MAE
                    track_rec_raw = rec_raw[COL_TRACKSTATUS, :endpos]
                    lap_rec_raw = rec_raw[COL_LAPSTATUS, :endpos]
                    mae[0] = mae[0] + np.nansum(np.abs(track_rec[-prediction_length:] - track_rec_raw[-prediction_length:]))
                    mae[1] = mae[1] + np.nansum(np.abs(lap_rec[-prediction_length:] - lap_rec_raw[-prediction_length:]))

                    if feature_mode == FEATURE_STATUS:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec]
                                      }
                                     )
                    elif feature_mode == FEATURE_PITAGE:
                        _test.append({'target': target_val[:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                      }
                                     )

                    # RUN Prediction here, for single record
                    test_ds = ListDataset(_test, freq=freq)

                    forecast_it, ts_it = make_evaluation_predictions(
                        dataset=test_ds,  # test dataset
                        predictor=predictor,  # predictor
                        num_samples=100,  # number of sample paths we want for evaluation
                    )

                    forecasts = list(forecast_it)
                    tss = list(ts_it)

                    #get prediction result: mean over sample paths
                    forecast_laptime_mean = np.mean(forecasts[0].samples, axis=0).reshape((prediction_length))

                    #update target_val and write covariates back into rec
                    target_val[endpos-prediction_length:endpos] = forecast_laptime_mean
                    rec[COL_TRACKSTATUS, endpos-prediction_length:endpos] = track_rec[-prediction_length:]
                    rec[COL_LAPSTATUS, endpos-prediction_length:endpos] = lap_rec[-prediction_length:]
                    rec[COL_LAPS_INSTINT, endpos-prediction_length:endpos] = pitage_rec[-prediction_length:]

                    #save forecast
                    #save the prediction
                    completed_laps = len(tss[0]) - prediction_length + 1
                    #print('car no:', carno, 'completed_laps:', completed_laps)
                    forecasts_et[carno][2, len(tss[0]) - prediction_length:len(tss[0])] = forecast_laptime_mean.copy()

                    test_rec_cnt += 1

            #one ts
            if verbose:
                print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')

        #train_set.extend(_train)
        #test_set.extend(_test)

    #print(f'train len:{len(train_set)}, test len:{len(test_set)}, mae_track:{mae[0]},mae_lap:{mae[1]},')
    #train_ds = ListDataset(train_set, freq=freq)
    #test_ds = ListDataset(test_set, freq=freq)

    return forecasts_et
# In[12]:
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset, maxlap=200):
    """
    Evaluate stint ranks from laptime forecasts: convert per-lap laptimes into
    cumulative elapsed time (plus each car's start offset) and rank the cars
    lap by lap, for both the true and the predicted laptimes.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target (laptime)
            2,: -> pred target (laptime)
            3,: -> filled in here with true rank
            4,: -> filled in here with predicted rank
        prediction_length ; unused, kept for interface compatibility
        start_offset ; DataFrame with 'car_number' and 'elapsed_time' columns:
            elapsed time for lap 0, for one specific event. Any non-DataFrame
            value is treated as "no offset" (0 for every car).
        maxlap ; maximum number of laps of the event (default 200, Indy500)
    return:
        forecasts_et, with rows 3 (true rank) and 4 (pred rank) filled in
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}

    #convert to elapsedtime; row 0 = truth, row 1 = prediction
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan
    for carno in forecasts_et.keys():
        # bug fix: 'offset' used to be unbound for the first car (NameError)
        # and stale from the previous car afterwards whenever start_offset was
        # not a DataFrame; default it to 0 explicitly per car
        offset = 0
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]

        lapnum = len(forecasts_et[carno][1,:])
        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed

        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed

    #calculate rank per lap via double argsort; nan elapsed times sort last
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)

    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]

    return forecasts_et
#
def eval_full_samples_old(lap, forecast_samples, forecast, full_samples, full_tss, maxlap=200):
    """
    Accumulate, for a single lap, the per-sample rank forecasts into the
    running full_samples/full_tss structures (old variant: always ranks).

    input:
        lap             ; lap number being evaluated
        forecast_samples; {} carno -> samples of the pred target for this lap
        forecast        ; {}, carno -> 5 x totallen matrix
            1,: -> true target
            2,: -> pred target
        full_samples    ; {} carno -> (samplecnt, maxlap) matrix, filled here
        full_tss        ; {} carno -> (maxlap,) true-rank series, filled here
    return:
        None; full_samples and full_tss are updated in place
    """
    #get car list for this lap
    carlist = list(forecast.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    samplecnt = len(forecast_samples[carlist[0]])
    #diff_time = np.zeros((len(carlist), 1))
    diff_time = np.zeros((len(carlist), maxlap))
    diff_time_hat = np.zeros((len(carlist), samplecnt))
    diff_time[:,:] = np.nan
    diff_time_hat[:,:] = np.nan
    for carno in carlist:
        #diff_time[caridmap[carno],0] = forecast[carno][1, lap]
        maxlen = len(forecast[carno][1, :])
        diff_time[caridmap[carno],:maxlen] = forecast[carno][1, :]
        diff_time_hat[caridmap[carno],:] = forecast_samples[carno]
    #calculate rank, support nan
    # double argsort over the car axis -> 0-based rank; NaNs sort last
    idx = np.argsort(diff_time, axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(diff_time_hat, axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in carlist:
        if carno not in full_tss:
            #init
            full_tss[carno] = np.zeros((maxlap))
            full_samples[carno] = np.zeros((samplecnt, maxlap))
            full_tss[carno][:] = np.nan
            full_samples[carno][:,:] = np.nan
        # true ranks are rewritten up to the current lap; the per-sample
        # predicted ranks only fill the current lap's column
        full_tss[carno][:lap] = true_rank[caridmap[carno]][:lap]
        full_tss[carno][lap] = true_rank[caridmap[carno]][lap]
        full_samples[carno][:, lap] = pred_rank[caridmap[carno],:]
    return
#
def eval_full_samples(lap, forecast_samples, forecast, full_samples, full_tss, maxlap=200, evalbyrank = True):
    """
    Accumulate, for a single lap, the per-sample forecasts into the running
    full_samples/full_tss structures.

    input:
        lap             ; lap number being evaluated
        forecast_samples; {} carno -> samples of the pred target for this lap
        forecast        ; {}, carno -> 5 x totallen matrix
            1,: -> true target
            2,: -> pred target
        full_samples    ; {} carno -> (samplecnt, maxlap) matrix, filled here
        full_tss        ; {} carno -> (maxlap,) true series, filled here
        evalbyrank      ; True -> convert values to ranks first,
                          False -> keep the raw target values
    return:
        None; full_samples and full_tss are updated in place
    """
    #get car list for this lap
    carlist = list(forecast.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    samplecnt = len(forecast_samples[carlist[0]])
    #diff_time = np.zeros((len(carlist), 1))
    diff_time = np.zeros((len(carlist), maxlap))
    diff_time_hat = np.zeros((len(carlist), samplecnt))
    diff_time[:,:] = np.nan
    diff_time_hat[:,:] = np.nan
    for carno in carlist:
        #diff_time[caridmap[carno],0] = forecast[carno][1, lap]
        maxlen = len(forecast[carno][1, :])
        diff_time[caridmap[carno],:maxlen] = forecast[carno][1, :]
        diff_time_hat[caridmap[carno],:] = forecast_samples[carno]
    if evalbyrank == True:
        #calculate rank, support nan
        # double argsort over the car axis -> 0-based rank; NaNs sort last
        idx = np.argsort(diff_time, axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(diff_time_hat, axis=0)
        pred_rank = np.argsort(idx, axis=0)
    else:
        # evaluate on raw target values instead of ranks
        true_rank = diff_time
        pred_rank = diff_time_hat
    # save the rank back
    for carno in carlist:
        if carno not in full_tss:
            #init
            full_tss[carno] = np.zeros((maxlap))
            full_samples[carno] = np.zeros((samplecnt, maxlap))
            full_tss[carno][:] = np.nan
            full_samples[carno][:,:] = np.nan
        # true values rewritten up to the current lap; per-sample predictions
        # fill only the current lap's column
        full_tss[carno][:lap] = true_rank[caridmap[carno]][:lap]
        full_tss[carno][lap] = true_rank[caridmap[carno]][lap]
        full_samples[carno][:, lap] = pred_rank[caridmap[carno],:]
    return
def eval_stint_direct(forecasts_et, prediction_length):
    """
    Evaluate rank directly from time-difference forecasts.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3,: -> placeholder, overwritten here with true rank
            4,: -> placeholder, overwritten here with predicted rank
        prediction_length ; unused in this function
    return:
        forecasts_et, with rows 3/4 filled in place (same dict object)
    """
    #todo, Indy500 - > 200 max laps
    MAXLAP = 200
    cars = list(forecasts_et.keys())
    # grid[0] holds true targets, grid[1] predictions; one row per car,
    # padded with NaN beyond each car's completed laps
    grid = np.zeros((2, len(cars), MAXLAP))
    grid[:, :] = np.nan
    for row, carno in enumerate(cars):
        n_laps = forecasts_et[carno].shape[1]
        grid[0, row, :n_laps] = forecasts_et[carno][1, :]
        grid[1, row, :n_laps] = forecasts_et[carno][2, :]
    # double argsort over the car axis turns values into 0-based ranks;
    # NaNs sort last so incomplete cars do not disturb the real ranks
    def _rank(mat):
        order = np.argsort(mat, axis=0)
        return np.argsort(order, axis=0)
    true_rank = _rank(grid[0])
    pred_rank = _rank(grid[1])
    # write the ranks back into the placeholder rows
    for row, carno in enumerate(cars):
        n_laps = forecasts_et[carno].shape[1]
        forecasts_et[carno][3, :] = true_rank[row, :n_laps]
        forecasts_et[carno][4, :] = pred_rank[row, :n_laps]
    return forecasts_et
#calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset):
    """
    Evaluate rank by lap-time forecasting (convert to elapsed time first).

    NOTE(review): the body is identical to eval_stint_bylaptime above.

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            (row 1 true lap time, row 2 predicted lap time; rows 3/4 are
            placeholders overwritten here with true/pred rank)
        start_offset ; per-car elapsed time at lap 0 for one specific event
                       (expected: DataFrame with 'car_number','elapsed_time')
        prediction_length ; unused in this function
    return:
        forecasts_et, with rows 3/4 filled in place
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    maxlap = 200
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan
    for carno in forecasts_et.keys():
        #start_offset is global var
        # NOTE(review): offset is only assigned when start_offset is a
        # DataFrame; any other type raises NameError below
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        lapnum = len(forecasts_et[carno][1,:])
        # cumulative lap times + start offset -> elapsed race time
        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed
        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed
        #maxlap = max(maxlap, len(forecasts_et[carno][1,:]))
    #calculate rank, support nan
    # double argsort -> 0-based rank per lap; NaNs sort last
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]
    return forecasts_et
# In[13]:
def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
            test_event='Indy500-2018', test_cars = None,
            datamode = MODE_ORACLE,model = 'oracle'):
    """
    Run one long-term forecast experiment for the configured test event.

    dependency: test_event, test on one event only (also reads the module
    globals _test_event, events_id, _run_ts, _exp_id, freq)

    input:
        prediction_length ; forecast horizon in laps
        half_moving_win   ; moving-window control passed to the simulator
        train_ratio       ; train split ratio passed to the simulator
        trainid           ; id string used to locate the trained model
        test_cars         ; optional list of car numbers; None/[] -> all cars
        datamode          ; MODE_* oracle/test flag combination
        model             ; model name to load
    return:
        forecasts dict produced by longterm_predict
    """
    # bug fix: the original default for test_cars was a shared mutable
    # list ([]); use a None sentinel instead
    if test_cars is None:
        test_cars = []
    retdf = []
    pred_ret = {}
    ds_ret = {}
    rank_result = {}
    predictor = {}
    #for model in models:
    print('exp:',inspect.stack()[0][3],'model:', model,
            'datamode:', get_modestr(datamode),'eval:', _exp_id )
    predictor[model] = load_model(prediction_length, model,
                trainid=trainid)
    ### create test dataset
    forecasts = longterm_predict(predictor[model],
                    events_id[_test_event], prediction_length,freq,
                    oracle_mode=datamode,
                    run_ts = _run_ts,
                    test_cars=test_cars,
                    half_moving_win= half_moving_win,
                    train_ratio=train_ratio
                    )
    #forecasts = eval_stint_rank(forecasts_et, prediction_length,
    #                     global_start_offset[test_event])
    return forecasts
# In[14]:
def get_sign(diff):
    """Map a rank difference to its sign: 1 (lost positions), -1 (gained), 0 (unchanged)."""
    if diff == 0:
        return 0
    return 1 if diff > 0 else -1
def get_stint_acc(forecasts, trim=2, currank = False):
    """
    Build a per-stint rank-change table from forecast results.

    input:
        trim     ; laps backed off from the pit lap so the rank is sampled on
                   a "steady" lap (before pit_inlap / pit_outlap)
        forecasts; carno -> [5, totallen] matrix
            0; lap_status (1 marks a pit lap)
            3; true_rank
            4; pred_rank
        currank  ; True -> naive "current rank" baseline: predict the
                   end-of-stint rank to equal the start rank
    output:
        DataFrame with columns
        carno, stintid, startrank, endrank, diff, sign,
        pred_endrank, pred_diff, pred_sign
    """
    def _sign(diff):
        # sign of a rank change: +1 lost positions, -1 gained, 0 unchanged
        if diff > 0:
            return 1
        if diff < 0:
            return -1
        return 0

    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]
        pitpos_list = np.where(forecasts[carno][0,:] == 1)[0]

        stintid = 0
        startrank = true_rank[0]
        for pitpos in pitpos_list:
            # NOTE(review): pitpos - trim can wrap to a negative index for a
            # very early pit; kept as in the original semantics
            endrank = true_rank[pitpos-trim]
            diff = endrank - startrank
            sign = _sign(diff)
            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
            else:
                pred_endrank = pred_rank[pitpos-trim]
            pred_diff = pred_endrank - startrank
            pred_sign = _sign(pred_diff)
            rankret.append([carno, stintid, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                           ])
            stintid += 1
            startrank = true_rank[pitpos-trim]
        # final stint runs to the last lap
        # bug fix: the original indexed pitpos_list[-1] unconditionally and
        # crashed with IndexError for a car that never pitted
        if len(pitpos_list) == 0 or pitpos_list[-1] < lapnum - 1:
            endrank = true_rank[-1]
            diff = endrank - startrank
            sign = _sign(diff)
            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
            else:
                pred_endrank = pred_rank[-1]
            pred_diff = pred_endrank - startrank
            pred_sign = _sign(pred_diff)
            rankret.append([carno, stintid, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                           ])
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'stintid', 'startrank',
                                         'endrank', 'diff', 'sign',
                                         'pred_endrank', 'pred_diff', 'pred_sign',
                                        ])
    return df
#
# configuration
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'
_test_event = 'Indy500-2018'
#_test_event = 'Indy500-2019'
_train_len = 40
_feature_mode = FEATURE_STATUS
_context_ratio = 0.
#_task_id = 'timediff' # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
#
#_task_id = 'lapstatus' # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus' #rank, laptime, laptim2rank, timediff2rank...
_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
_inlap_status = 1
_force_endpit_align = False
_include_endpit = False
#_use_mean = False # mean or median to get prediction from samples
_use_mean = True # mean or median to get prediction from samples
# In[16]:
# module-level state populated by init()
global_start_offset = {}   # event -> DataFrame[car_number, elapsed_time] at lap 0
global_carids = {}         # carno -> carid, loaded from the pickle
laptime_data = None        # per-event laptime matrices, loaded from the pickle
laptime_data_save = None   # backup copy of laptime_data
freq = "1min"              # gluonts time-series frequency
decode_carids = {}         # carid -> carno (inverse of global_carids)
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
def init(pitmodel = ''):
    """
    Load per-event race data, the laptime pickle and start offsets, and
    construct the pit model.

    input:
        pitmodel ; 'oracle'      -> keep the string (oracle pit info)
                   other string  -> PitModelMLP(modelfile=pitmodel)
                   non-string    -> PitModelSimple(top8=(pitmodel==0))
    side effects:
        populates the module globals listed in the global statement below.
    """
    # bug fix: laptime_data_save was missing from the global list, so the
    # assignment below only created a function local and the module-level
    # laptime_data_save stayed None
    global global_carids, laptime_data, global_start_offset, decode_carids,_pitmodel, laptime_data_save
    global dbid, _inlap_status
    dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
    stagedata = {}
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata = stagedata[event]
        #offset: per-car elapsed time at lap 0 for this event
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]
    # start from here
    import pickle
    #with open('laptime_rank_timediff_fulltest-oracle-%s.pickle'%year, 'rb') as f:
    laptimefile = f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
    with open(laptimefile, 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    laptime_data_save = laptime_data
    decode_carids={carid:carno for carno, carid in global_carids.items()}
    print(f'init: load dataset {laptimefile} with {len(laptime_data)} races, {len(global_carids)} cars')
    if not isinstance(pitmodel, str):
        _pitmodel = PitModelSimple(top8=(True if pitmodel==0 else False))
        print(f'init pitmodel as PitModelSimple')
    elif pitmodel=='oracle':
        _pitmodel = pitmodel
    else:
        _pitmodel = PitModelMLP(modelfile = pitmodel)
        print(f'init pitmodel as PitModelMLP(pitmodel)')
def runtest(modelname, model, datamode, naivemode, trainid= "2018"):
    """
    Run one model configuration end-to-end and report stint-level metrics.

    Reads module globals: _exp_id, _test_event, global_start_offset, _trim
    (the latter is set in the __main__ block).

    input:
        modelname ; label, unused inside (kept for caller symmetry)
        model     ; model name passed to run_exp
        datamode  ; MODE_* flags passed to run_exp
        naivemode ; True -> evaluate the naive current-rank baseline
    return:
        (acc, mae, rmse, r2), or (0, 0, 0, 0) on unsupported _exp_id
    """
    forecast = run_exp(2,2, train_ratio =0.1 , trainid = trainid,
            datamode=datamode, model=model)
    # choose the evaluation path matching the trained task
    if _exp_id=='rank' or _exp_id=='timediff2rank':
        forecasts_et = eval_stint_direct(forecast, 2)
    elif _exp_id=='laptime2rank':
        forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
    else:
        print(f'Error, {_exp_id} evaluation not support yet')
        return 0,0, 0,0
    df = get_stint_acc(forecasts_et, currank = naivemode, trim= _trim)
    # sign accuracy: fraction of stints where the direction of the rank
    # change was predicted correctly
    correct = df[df['sign']==df['pred_sign']]
    acc = len(correct)/len(df)
    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values))/len(df)
    # NOTE(review): unlike get_evalret, no math.sqrt here, so 'rmse' is
    # actually the MSE -- confirm whether that is intended
    rmse = mean_squared_error(df['pred_diff'].values , df['diff'].values)
    mae = mean_absolute_error(df['pred_diff'].values , df['diff'].values)
    r2 = r2_score(df['pred_diff'].values , df['diff'].values)
    print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}')
    return acc, mae, rmse, r2
def get_evalret(df):
    """
    Compute stint-level metrics for the model and the naive (start-rank)
    baseline from a get_stint_acc result table.

    input:
        df ; DataFrame with columns sign/pred_sign, diff/pred_diff,
             startrank/endrank
    return:
        2x4 array [[acc, mae, rmse, r2], [acc_naive, mae_naive,
        rmse_naive, r2_naive]]
    """
    # sign accuracy: direction of the rank change predicted correctly
    correct = df[df['sign']==df['pred_sign']]
    acc = len(correct)/len(df)
    # NOTE(review): mae1 is computed but neither printed nor returned
    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values))/len(df)
    rmse = math.sqrt(mean_squared_error(df['pred_diff'].values , df['diff'].values))
    mae = mean_absolute_error(df['pred_diff'].values , df['diff'].values)
    r2 = r2_score(df['pred_diff'].values , df['diff'].values)
    #naive result: predict endrank == startrank
    n_correct = df[df['startrank']==df['endrank']]
    acc_naive = len(n_correct)/len(df)
    mae_naive = np.mean(np.abs(df['diff'].values))
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)
    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}, {mae_naive1}')
    #print(f'pred: acc={acc}, mae={mae}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    # note: the {%.2f} format prints literal braces around each number
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, {%d}\n \
naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2, len(df),
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
    )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    #return acc, mae, rmse, r2
def get_evalret_shortterm(df):
    """
    Compute short-term (leader-prediction) metrics for the model and the
    naive (start-rank) baseline.

    input:
        df ; DataFrame with columns startlap, startrank, endrank,
             pred_endrank, diff
    return:
        2x4 array [[acc, mae, rmse, r2], [acc_naive, mae_naive,
        rmse_naive, r2_naive]] where acc is precision of predicting the
        leader (rank 0)
    """
    maxlap = np.max(df['startlap'].values)
    minlap = np.min(df['startlap'].values)
    # leader precision: of the laps where rank 0 was predicted, how many
    # were truly rank 0
    top1 = df[df['endrank']==0]
    top1_pred = df[df['pred_endrank']==0]
    correct = top1_pred[top1_pred['pred_endrank']==top1_pred['endrank']]
    #acc = len(correct)/len(top1_pred)
    # epsilon guards against an empty top1_pred set
    acc = len(correct)/(len(top1_pred) + 1e-10)
    rmse = math.sqrt(mean_squared_error(df['pred_endrank'].values , df['endrank'].values))
    mae = mean_absolute_error(df['pred_endrank'].values , df['endrank'].values)
    r2 = r2_score(df['pred_endrank'].values , df['endrank'].values)
    # absolute error per lap covered
    mae1 = np.sum(np.abs(df['pred_endrank'].values - df['endrank'].values))
    mae1 = mae1/ (maxlap -minlap +1)
    #naive result: current leader stays leader
    top1_naive = df[df['startrank']==0]
    # NOTE(review): no epsilon here -- ZeroDivisionError if no row has
    # startrank 0; confirm that cannot happen in practice
    n_correct = top1_naive[top1_naive['startrank']==top1_naive['endrank']]
    acc_naive = len(n_correct)/len(top1_naive)
    mae_naive = np.mean(np.abs(df['diff'].values))
    mae_naive1 = np.sum(np.abs(df['diff'].values))
    mae_naive1 = mae_naive1 / (maxlap - minlap + 1)
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)
    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}, {mae_naive1}')
    #print(f'pred: acc={acc}, mae={mae}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    # note: the {%.2f} format prints literal braces around each number
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, {%d}\n \
naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2, len(top1_pred),
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
    )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
#
# In[20]:
def mytest():
    """
    Evaluate every configured model/data-mode combination on stint
    rank-change prediction and save the result table to csv.

    Skips the run if the target csv already exists. Returns the result
    DataFrame (or None when skipping).
    """
    savefile = f'stint-evluate-{_exp_id}-d{_dataset_id}-t{_test_event}-c{_context_ratio}_trim{_trim}.csv'
    if os.path.exists(savefile):
        print(f'{savefile} already exists, bye')
        retdf = pd.read_csv(savefile)
        return
    # runid -> [model name, MODE_* flags, naivemode]
    config = {'fulloracle':['oracle',MODE_ORACLE,False],
              'laponly':['oracle',MODE_ORACLE_LAPONLY,False],
              'notracklap':['oracle',MODE_NOTRACK + MODE_NOLAP,False],
              'fullpred':['oracle',MODE_PREDTRACK + MODE_PREDPIT,False],
              'curtrack':['oracle',MODE_TESTCURTRACK,False],
              'zerotrack':['oracle',MODE_TESTZERO,False],
              'predtrack':['oracle',MODE_PREDTRACK + MODE_ORACLE_TRACKONLY,False],
              'predpit':['oracle',MODE_PREDPIT + MODE_ORACLE_LAPONLY,False],
              'deepAR':['deepAR',MODE_ORACLE,False],
              'naive':['zero',MODE_ORACLE, True],
             }
    cols = ['runid','acc','mae', 'rmse', 'r2']
    result = []
    for modelname in config.keys():
        acc, mae, rmse, r2 = runtest(modelname, config[modelname][0],
                config[modelname][1],config[modelname][2])
        result.append([modelname, acc, mae, rmse, r2])
    retd = pd.DataFrame(result,columns=cols)
    # bug fix: write to the same file checked above; the original output
    # name lacked the _trim suffix, so the exists-guard never matched the
    # file actually written
    retd.to_csv(savefile, float_format='%.3f')
    return retd
# In[ ]:
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # cmd argument parser
    usage = 'stint_predictor_fastrun.py --datasetid datasetid --testevent testevent --task taskid '
    parser = OptionParser(usage)
    parser.add_option("--task", dest="taskid", default='laptime')
    parser.add_option("--datasetid", dest="datasetid", default='indy2013-2018')
    parser.add_option("--testevent", dest="testevent", default='Indy500-2018')
    parser.add_option("--contextratio", dest="contextratio", default=0.)
    parser.add_option("--trim", dest="trim", type=int, default=0)
    opt, args = parser.parse_args()
    #set global parameters
    # these rebind the module-level configuration defined above
    _dataset_id = opt.datasetid
    _test_event = opt.testevent
    _trim = opt.trim
    # select the task: which target series the trained model predicts and
    # which evaluation path (_exp_id) to take
    if opt.taskid == 'laptime':
        _task_id = 'laptime' # rank,laptime, the trained model's task
        _run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
        _exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'timediff':
        _task_id = 'timediff' # rank,laptime, the trained model's task
        _run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
        _exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
    elif opt.taskid == 'rank':
        _task_id = 'rank' # rank,laptime, the trained model's task
        _run_ts = COL_RANK #COL_LAPTIME,COL_RANK
        _exp_id='rank' #rank, laptime, laptim2rank, timediff2rank...
    else:
        logger.error('taskid:%s not support yet!', opt.taskid)
        sys.exit(-1)
    if _dataset_id=='' or _test_event=='':
        logger.error('datasetid and testevnet cannot be null')
        sys.exit(-1)
    # dataset ids containing 'pitage' switch the feature mode
    if _dataset_id.find('pitage') > 0:
        _feature_mode = FEATURE_PITAGE
    logger.info('Start evaluation, dataset=%s, testevent=%s, taskid=%s', _dataset_id, _test_event,
            _task_id)
    init()
    mytest()
| 161,212 | 36.18012 | 310 | py |
rankpredictor | rankpredictor-master/src/indycar/model/quicktest_modules_sota.py | #!/usr/bin/env python
# coding: utf-8
"""
RankNet QuickTest goes through the following steps
makedb laptime
makedb gluonts
train model
evaluate model
draw figures
version 0.4
supported features:
forecast_mode: shortterm, stint
trainmodel : deepAR , deepARW-Oracle, deepAR-multi
testmodel : standard, oracle,pitmodel, joint
Usage: RankNet-QuickTest.py <configfile> [options]
options overwrite the configurations for quick experiments needs, include:
_forecast_mode ;
trainmodel ;
testmodel ;
_joint_train ; False/True
loopcnt ; 100/2
_pitmodel_bias ; 0/2,4
year ; 2018/2019
_test_event ; Indy500-2018, Indy500-2019
"""
import logging
from optparse import OptionParser
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import random
import mxnet as mx
from mxnet import gluon
import pickle
import json
import copy
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from pathlib import Path
import configparser
from gluonts.model.deepvar import DeepVAREstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.model.n_beats import NBEATSEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWeightEstimator
from indycar.model.deepar import DeepAREstimator
from indycar.model.transformerw import TransformerWeightedEstimator
from indycar.model.transformerf import TransformerFullLossEstimator
from indycar.model.transformerwf import TransformerWeightedFullLossEstimator
from indycar.model.transformerwfm import TransformerWeightedFullLossMaskedEstimator
#import indycar.model.stint_simulator_shortterm_pitmodel as stint
import indycar.model.quicktest_simulator_sota as stint
#from indycar.model.global_variables import _hi
import indycar.model.global_variables as gvar
from indycar.model.ListDataSetX import ListDatasetX
from gluonts.model.transformer import TransformerEstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from indycar.model.deep_factor import DeepFactorXEstimator
logger = logging.getLogger(__name__)
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """
    Build the per-(car, completed-lap) table with lap-to-lap deltas.

    input:
        dataset ; raw timing DataFrame; must contain car_number,
                  completed_laps, rank, elapsed_time, track_status,
                  lap_status plus the columns dropped below
    output:
        DataFrame ['car_number','completed_laps','time_diff','rank',
        'track_status','lap_status','elapsed_time'], one row per
        (car, lap), where time_diff is the lap time (elapsed-time delta)
    """
    # pick up data with valid rank: keep the earliest record per
    # (car, lap), ordered by elapsed time (MyIdx breaks ties stably)
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
                          "last_pitted_lap","start_position","laps_led"], axis=1)
    carnumber = set(uni_ds['car_number'])
    print('cars:', carnumber)
    print('#cars=', len(carnumber))
    # faster solution , uni_ds already sorted by car_number and lap:
    # diff across rows, then reset the first lap of each car to 0.
    # bug fix: use .loc instead of chained assignment
    # (df['col'][mask] = ...), which is unreliable and is a no-op under
    # pandas copy-on-write (pandas >= 3.0)
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[mask, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[mask, 'time_diff'] = 0
    df = uni_ds[['car_number','completed_laps','time_diff','rank','track_status', 'lap_status','elapsed_time']]
    return df
def make_lapstatus_data(dataset):
    """
    Extract the per-lap track status (flag) series from one finishing car.

    The track status is shared by the whole field, so any car that completed
    the final lap can serve as the source.
    """
    last_lap = max(dataset.completed_laps)
    # cars that reached the final lap
    finishers = dataset[dataset.completed_laps == last_lap].car_number.values
    print('count of completed cars:', len(finishers))
    print('completed cars:', finishers)
    # pick the first finisher and keep one record per lap
    sample = dataset[dataset['car_number'] == finishers[0]]
    sample = sample.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    return sample[['completed_laps', 'track_status']]
def load_data(event, year=0):
    """
    Load one event's timing csv and derive the standard tables.

    input:
        event ; event name, e.g. 'Indy500-2018'
        year  ; optional; when > 0 the filename becomes C_<event>-<year>.csv
    return:
        (alldata, rankdata, acldata, flagdata)
        alldata  ; raw csv as loaded
        rankdata ; earliest record per (car, lap), all cars
        acldata  ; make_cl_data over all cars
        flagdata ; per-lap track status from one finisher
    """
    # csv location depends on whether driver-id remapped data is requested
    if gvar.use_driverid:
        prefix = '../data/final/driverid/C_'
    else:
        prefix = '../data/final/C_'
    if year>0:
        inputfile = prefix + event +'-' + year + '.csv'
    else:
        inputfile = prefix + event +'.csv'
    #outputprefix = year +'-' + event + '-'
    dataset = pd.read_csv(inputfile)
    #dataset.info(verbose=True)
    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1
    # get records for the cars that finish the race
    completed_car_numbers= dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)
    print('count of completed cars:', completed_car_count)
    print('completed cars:', completed_car_numbers)
    #make a copy
    alldata = dataset.copy()
    # dataset keeps finishers only; alldata keeps everyone
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    cldata = make_cl_data(dataset)
    flagdata = make_lapstatus_data(dataset)
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata, flagdata
def nan_helper(y):
    """Return a NaN mask for *y* and a converter from mask to index positions.

    Example -- linear interpolation of NaNs:
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)
    to_indices = lambda flags: np.nonzero(flags)[0]
    return mask, to_indices
def get_lap2nextpit(lap_status, maxlap):
    """
    input:
        lap_status ; array of 0/1 pit flags per lap, nan means incomplete race
        maxlap     ; the max lap number of the race
    output:
        lap2nextpit ; per-lap gap (in laps) to the next pit stop; laps past
                      the last stop of an incomplete race stay nan
    """
    stops = list(np.where(lap_status == 1)[0])
    # a car that completed the race (no nan laps) treats the finish line as
    # the end of its last stint
    if np.sum(np.isnan(lap_status)) == 0:
        stops.append(maxlap)
    gaps = np.zeros_like(lap_status)
    gaps[:] = np.nan
    #guard: no stops at all -> everything stays nan
    if not stops:
        return gaps
    cursor = 0
    for lap in range(len(lap_status)):
        if lap >= stops[cursor]:
            # passed the current stop; advance to the next one
            cursor += 1
            if cursor >= len(stops):
                break
        gaps[lap] = stops[cursor] - lap
    return gaps
def get_lapdata(acldata):
    """
    Flatten the per-lap table into rows with time-behind-leader added.

    input:
        acldata ; DataFrame ['car_number','completed_laps','time_diff',
                  'rank','track_status','lap_status','elapsed_time']
    output:
        np.ndarray, one row per (car, lap >= 1):
        [car_number, lap, time_diff, rank, track_status, lap_status,
         time_behind, elapsed_time]
    """
    COL_COMPLETED_LAPS = 1
    COL_ELAPSED_TIME = 6
    maxlap = np.max(acldata['completed_laps'].values)
    #'car_number','completed_laps','time_diff','rank','track_status', 'lap_status','time_behind'
    time_behind = []
    for lap in range(1, maxlap+1):
        this_lap = acldata[acldata['completed_laps']==lap][
                    ['car_number','completed_laps','time_diff','rank',
                    'track_status', 'lap_status','elapsed_time']].values
        if len(this_lap) == 0:
            continue
        # leader's elapsed time on this lap
        # bug fix: np.float was deprecated and removed (NumPy >= 1.24);
        # the builtin float is the documented replacement
        min_elapsed_time = np.nanmin(this_lap[:,COL_ELAPSED_TIME].astype(float))
        #print(f'lap:{lap}, min_elapsed_time:{min_elapsed_time}')
        for row in this_lap:
            car_number = int(row[0])
            time_diff = row[2]
            rank = row[3]
            track_status = row[4]
            lap_status = row[5]
            timebehind = float(row[COL_ELAPSED_TIME]) - min_elapsed_time
            time_behind.append([car_number, lap, time_diff,rank,track_status, lap_status,
                               timebehind, float(row[COL_ELAPSED_TIME])])
    lapdata = np.array(time_behind)
    return lapdata
# features: laptime, rank, track_status, lap_status, timediff
# Row indices into the per-car feature matrix built by get_laptime_dataset
# (datalist[car, feature, lap]).
LAPTIME = 0
RANK = 1
TRACK_STATUS = 2
LAP_STATUS = 3
TIME_BEHIND = 4
CAUTION_LAPS_INSTINT = 5
LAPS_INSTINT = 6
ELAPSED_TIME = 7
LAP2NEXTPIT = 8
# total number of feature rows allocated per car
_featureCnt = 9
def get_laptime_dataset(stagedata, inlap_status = 0):
    """
    #add caution_laps_instint, laps_instint
    Build the per-event laptime/feature tensors from the loaded race tables.

    input:
        stagedata ; {event: (alldata, rankdata, acldata, flagdata)}
        inlap_status ; 0 -> pit lap only is flagged in LAP_STATUS;
                       1 -> additionally flag the lap BEFORE each pit lap;
                       other -> additionally flag the lap AFTER each pit lap
    output: laptime & rank data
        [(
         eventid,
         carids : rowid -> carno,
         datalist: #car_number x features x #totallaps (padded by Nan)
            entry: [[laptime, rank, track_status, lap_status,
                    caution_laps_instint, laps_instint]]
        )]
    """
    laptime_data = []
    for event in stagedata.keys():
        print(f'start event: {event}')
        laptime_rec = []
        eventid = gvar.events_id[event]
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        totalcars = len(carlist)
        #totallaps = len(laplist)
        totallaps = max(laplist) + 1
        print('totallaps:', event, totallaps, len(laplist))
        #carnumber -> carid
        carids={key:idx for idx, key in enumerate(carlist)}
        decode_carids={idx:key for idx, key in enumerate(carlist)}
        #init per-car stint counters (reset at every pit stop)
        lap_instint = {carids[x]:0 for x in carlist}
        caution_instint = {carids[x]:0 for x in carlist}
        #array: car_number x lap
        #laptime = np.zeros((totalcars, totallaps-1))
        #rank = np.zeros((totalcars, totallaps-1))
        # NOTE(review): the laptime/rank arrays below are never read, and
        # 'rank' is rebound to a scalar inside the row loop -- dead code?
        laptime = np.empty((totalcars, totallaps-1))
        rank = np.empty((totalcars, totallaps-1))
        laptime[:] = np.NaN
        rank[:] = np.NaN
        # the main output tensor: car x feature-row x lap, NaN padded
        datalist = np.empty((totalcars, _featureCnt, totallaps-1))
        datalist[:] = np.NaN
        #lapdata = acldata[['car_number','completed_laps',
        #         'time_diff','rank','track_status', 'lap_status','elapsed_time']].to_numpy()
        #'car_number','completed_laps','time_diff','rank','track_status', 'lap_status','time_behind'
        lapdata = get_lapdata(acldata)
        for row in lapdata:
            #completed_laps: lap 0 has no lap time, skip it
            if int(row[1]) == 0:
                continue
            #add to data array; laps are re-based to start at index 0
            car_number = carids[int(row[0])]
            completed_laps = int(row[1])-1
            time_diff = float(row[2])
            rank = int(row[3])
            track_status = 1 if row[4]=='Y' else 0
            lap_status = 1 if row[5]=='P' else 0
            time_behind = float(row[6])
            datalist[car_number, LAPTIME, completed_laps] = time_diff
            datalist[car_number, RANK, completed_laps] = rank
            datalist[car_number, TRACK_STATUS, completed_laps] = track_status
            datalist[car_number, LAP_STATUS, completed_laps] = lap_status
            datalist[car_number, TIME_BEHIND, completed_laps] = time_behind
            datalist[car_number, ELAPSED_TIME, completed_laps] = float(row[7])
            #stint status: count laps (and caution laps) since the last pit
            if track_status == 1:
                caution_instint[car_number] += 1
            lap_instint[car_number] += 1
            if lap_status == 1:
                #new stint
                lap_instint[car_number] = 0
                caution_instint[car_number] = 0
                # add inlap feature into lap_Status
                # set the previous lap to inlap status
                # what does it mean?
                if (inlap_status!=0):
                    if inlap_status == 1:
                        # set the previous lap of 'P'
                        if completed_laps > 0:
                            #datalist[car_number, LAP_STATUS, completed_laps-1] = INLAP_STATUS
                            datalist[car_number, LAP_STATUS, completed_laps-1] = 1
                    else:
                        # set the next lap of 'P'
                        if completed_laps +1 < totallaps:
                            #datalist[car_number, LAP_STATUS, completed_laps-1] = INLAP_STATUS
                            datalist[car_number, LAP_STATUS, completed_laps + 1] = 1
            datalist[car_number, LAPS_INSTINT, completed_laps] = lap_instint[car_number]
            datalist[car_number, CAUTION_LAPS_INSTINT, completed_laps] = caution_instint[car_number]
        #update lap2nextpit in datalist
        for caridx in range(datalist.shape[0]):
            lap_status = datalist[caridx, LAP_STATUS, :]
            #pit status: gap in laps to the next pit stop, per lap
            lap2nextpit = get_lap2nextpit(lap_status, totallaps-1)
            datalist[caridx, LAP2NEXTPIT, :] = lap2nextpit
        #add one record
        laptime_data.append([eventid, decode_carids, datalist])
        # push this event into stage dataframe
        print('event=%s, records=%s'%(event, datalist.shape))
    return laptime_data
# In[ ]:
def nan_helper(y):
    """Return (nan mask of *y*, function mapping a mask to index positions).

    Typical use -- linear interpolation of NaNs:
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    def positions(flags):
        return np.nonzero(flags)[0]
    return np.isnan(y), positions
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# Row indices into the laptime_data feature tensor (car x feature x lap).
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSED_TIME= 7
COL_LAP2NEXTPIT = 8
#_featureCnt = 9
# added new features
COL_LEADER_PITCNT = 9
COL_TOTAL_PITCNT = 10
COL_SHIFT_TRACKSTATUS = 11
COL_SHIFT_LAPSTATUS = 12
COL_SHIFT_LEADER_PITCNT = 13
COL_SHIFT_TOTAL_PITCNT = 14
COL_LASTFEATURE = 14
# dynamically extended space in simulation (scratch copies of the status
# rows, appended after the last real feature)
COL_TRACKSTATUS_SAVE = COL_LASTFEATURE+1
COL_LAPSTATUS_SAVE = COL_LASTFEATURE+2
COL_CAUTION_LAPS_INSTINT_SAVE = COL_LASTFEATURE+3
COL_LAPS_INSTINT_SAVE= COL_LASTFEATURE+4
COL_ENDPOS = COL_LASTFEATURE+5
# Feature-selection bit flags (powers of two, combined with | and tested
# with test_flag).
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
FEATURE_LEADER_PITCNT = 8
FEATURE_TOTAL_PITCNT = 16
FEATURE_SHIFT_TRACKSTATUS = 32
FEATURE_SHIFT_LAPSTATUS = 64
FEATURE_SHIFT_LEADER_PITCNT = 128
FEATURE_SHIFT_TOTAL_PITCNT = 256
# flag -> (long name, one-character code) used by decode_feature_mode
_feature2str= {
        FEATURE_STATUS : ("FEATURE_STATUS",'S'),
        FEATURE_PITAGE : ("FEATURE_PITAGE",'A'),
        FEATURE_LEADER_PITCNT : ("FEATURE_LEADER_PITCNT",'L'),
        FEATURE_TOTAL_PITCNT : ("FEATURE_TOTAL_PITCNT",'T'),
        FEATURE_SHIFT_TRACKSTATUS : ("FEATURE_SHIFT_TRACKSTATUS",'Y'),
        FEATURE_SHIFT_LAPSTATUS : ("FEATURE_SHIFT_LAPSTATUS",'P'),
        FEATURE_SHIFT_LEADER_PITCNT : ("FEATURE_SHIFT_LEADER_PITCNT",'L'),
        FEATURE_SHIFT_TOTAL_PITCNT : ("FEATURE_SHIFT_TOTAL_PITCNT",'T')
        }
# Oracle/test mode bit flags.
MODE_ORACLE = 0
MODE_NOLAP = 1
MODE_NOTRACK = 2
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
#MODE_STR={MODE_ORACLE:'oracle', MODE_NOLAP:'nolap',MODE_NOTRACK:'notrack',MODE_TEST:'test'}
#_feature_mode = FEATURE_STATUS
def decode_feature_mode(feature_mode):
    """Print the long names of the enabled feature flags and return a
    compact fixed-width code string ('0' marks a disabled flag)."""
    long_names = []
    code_chars = []
    for flag, (long_name, short_char) in _feature2str.items():
        if test_flag(feature_mode, flag):
            long_names.append(long_name)
            code_chars.append(short_char)
        else:
            code_chars.append('0')
    print(' '.join(long_names))
    return ''.join(code_chars)
def add_leader_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS, shift_len = 0,
        dest_col = COL_LEADER_PITCNT,
        verbose = False):
    """
    Add a new feature into mat(car, feature, lap): for each lap, the number
    of pit stops already counted for the cars ranked ahead of this car
    (exclusive cumulative sum down the rank order).

    input:
        selmat   : laptime_data array [car, feature, lap]
        rank_col : feature row holding the rank used to order the cars
        pit_col  : feature row holding the pit-stop indicator
        shift_len: use the rank ordering from `shift_len` laps earlier
        dest_col : feature row to write into; -1 appends a new row
    output:
        array with the leader-pit-count feature filled in (same object as
        selmat in update mode, a widened copy otherwise)
    """
    dim1, dim2, dim3 = selmat.shape
    # per-lap car ordering by the rank column
    idx = np.argsort(selmat[:, rank_col, :], axis=0)
    # (a dead `true_rank` computation using the removed np.float alias was
    #  deleted here; it was never read and crashed on NumPy >= 1.24)
    # get leaderCnt by sorted pits
    pits = np.zeros((dim1, dim3))
    for lap in range(shift_len, dim3):
        col = idx[:, lap-shift_len]
        pits[:, lap] = selmat[col, pit_col, lap]
    # cumulative pits of the cars ahead, excluding the car itself
    leaderCnt = np.nancumsum(pits, axis=0) - pits
    if verbose:
        print('pits:\n')
        print(pits[:,190:])
        print('leaderCnt raw:\n')
        print(leaderCnt[:,190:])
    #remove nans
    nanidx = np.isnan(leaderCnt)
    leaderCnt[nanidx] = 0
    if verbose:
        print('leaderCnt after remove nan:\n')
        print(leaderCnt[:,190:])
    if dest_col == -1:
        #create a new data array with one extra feature row
        newmat = np.zeros((dim1, dim2+1, dim3))
        dest_col = dim2
        newmat[:, :dim2, :] = selmat.copy()
    else:
        #update mode: write into the existing feature row in place
        newmat = selmat
    # scatter back from rank order to car order
    for lap in range(dim3):
        col = idx[:, lap]
        newmat[col, dest_col, lap] = leaderCnt[:, lap]
    # sync length to the rank column: laps that are nan in the rank series
    # (the trailing tail) are set to nan in the new feature as well
    for rec in newmat:
        nans, x = nan_helper(rec[rank_col, :])
        nan_count = np.sum(nans)
        if nan_count > 0:
            rec[dest_col, -nan_count:] = np.nan
    return newmat
def add_allpit_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS,
        dest_col = COL_TOTAL_PITCNT, verbose = False):
    """
    Add a new feature into mat(car, feature, lap): the total number of pit
    stops taken by all cars in each lap (the same per-lap vector is
    broadcast to every car row).

    input:
        selmat   : laptime_data array [car, feature, lap]
        rank_col : feature row used to detect the valid (non-nan) tail
        pit_col  : feature row holding the pit-stop indicator
        dest_col : feature row to write into; -1 appends a new row
    """
    dim1, dim2, dim3 = selmat.shape
    # per-lap pit count summed over all cars
    totalCnt = np.nansum(selmat[:, pit_col, :], axis=0).reshape((-1))
    if verbose:
        # bugfix: the original printed an undefined name `pits` here,
        # raising NameError whenever verbose=True
        print('pits:\n')
        print(selmat[:, pit_col, 190:])
        print('totalCnt raw:\n')
        print(totalCnt[190:])
    #remove nans
    nanidx = np.isnan(totalCnt)
    totalCnt[nanidx] = 0
    if verbose:
        print('totalCnt after remove nan:\n')
        print(totalCnt[190:])
    if dest_col == -1:
        #create a new data array with one extra feature row
        newmat = np.zeros((dim1, dim2+1, dim3))
        dest_col = dim2
        newmat[:, :dim2, :] = selmat.copy()
    else:
        #update mode: write into the existing feature row in place
        newmat = selmat
    for car in range(dim1):
        newmat[car, dest_col, :] = totalCnt
    # sync length to the rank column: trailing laps that are nan in the
    # rank series are set to nan in the new feature as well
    for rec in newmat:
        nans, x = nan_helper(rec[rank_col, :])
        nan_count = np.sum(nans)
        if nan_count > 0:
            rec[dest_col, -nan_count:] = np.nan
    return newmat
def add_shift_feature(selmat, rank_col=COL_RANK, shift_col=COL_LAPSTATUS, shift_len = 2,
        dest_col = -1, verbose = False):
    """
    Add a new feature into mat(car, feature, lap): a copy of `shift_col`
    shifted left by `shift_len` laps (the value at lap t becomes the value
    from lap t+shift_len).

    warning: these are oracle features, be careful not to let future rank
    positions leak into the inputs.

    input:
        selmat   : laptime_data array [car, feature, lap]
        rank_col : feature row used to determine each car's valid length
        shift_col: feature row to shift
        dest_col : feature row to write into; -1 appends a new row
    """
    dim1, dim2, dim3 = selmat.shape
    if dest_col == -1:
        #create a new data array with one extra feature row
        newmat = np.zeros((dim1, dim2+1, dim3))
        dest_col = dim2
        newmat[:, :dim2, :] = selmat.copy()
    else:
        #update mode: write into the existing feature row in place
        newmat = selmat
    for car in range(dim1):
        # default everything to nan, then fill the valid region
        newmat[car, dest_col, :] = np.nan
        rec = selmat[car]
        # valid laps are those where the rank series is not nan
        valid = ~np.isnan(rec[rank_col, :])
        recnnz = rec[shift_col, valid]
        reclen = len(recnnz)
        # zero-fill the valid region (the last shift_len laps have no
        # future value to copy, so they stay 0), then shift copy
        newmat[car, dest_col, :reclen] = 0
        newmat[car, dest_col, :reclen-shift_len] = recnnz[shift_len:]
    return newmat
def prepare_laptimedata(laptime_data,
        prediction_length, freq,
        test_event = 'Indy500-2018',
        train_ratio=0.8,
        context_ratio = 0.,
        shift_len = -1):
    """
    Prepare the laptime data for training.

    1. remove short ts
    2. rerank the tss
    3. create new features (leader/total pit counts and the shifted
       oracle features, in the fixed COL_* order)

    input:
        laptime_data : list of [eventid, carids, datalist] records where
                       datalist is [car, feature, lap]
        shift_len    : look-ahead distance for the shifted features;
                       defaults to prediction_length when negative
    output:
        new representation of laptime_data with reranked COL_RANK,
        re-numbered car-id dicts and the extended feature rows filled in
    """
    _laptime_data = laptime_data.copy()
    test_eventid = gvar.events_id[test_event]
    train_events = gvar._train_events
    run_ts = COL_RANK
    # check shift len
    if shift_len < 0:
        shift_len = prediction_length
    print('prepare_laptimedata shift len:', shift_len)
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    new_data = []
    for _data in _laptime_data:
        test_mode = False
        if _data[0] == test_eventid:
            test_mode = True
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            #use global train_len
            train_len = gvar._train_len if not test_mode else gvar._test_train_len
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10
        print(f'before ====event:{gvar.events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        #rerank due to short ts removed
        if True:
            sel_rows = []
            # use to check the dimension of features
            input_feature_cnt = _data[2].shape[1]
            if input_feature_cnt < COL_LASTFEATURE + 1:
                print('create new features mode, feature_cnt:', input_feature_cnt)
            else:
                print('update features mode, feature_cnt:', input_feature_cnt)
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status, timediff]
                rec = _data[2][rowid].copy()
                #remove nan(only tails)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    print(f'rerank a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                else:
                    sel_rows.append(rowid)
            #get selected matrix
            sel_idx = np.array(sel_rows)
            selmat = _data[2][sel_idx]
            mask = np.isnan(selmat[:,COL_RANK,:])
            idx = np.argsort(selmat[:,COL_RANK,:], axis=0)
            # bugfix: np.float was removed from NumPy (1.24+); the builtin
            # float is the documented replacement
            true_rank = np.argsort(idx, axis=0).astype(float)
            true_rank[mask] = np.nan
            if test_mode:
                #
                # for historical code mismatch, simulation does not run rerank
                #
                _data[2][sel_idx,COL_RANK,:] = true_rank + 1
            else:
                _data[2][sel_idx,COL_RANK,:] = true_rank
            # update the carno dict
            new_carids = {}
            for rowid in range(len(sel_idx)):
                carid = sel_idx[rowid]
                carno = _data[1][carid]
                new_carids[rowid] = carno
            # add new features
            # add leaderPitCnt
            if _data[0]==0:
                verbose = True
            else:
                verbose = False
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_LEADER_PITCNT
            data2_intermediate = add_leader_cnt(_data[2][sel_idx], shift_len = shift_len, dest_col=dest_col, verbose = verbose)
            # add totalPit
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_TOTAL_PITCNT
            data2_intermediate = add_allpit_cnt(data2_intermediate, dest_col=dest_col)
            #
            # add shift features, a fixed order, see the COL_SHIFT_* MACROS
            #
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TRACKSTATUS
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                    shift_col=COL_TRACKSTATUS, shift_len = shift_len)
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LAPSTATUS
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                    shift_col=COL_LAPSTATUS, shift_len = shift_len)
            # leader_pitcnt can not be shifted without target leaking, just do not use it
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LEADER_PITCNT
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                    shift_col=COL_LEADER_PITCNT, shift_len = shift_len)
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TOTAL_PITCNT
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                    shift_col=COL_TOTAL_PITCNT, shift_len = shift_len)
            # final
            data2_newfeature = data2_intermediate
        new_data.append([_data[0], new_carids, data2_newfeature])
    return new_data
def get_real_features(feature_mode, rec, endpos):
    """
    Construct the list of real-valued covariate rows selected by the bits
    set in `feature_mode`, each truncated to rec[:, :endpos].

    input:
        feature_mode : bitmask combining the FEATURE_* flags
        rec          : per-car array [feature, lap]
        endpos       : exclusive lap cutoff; <= 0 means the full length
    output:
        list of 1-d arrays, in the fixed order expected downstream
    """
    if endpos <= 0:
        endpos = rec.shape[1]
    # (flag, source columns) in the fixed downstream order;
    # FEATURE_STATUS contributes two rows, every other flag one
    selectors = (
        (FEATURE_STATUS, (COL_TRACKSTATUS, COL_LAPSTATUS)),
        (FEATURE_PITAGE, (COL_LAPS_INSTINT,)),
        (FEATURE_LEADER_PITCNT, (COL_LEADER_PITCNT,)),
        (FEATURE_TOTAL_PITCNT, (COL_TOTAL_PITCNT,)),
        (FEATURE_SHIFT_TRACKSTATUS, (COL_SHIFT_TRACKSTATUS,)),
        (FEATURE_SHIFT_LAPSTATUS, (COL_SHIFT_LAPSTATUS,)),
        (FEATURE_SHIFT_LEADER_PITCNT, (COL_SHIFT_LEADER_PITCNT,)),
        (FEATURE_SHIFT_TOTAL_PITCNT, (COL_SHIFT_TOTAL_PITCNT,)),
    )
    features = []
    for flag, cols in selectors:
        if test_flag(feature_mode, flag):
            for col in cols:
                features.append(rec[col, :endpos])
    return features
def make_dataset_byevent(_laptime_data,
        prediction_length, freq,
        useeid = False,
        run_ts=COL_LAPTIME,
        test_event = 'Indy500-2018',
        use_global_dict = True,
        oracle_mode = MODE_ORACLE,
        half_moving_win = True,
        train_ratio=0.8,
        log_transform = False,
        context_ratio = 0.,
        dorerank = True,
        joint_train = 0,
        test_cars = None
        ):
    """
    Split the ts to train and test part by the ratio.

    The test event's series are further expanded into multiple rolling
    test windows (step of one lap, from the full length backwards).

    oracle_mode: false to simulate prediction in real by
        set the covariates of track and lap status as nan in the testset
    test_cars: optional list restricting the test event to these car
        numbers (default: keep all cars)
    returns: (train_ds, test_ds, train_set, test_set)
    """
    # bugfix: the default used to be a shared mutable `[]`; the list is
    # never mutated here so behavior is unchanged, but None is the safe form
    if test_cars is None:
        test_cars = []
    #global setting
    feature_mode = gvar._feature_mode
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []
    total_test_rec_cnt = 0
    total_train_rec_cnt = 0
    totalTSCnt = 0
    totalTSLen = 0
    test_eventid = gvar.events_id[test_event]
    train_events = gvar._train_events
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    _tsid = 0
    for _data in _laptime_data:
        _train = []
        _test = []
        if _data[0] == test_eventid:
            test_mode = True
        elif _data[0] in train_events:
            test_mode = False
        else:
            #skip this event
            print('skip this event:', _data[0])
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            #use global train_len
            train_len = gvar._train_len if not test_mode else gvar._test_train_len
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10
        print(f'after ====event:{gvar.events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status, timediff]
            rec = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            totalTSCnt += 1
            totalTSLen += totallen
            if ( totallen < train_len + prediction_length):
                print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = gvar.global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            #check carno in test_cars, testmode only
            if len(test_cars)>0 and carno not in test_cars:
                continue
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            if gvar.static_cat_type == 2:
                static_cat = [_tsid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            if joint_train:
                target_cols = [run_ts, COL_LAPSTATUS]
                target_val = rec[target_cols].copy().astype(np.float32)
            else:
                target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            # bugfix: featureCnt (and real_features) could be undefined in the
            # final print when a test car produced no windows and no context
            featureCnt = 0
            if not test_mode:
                # all go to train set
                real_features = get_real_features(feature_mode, rec, -1)
                featureCnt = len(real_features)
                _train.append({'target': target_val,
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': real_features
                              })
                # estimate the record count
                total_train_rec_cnt += totallen - gvar.context_length + 1
            else:
                # reset train_len
                if context_ratio != 0.:
                    #add [0, context_len] to train set
                    real_features = get_real_features(feature_mode, rec, context_len)
                    featureCnt = len(real_features)
                    if joint_train:
                        _train.append({'target': target_val[:,:context_len],
                                       'start': start,
                                       'feat_static_cat': static_cat,
                                       'feat_dynamic_real': real_features
                                      })
                    else:
                        _train.append({'target': target_val[:context_len],
                                       'start': start,
                                       'feat_static_cat': static_cat,
                                       'feat_dynamic_real': real_features
                                      })
                    # estimate the record count
                    total_train_rec_cnt += context_len - gvar.context_length + 1
                # testset
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                step = -1
                for endpos in range(totallen, context_len+prediction_length,
                                    step):
                    # (dead per-iteration copies of the track/lap/pitage rows
                    #  were removed here; they were never used)
                    real_features = get_real_features(feature_mode, rec, endpos)
                    featureCnt = len(real_features)
                    if joint_train:
                        _test.append({'target': target_val[:,:endpos],
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': real_features
                                     })
                    else:
                        _test.append({'target': target_val[:endpos],
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': real_features
                                     })
                    test_rec_cnt += 1
            total_test_rec_cnt += test_rec_cnt
            #add one ts
            print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt},featureCnt:{featureCnt}')
        train_set.extend(_train)
        test_set.extend(_test)
        #end of one event
        # NOTE(review): _tsid advances once per event here; with
        # gvar.static_cat_type == 2 that gives every car in an event the same
        # static id — confirm the intended granularity
        _tsid += 1
    print(f'train len:{len(train_set)}, test len:{len(test_set)}, totsl TsCnt:{totalTSCnt}, total ts len:{totalTSLen}, trainreccnt:{total_train_rec_cnt}, testreccnt:{total_test_rec_cnt}', flush=True)
    train_ds = ListDatasetX(train_set, freq=freq,one_dim_target= False if joint_train else True)
    test_ds = ListDatasetX(test_set, freq=freq,one_dim_target= False if joint_train else True)
    return train_ds, test_ds, train_set, test_set
# In[ ]:
def init_estimator(model, gpuid, epochs=100, batch_size = 32,
        target_dim = 3, distr_output = None, use_feat_static = True,
        cardinality = 0):
    """
    Build the forecasting estimator (or statistical predictor) selected by
    `model`.

    Parameters:
        model        : model name string; see the if/elif chain below for
                       the supported values
        gpuid        : GPU index; negative selects CPU
        epochs       : training epochs for the Trainer
        batch_size   : training batch size
        target_dim   : target dimension for the multivariate variants
        distr_output : distribution output object passed to the estimator
        use_feat_static : whether to enable static categorical features
                          (when enabled, `cardinality` is also passed)
        cardinality  : cardinality list for the static categorical feature
    Returns:
        a GluonTS estimator, or a ready predictor for the statistical
        baselines ('ets', 'prophet', 'arima', 'naive'); exits the process
        for an unknown model name.
    """
    # select the MXNet compute context
    if int(gpuid) < 0:
        ctx = "cpu"
    else:
        ctx = "gpu(%s)"%gpuid
    #global vars
    prediction_length = gvar.prediction_length
    context_length = gvar.context_length
    freq = gvar.freq
    # shared trainer configuration for all trainable estimators
    trainer=Trainer(ctx=ctx,
                    batch_size = batch_size,
                    epochs=epochs,
                    learning_rate=gvar.learning_rate,
                    patience = gvar.patience,
                    hybridize=gvar.hybridize,
                    num_batches_per_epoch=100
                   )
    # Each branch below differs mainly in estimator class and in whether
    # dynamic real covariates ("-Oracle" variants) are enabled; the inner
    # if/else toggles the static categorical feature + cardinality pair.
    if model == 'deepAR':
        if use_feat_static:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=True,
                cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
        else:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=False,
                #cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
    elif model == 'deepAR-Oracle':
        # deepAR with the oracle covariates (feat_dynamic_real) enabled
        if use_feat_static:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
        else:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
    elif model == 'deepARW-Oracle' or model == 'RankNet':
        # weighted-loss deepAR variant
        if use_feat_static:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                weight_coef=gvar._weight_coef,
                trainer=trainer
            )
        else:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                weight_coef=gvar._weight_coef,
                trainer=trainer
            )
    elif model == 'Transformer':
        if use_feat_static:
            estimator = TransformerEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
        else:
            estimator = TransformerEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
    elif model == 'Transformer-Oracle':
        if use_feat_static:
            estimator = TransformerEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
        else:
            estimator = TransformerEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
    elif model == 'TransformerW-Oracle':
        # weighted transformer; note the two branches use different
        # model_dim/num_heads settings
        if use_feat_static:
            estimator = TransformerWeightedEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                model_dim=30,
                num_heads=6,
                trainer=trainer
            )
        else:
            estimator = TransformerWeightedEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                model_dim=28,
                num_heads=7,
                trainer=trainer
            )
    elif model == 'TransformerWF-Oracle' or model == 'RankNet-Transformer':
        if use_feat_static:
            estimator = TransformerWeightedFullLossEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
        else:
            estimator = TransformerWeightedFullLossEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
    elif model == 'TransformerWFM-Oracle':
        if use_feat_static:
            estimator = TransformerWeightedFullLossMaskedEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                weight_coef=gvar._weight_coef,
                trainer=trainer
            )
        else:
            estimator = TransformerWeightedFullLossMaskedEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                weight_coef=gvar._weight_coef,
                #model_dim=28,
                #num_heads=7,
                trainer=trainer
            )
    elif model == 'TransformerF-Oracle':
        if use_feat_static:
            estimator = TransformerFullLossEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
        else:
            estimator = TransformerFullLossEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                lags_seq=gvar._lags_seq,
                trainer=trainer
            )
    elif model == 'deepAR-multi':
        # multivariate deepAR with a MultivariateGaussian output
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=use_feat_static,
            #cardinality=cardinality,
            use_feat_dynamic_real=False,
            freq=freq,
            lags_seq=gvar._lags_seq,
            trainer=trainer,
            distr_output=MultivariateGaussianOutput(dim=target_dim),
        )
    elif model == 'deepARW-multi' or model == 'RankNet-Joint':
        estimator = DeepARWeightEstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=use_feat_static,
            #cardinality=cardinality,
            use_feat_dynamic_real=False,
            freq=freq,
            lags_seq=gvar._lags_seq,
            weight_coef=gvar._weight_coef,
            trainer=trainer,
            distr_output=MultivariateGaussianOutput(dim=target_dim),
        )
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            lags_seq=gvar._lags_seq,
            trainer=trainer
        )
    elif model == 'deepVAR':
        estimator = DeepVAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            target_dim = 2,
            cardinality=cardinality,
            trainer=trainer
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            #cardinality=[tsCnt],
            cardinality=cardinality,
            #lags_seq=gvar._lags_seq,
            trainer=trainer
        )
    elif model == 'deepFactorX':
        estimator = DeepFactorXEstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            #cardinality=[tsCnt],
            cardinality=cardinality,
            num_hidden_local = gvar.deepFactorX_num_hidden_local,
            #lags_seq=gvar._lags_seq,
            trainer=trainer
        )
    elif model == 'deepState':
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            past_length=context_length,
            use_feat_static_cat=gvar._use_cate_feature,
            cardinality=cardinality,
            freq=freq,
            use_feat_dynamic_real=gvar.use_dynamic_real,
            trainer=trainer
        )
    elif model == 'nbeats':
        estimator = NBEATSEstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            freq=freq,
            trainer=trainer
        )
    # the following baselines are ready-made predictors (no training step)
    elif model == 'ets':
        estimator = RForecastPredictor(method_name='ets',freq= freq, prediction_length = prediction_length)
    elif model == 'prophet':
        estimator = ProphetPredictor(freq= freq, prediction_length = prediction_length)
    elif model == 'arima':
        estimator = RForecastPredictor(method_name='arima',freq= freq, prediction_length = prediction_length, trunc_length = context_length)
    elif model == 'naive':
        estimator = NaivePredictor(freq= freq, prediction_length = prediction_length)
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)
    return estimator
# In[ ]:
#
# simulation engine general
#
def init_simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
        featuremode = stint.FEATURE_STATUS,
        pitmodel = 0,
        inlapmode=0,
        train_len = 40,test_train_len=40,
        joint_train = False,
        pitmodel_bias= 0, prepared_laptimedata = None):
    """
    Configure the `stint` simulation module for a run.

    Model path convention: <_dataset_id>/<_task_id>-<trainid>/

    input:
        prepared_laptimedata ; global
    """
    # the inlap mode must be in place before stint.init() runs
    stint._inlap_status = inlapmode
    stint.init(gvar.LAPTIME_DATASET, pitmodel, pitmodel_bias= pitmodel_bias)
    # remaining module-level configuration, applied in one sweep
    settings = {
        '_dataset_id': datasetid,
        '_test_event': testevent,
        '_feature_mode': featuremode,
        '_context_ratio': 0.,
        '_task_id': taskid,        # rank/laptime: the trained model's task
        '_run_ts': runts,          # COL_LAPTIME or COL_RANK
        '_exp_id': expid,          # rank, laptime, laptime2rank, timediff2rank...
        '_use_mean': True,
        '_train_len': train_len,
        '_test_train_len': test_train_len,
        '_joint_train': joint_train,
    }
    for attr, value in settings.items():
        setattr(stint, attr, value)
    # install the pre-processed laptime data with the extended feature set
    stint.set_laptimedata(prepared_laptimedata)
def simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
        datamode, loopcnt, featuremode = stint.FEATURE_STATUS,
        pitmodel = 0, model = 'oracle', inlapmode=0, train_len = 40,test_train_len=40,
        forecastmode = 'shortterm', joint_train = False,
        pitmodel_bias= 0, prepared_laptimedata = None,
        epochs = 1000):
    """
    Configure the `stint` module, load a trained model and run `loopcnt`
    simulation passes, returning the stacked evaluation metrics and the
    raw per-run results.

    Model path convention: <_dataset_id>/<_task_id>-<trainid>/

    input:
        prepared_laptimedata ; global
    """
    # the inlap mode must be set before stint.init() is called
    stint._inlap_status = inlapmode
    stint.init(gvar.LAPTIME_DATASET, pitmodel, pitmodel_bias= pitmodel_bias)
    stint._dataset_id = datasetid
    stint._test_event = testevent
    stint._feature_mode = featuremode
    stint._context_ratio = 0.
    stint._task_id = taskid        # rank/laptime: the trained model's task
    stint._run_ts = runts          # COL_LAPTIME or COL_RANK
    stint._exp_id = expid          # rank, laptime, laptime2rank, timediff2rank...
    stint._use_mean = True
    stint._train_len = train_len
    stint._test_train_len = test_train_len
    stint._joint_train = joint_train
    if forecastmode == 'stint':
        # stint-level forecasting needs these extra switches
        stint._trim = 0
        stint._debug_carlist = []
        stint._force_endpit_align = False
        stint._include_endpit = True
    # install the pre-processed laptime data with the extended feature set
    stint.set_laptimedata(prepared_laptimedata)
    predictor = stint.load_model(predictionlen, model, trainid='indy500', epochs = epochs, exproot='./')
    # run the simulation loop, one entry per pass
    results = {}
    for run_idx in range(loopcnt):
        if forecastmode == 'shortterm':
            results[run_idx] = stint.run_simulation_shortterm(
                predictor, predictionlen, stint.freq, datamode=datamode, verbose=False)
        elif forecastmode == 'stint':
            results[run_idx] = stint.run_simulation_pred(
                predictor, predictionlen, stint.freq, datamode=datamode)
        else:
            print('forecastmode not support:', forecastmode)
            break
    # evaluate every pass and average the metrics
    scores = []
    for run_idx in results.keys():
        if forecastmode == 'shortterm':
            frame = results[run_idx][0]
            metrics = stint.get_evalret_shortterm(frame)
        elif forecastmode == 'stint':
            frame = results[run_idx]
            metrics = stint.get_evalret(frame)
        scores.append(metrics)
    score_arr = np.array(scores)
    print(np.mean(score_arr, axis=0))
    return score_arr, results
def long_predict(predictor, sampleCnt = 100):
    """
    use the farest samples only

    Stitches one long forecast together from the rolling test windows by
    keeping only the last (farthest-ahead) predicted step of each window.

    input:
        test_ds ; global var
        predictor
        sampleCnt : number of sample paths drawn per window
    output:
        (stitched SampleForecast, first test time series)
    """
    # minute offset of forecast `idx` relative to the series start.
    # NOTE: `forecasts` and `start_time` are bound later in this function;
    # the closure is only evaluated after they exist.
    def get_start(idx):
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=predictor,  # predictor
        num_samples=sampleCnt,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    # timestamp of the first observation of the first test series
    start_time, row = next(tss[0].iterrows())
    # the rolling windows were generated from the longest prefix backwards,
    # so presumably the last forecast is the earliest window — verify
    first_start = get_start(-1)
    last_start = get_start(0)
    print(first_start, last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # allocate the stitched sample matrix, nan everywhere we have no value
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start
        #copy sample to block; only the farthest step of each window is kept
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
    target.samples = newsamples
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target, tss[0]
def get_alldf(dfx, year=2018, forecast_mode = 'shortterm'):
    """Concatenate the per-run result dataframes in `dfx` into one frame.

    In 'shortterm' mode each entry of dfx is a tuple whose first element is
    the dataframe; otherwise the entry is the dataframe itself.
    (`year` is unused; kept for API compatibility.)
    """
    if forecast_mode == 'shortterm':
        frames = [dfx[key][0] for key in dfx.keys()]
    else:
        frames = [dfx[key] for key in dfx.keys()]
    if len(frames) > 1:
        return pd.concat(frames)
    return frames[0]
def get_alldf_mode(dfx, year=2018,mode=0, forecast_mode = 'shortterm'):
    """
    Aggregate repeated simulation runs into one prediction per
    (carno, startlap) pair, using the chosen statistic over pred_endrank.

    mode:
        0; mode
        1; mean
        2; median
    """
    dfall = get_alldf(dfx, year=year, forecast_mode = forecast_mode)
    cars = set(dfall.carno.values)
    # start laps observed for each car across all runs
    startlaps = {}
    for car in cars:
        startlaps[car] = set(dfall[dfall['carno']==car].startlap.values)
    retdf = []
    for car in cars:
        for startlap in startlaps[car]:
            # all runs' records for this (car, startlap) pair
            dfrec = dfall[(dfall['carno']==car) & (dfall['startlap']==startlap)]
            #get mode
            if mode == 0:
                # NOTE(review): scipy >= 1.11 changed stats.mode's default
                # keepdims behavior; `.mode[0]` assumes the older array
                # result — verify against the installed scipy version
                pred_endrank = stats.mode(dfrec.pred_endrank.values).mode[0]
                #pred_endlap = stats.mode(dfrec.pred_endlap.values).mode[0]
            elif mode == 1:
                #use mean
                pred_endrank = np.mean(dfrec.pred_endrank.values)
                #pred_endlap = np.mean(dfrec.pred_endlap.values)
            elif mode == 2:
                #use median
                pred_endrank = np.median(dfrec.pred_endrank.values)
                #pred_endlap = np.median(dfrec.pred_endlap.values)
            # positional columns: 2=startrank, 6=pred_endrank, 7=pred_diff,
            # 8=pred_sign (assumes the column order of the simulation
            # dataframe matches the `columns` lists below — confirm)
            firstrec = dfrec.to_numpy()[0,:]
            firstrec[6] = pred_endrank
            firstrec[7] = pred_endrank - firstrec[2]
            # pred_sign: -1/0/+1 for predicted rank loss/no change/gain
            if firstrec[7] == 0:
                firstrec[8] = 0
            elif firstrec[7] > 0:
                firstrec[8] = 1
            else:
                firstrec[8] = -1
            #endlap, pred_endlap
            retdf.append(firstrec)
    #dfout = pd.concat(retdf)
    if forecast_mode == 'shortterm':
        dfout = pd.DataFrame(retdf, columns =['carno', 'startlap', 'startrank',
                'endrank', 'diff', 'sign',
                'pred_endrank', 'pred_diff', 'pred_sign',
                #'endlap','pred_endlap'
                ])
    else:
        dfout = pd.DataFrame(retdf, columns =['carno', 'startlap', 'startrank',
                'endrank', 'diff', 'sign',
                'pred_endrank', 'pred_diff', 'pred_sign',
                'endlap','pred_endlap'
                ])
    print('df size:', len(dfout))
    return dfout
def get_allsamples(dfx, year=2018):
    """Aggregate per-run forecast means into one sample matrix per car.

    Each run contributes one row (its nan-mean over the sample axis) to a
    per-car ``(runcnt, lapcnt)`` matrix.  Run keys are used directly as
    row indices, so they must be 0..runcnt-1.  ``year`` is unused.
    Returns (full_samples dict, tss dict of the first run).
    """
    run_keys = list(dfx.keys())
    full_tss = dfx[run_keys[0]][2]
    carlist = list(full_tss.keys())
    n_samples, n_laps = dfx[run_keys[0]][1][carlist[0]].shape
    print('sacmplecnt:', n_samples, 'lapcnt:', n_laps, 'runcnt:', len(run_keys))
    # pre-allocate one (runcnt, lapcnt) matrix per car
    full_samples = {carno: np.zeros((len(run_keys), n_laps)) for carno in carlist}
    for run_key in run_keys:
        run_forecast = dfx[run_key][1]
        for carno in carlist:
            # mean over this run's sample paths, ignoring NaNs
            full_samples[carno][run_key, :] = np.nanmean(run_forecast[carno], axis=0)
    return full_samples, full_tss
#straight implementation of prisk
def quantile_loss(target, quantile_forecast, q):
    """Doubled pinball (quantile) loss, summed over non-NaN entries."""
    residual = quantile_forecast - target
    # indicator-minus-q weighting of the pinball loss
    weight = (target <= quantile_forecast) - q
    return 2.0 * np.nansum(np.abs(residual * weight))
def abs_target_sum(target):
    """Return the NaN-ignoring sum of ``|target|``."""
    magnitudes = np.abs(target)
    return np.nansum(magnitudes)
def prisk(full_samples, full_tss, verbose = False):
    """Compute weighted quantile losses via the gluonts Evaluator.

    Wraps each car's sample matrix (laps from index 12 on) in a
    SampleForecast against a minute-frequency dummy calendar, then runs
    the gluonts Evaluator for the 0.1/0.5/0.9 weighted quantile losses.

    Args:
        full_samples: dict carno -> (n_samples, n_laps) forecast samples
        full_tss:     dict carno -> (n_laps,) observed target series
        verbose:      also dump the full aggregate-metrics dict as JSON
    Returns:
        the gluonts aggregate metrics dict.
    """
    carlist = full_tss.keys()
    tss = []
    forecasts = []
    forecasts_mean = []
    freq = '1min'
    # dummy calendar axis: laps map to minutes from this origin.
    # NOTE(review): `start + 12` relies on freq-aware Timestamp arithmetic
    # that newer pandas removed -- confirm the pinned pandas version.
    start = pd.Timestamp("01-01-2019", freq=freq)
    for car in carlist:
        testcar = car
        # laps before index 12 are excluded from the evaluation window
        fc = SampleForecast(samples = full_samples[testcar][:, 12:], freq=freq, start_date=start + 12)
        samples = np.mean(full_samples[testcar][:, 12:], axis =0, keepdims=True)
        # single-path mean forecast (built but not evaluated below)
        fc_mean = SampleForecast(samples = samples, freq=freq, start_date=start + 12)
        index = pd.date_range(start='2019-01-01 00:00:00', freq = 'T', periods = len(full_tss[testcar]))
        ts = pd.DataFrame(index = index, data = full_tss[testcar])
        tss.append(ts)
        forecasts.append(fc)
        forecasts_mean.append(fc_mean)
    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(tss))
    if verbose:
        print(json.dumps(agg_metrics, indent=4))
    print(agg_metrics["wQuantileLoss[0.1]"], agg_metrics["wQuantileLoss[0.5]"],agg_metrics["wQuantileLoss[0.9]"])
    return agg_metrics
def prisk_direct_bysamples2(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    target: endrank
    forecast: pred_endrank
    item_id: <carno, startlap>

    Weighted quantile loss over all cars computed directly from the
    per-car sample matrices (same math as the gluonts evaluator).
    """
    carlist = full_tss.keys()
    n_cars = len(carlist)
    n_quantiles = len(quantiles)
    prisk = np.zeros((n_cars, n_quantiles))
    target_sum = np.zeros((n_cars))
    aggrisk = np.zeros((n_quantiles))
    for carid, carno in enumerate(carlist):
        samples = full_samples[carno]
        observed = full_tss[carno]
        # (n_quantiles, n_laps) quantile curves over the sample axis
        quantile_forecasts = np.quantile(samples, quantiles, axis=0)
        for qid in range(n_quantiles):
            q_forecast = quantile_forecasts[qid]
            prisk[carid, qid] = quantile_loss(observed[startid:], q_forecast[startid:], quantiles[qid])
        target_sum[carid] = abs_target_sum(observed[startid:])
        if verbose==True and carno==3:
            print('target:', observed[startid:])
            print('forecast:', q_forecast[startid:])
            print('target_sum:', target_sum[carid])
            print('quantile_forecasts:', quantile_forecasts[:,startid:])
    # aggregate across cars
    prisk_sum = np.nansum(prisk, axis=0)
    if verbose==True:
        print('prisk:',prisk)
        print('prisk_sum:',prisk_sum)
        print('target_sum:',target_sum)
    for qid in range(n_quantiles):
        aggrisk[qid] = np.divide(prisk_sum[qid], np.sum(target_sum))
    agg_metrics = {}
    for qid, q in enumerate(quantiles):
        agg_metrics[f'wQuantileLoss[{q}]'] = aggrisk[qid]
    print(agg_metrics.values())
    return agg_metrics, aggrisk
# In[ ]:
def prisk_direct_bysamples(full_samples, full_tss, quantiles=(0.1, 0.5, 0.9), startid=12, verbose=False):
    """
    calculate prisk by <samples, tss> directly (equal to gluonts implementation)

    Weighted quantile loss aggregated over cars:
        wQL[q] = sum_car QL_q(car) / sum_car |target(car)|

    target: endrank
    forecast: pred_endrank
    item_id: <carno, startlap>

    Args:
        full_samples: dict carno -> (n_samples, n_laps) forecast samples
        full_tss:     dict carno -> (n_laps,) observed target series
        quantiles:    quantile levels to evaluate (default now an immutable tuple)
        startid:      first lap index included in the evaluation
        verbose:      print per-car debug info (car #3) and aggregates
    Returns:
        (agg_metrics, aggrisk): dict 'wQuantileLoss[q]' -> value, and the
        same values as a numpy array ordered like ``quantiles``.
    """
    carlist = full_tss.keys()
    prisk = np.zeros((len(carlist), len(quantiles)))
    target_sum = np.zeros(len(carlist))
    for carid, carno in enumerate(carlist):
        forecast = full_samples[carno]
        target = full_tss[carno]
        # (len(quantiles), n_laps) quantile curves over the sample axis
        quantile_forecasts = np.quantile(forecast, quantiles, axis=0)
        for idx, q in enumerate(quantiles):
            q_forecast = quantile_forecasts[idx]
            prisk[carid, idx] = quantile_loss(target[startid:], q_forecast[startid:], q)
        target_sum[carid] = abs_target_sum(target[startid:])
        if verbose and carno == 3:
            print('target:', target[startid:])
            print('forecast:', q_forecast[startid:])
            print('target_sum:', target_sum[carid])
            print('quantile_forecasts:', quantile_forecasts[:, startid:])
    prisk_sum = np.nansum(prisk, axis=0)
    if verbose:
        print('prisk:', prisk)
        print('prisk_sum:', prisk_sum)
        print('target_sum:', target_sum)
    # fix: the denominator is loop-invariant -- compute it once instead of
    # once per quantile, and vectorize the per-quantile division
    total_target = np.sum(target_sum)
    aggrisk = prisk_sum / total_target
    agg_metrics = {f'wQuantileLoss[{q}]': aggrisk[idx] for idx, q in enumerate(quantiles)}
    print(agg_metrics.values())
    return agg_metrics, aggrisk
def clear_samples(full_samples, full_tss, clearidx):
    """
    clear the laps in clearidx

    Returns deep copies of both inputs with the given lap indices set to
    NaN in the forecast samples and in the target series; the originals
    are left untouched.
    """
    import copy
    samples_copy = copy.deepcopy(full_samples)
    tss_copy = copy.deepcopy(full_tss)
    for carno in full_tss.keys():
        samples_copy[carno][:, clearidx] = np.nan
        tss_copy[carno][clearidx] = np.nan
    return samples_copy, tss_copy
def do_rerank(dfout, short=True):
    """
    carno','startlap','startrank','endrank','diff','sign','pred_endrank','pred_diff','pred_sign','endlap','pred_endlap
    output of prediction of target can be float
    resort the endrank globally

    For every startlap group the (possibly float) predicted end ranks are
    replaced by their dense 0..n-1 ordering, then pred_diff/pred_sign are
    refreshed against the true endrank.
    """
    cols = ['carno','startlap','startrank','endrank','diff','sign','pred_endrank','pred_diff','pred_sign','endlap','pred_endlap']
    col = {name: pos for pos, name in enumerate(cols)}
    print('rerank...')
    blocks = []
    for lap in set(dfout.startlap.values):
        mat = dfout[dfout['startlap'] == lap].to_numpy()
        # argsort-of-argsort turns float predictions into dense ranks
        order = np.argsort(mat[:, col['pred_endrank']], axis=0)
        mat[:, col['pred_endrank']] = np.argsort(order, axis=0)
        mat[:, col['pred_diff']] = mat[:, col['pred_endrank']] - mat[:, col['endrank']]
        for row in mat:
            diff = row[col['pred_diff']]
            row[col['pred_sign']] = 0 if diff == 0 else (1 if diff > 0 else -1)
        blocks.append(mat)
    stacked = np.vstack(blocks)
    out_cols = cols[:-2] if short else cols
    return pd.DataFrame(stacked.astype(int), columns=out_cols)
# In[ ]:
def long_predict_bymloutput_multirun(output, dfin, test_ds, predictor, sampleCnt=100):
    """
    Build a whole-race forecast track from ML-model outputs across runs.

    Runs the gluonts rolling evaluation only to recover the per-window
    start positions, then fills each window's final predicted lap from
    ``dfin`` (multiple rows per startlap -- one per run -- are written as
    separate samples).

    input:
        output : tag used by the (commented-out) plotting call
        dfin   : df of ML predictions (startlap, pred_endrank, ...)
        test_ds / predictor : gluonts dataset and predictor
        sampleCnt : sample paths per window; presumably must equal the
                    number of rows per startlap in ``dfin`` -- TODO confirm
    return:
        (forecast object with stitched samples, first target series)
    """
    def get_start(idx):
        # minutes between window ``idx``'s start and the series origin
        # == the window's lap offset (dummy 1-minute calendar)
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds,      # test dataset
        predictor = predictor,  # predictor
        num_samples=sampleCnt,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # windows are ordered latest-first: [-1] is earliest, [0] is latest
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # one stitched sample track covering all windows; NaN where unfilled
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        start_pos = get_start(idx)
        # column of this window's final predicted lap in the stitched track
        pos = start_pos - first_start + npredict - 1
        # get prediction from ml output
        # pos = laps
        #   1 ... 10 | 11  <- start pos in forecasts
        #   0 ... 9  | 10  <- 9 is the startlap
        # NOTE(review): the -2 lap alignment differs from the -1 used in
        # long_predict_bydf -- confirm which convention matches the data.
        startlap = start_pos - 2
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values
            newsamples[:, pos] = pred_val + 1
    target.samples = newsamples
    print('multirun target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bymloutput(output, dfin, test_ds, predictor):
    """
    Build a whole-race forecast track from single-run ML-model output.

    Same stitching as long_predict_bymloutput_multirun, but ``dfin`` holds
    one prediction row per startlap; that value is broadcast to all
    sample rows of the stitched track.

    input:
        output : tag used by the (commented-out) plotting call
        dfin   : df of ML predictions (startlap, pred_endrank, ...)
        test_ds / predictor : gluonts dataset and predictor
    return:
        (forecast object with stitched samples, first target series)
    """
    def get_start(idx):
        # lap offset of forecast window ``idx`` (minutes from series origin)
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds,      # test dataset
        predictor = predictor,  # predictor
        num_samples=100,        # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # windows are ordered latest-first: [-1] is earliest, [0] is latest
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # stitched (nsample, total_laps) track; NaN where nothing is predicted
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        start_pos = get_start(idx)
        # column of this window's final predicted lap
        pos = start_pos - first_start + npredict - 1
        # get prediction from ml output
        # pos = laps
        #   1 ... 10 | 11  <- start pos in forecasts
        #   0 ... 9  | 10  <- 9 is the startlap
        startlap = start_pos - 2
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values[0]
            newsamples[:, pos] = pred_val + 1
    target.samples = newsamples
    print('target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bysamples(output, samples, tss, test_ds, predictor):
    """
    use the farest samples only
    input:
        samples
        tss
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds,
        predictor = predictor,
        num_samples=100,
    )
    fc_list = list(forecast_it)
    ts_list = list(ts_it)
    print(f'tss len={len(ts_list)}, forecasts len={len(fc_list)}')
    start_time, _ = next(ts_list[0].iterrows())

    def lap_of(idx):
        # minutes between a window's start and the series origin == lap offset
        delta = fc_list[idx].start_date - start_time
        return delta.days * 24 * 60 + delta.seconds // 60

    first_start = lap_of(-1)
    last_start = lap_of(0)
    print(first_start, last_start)
    import copy
    target = copy.deepcopy(fc_list[-1])
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # stitched array covers every window plus the final prediction horizon
    arraysize = last_start - first_start + npredict
    target.samples = samples[:, first_start:first_start + arraysize] + 1
    print('long_predict_bysamples==>target samples shape:', target.samples.shape)
    return target, ts_list[0]
#
# different idx format to bymloutput
#
def long_predict_bydf(output, dfin, test_ds, predictor):
    """
    Build a whole-race forecast track from a prediction DataFrame.

    Same stitching as long_predict_bymloutput, but with a different
    startlap alignment (start_pos - 1 instead of - 2).

    input:
        output : tag used by the (commented-out) plotting call
        dfin   : df of predictions (startlap, pred_endrank, ...)
        test_ds / predictor : gluonts dataset and predictor
    return:
        (forecast object with stitched samples, first target series)
    """
    def get_start(idx):
        # lap offset of forecast window ``idx`` (minutes from series origin)
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds,      # test dataset
        predictor = predictor,  # predictor
        num_samples=100,        # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # windows are ordered latest-first: [-1] is earliest, [0] is latest
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start(bydf):', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # stitched (nsample, total_laps) track; NaN where nothing is predicted
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        start_pos = get_start(idx)
        # column of this window's final predicted lap
        pos = start_pos - first_start + npredict - 1
        # get prediction from ml output
        # pos = laps
        #   1 ... 10 | 11  <- start pos in forecasts
        #   0 ... 9  | 10  <- 9 is the startlap
        # NOTE(review): -1 here vs -2 in long_predict_bymloutput -- confirm.
        startlap = start_pos - 1
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values[0]
            newsamples[:, pos] = pred_val + 1
    target.samples = newsamples
    print('target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def get_ranknet_multirun(retdata, testcar, test_ds, predictor, sampleCnt=100):
    """Gather one car's records from every run and build its long forecast.

    ``retdata`` maps run-id -> tuple whose first element is the run's
    result DataFrame; the concatenated records are shifted to the
    0-based lap/rank convention expected by the ML-output stitcher.
    """
    per_run = [retdata[key][0] for key in retdata.keys()]
    per_car = [frame[frame['carno'] == testcar] for frame in per_run]
    dfin_ranknet = pd.concat(per_car)
    print('dfin_ranknet size:', len(dfin_ranknet))
    # shift to the ml-model convention (0-based laps/ranks)
    dfin_ranknet['startlap'] = dfin_ranknet['startlap'] - 1
    dfin_ranknet['startrank'] = dfin_ranknet['startrank'] - 1
    dfin_ranknet['endrank'] = dfin_ranknet['endrank'] - 1
    return long_predict_bymloutput_multirun('ranknet-rank', dfin_ranknet,
                                            test_ds, predictor, sampleCnt=sampleCnt)
# In[ ]:
def ploth(ts_entry, forecast_entry, pits, caution, pitstop, outputfile,
          colors = ['r','g','m'],
          plabels= ['observed','svr','arima','ranknet'],
          ylabel = 'RANK'):
    """
    Stacked per-model forecast subplots for one car.

    Args:
        ts_entry: list of observed series (one per subplot)
        forecast_entry: list of gluonts forecast objects (one per subplot)
        pits: rows of (lap, rank, laptime, gap) at pit stops
        caution, pitstop: per-lap 0/1 flags for the status colour strip
        outputfile: figure title and pdf file stem
        colors/plabels: per-subplot forecast colour / legend label
        ylabel: 'RANK' or a laptime label; switches axis limits and strip
    Side effects: shows the figure and writes ``outputfile + '.pdf'``.
    """
    prediction_intervals = [90.0]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    figcnt = len(forecast_entry)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    for idx in range(figcnt):
        ax = plt.subplot(figcnt, 1, idx+1)
        # observed series
        ts_entry[idx].iloc[:,0].plot(linewidth=1, color='b',
                marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # currank: the observed series delayed by 2 laps (naive baseline)
        sv = ts_entry[idx].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq='1min') + 2
        date_index = pd.date_range(start, periods = len(sv)-2, freq='1min')
        df2 = pd.DataFrame(sv[:-2], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
                marker='+', alpha=0.7, zorder=-1, label='CurRank')
        # model forecast with its 90% interval
        forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals,
                color=colors[idx],label=plabels[idx+1], zorder=10)
        #add mean line, compare with median
        if idx>3:
            mean_forecast = copy.deepcopy(forecast_entry[idx])
            mean_forecast.samples = np.mean(mean_forecast.samples, axis=0).reshape((1,-1))
            mean_forecast.copy_dim(0).plot(prediction_intervals=prediction_intervals,
                    color='g',label='use-mean', zorder=10)
        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        ax.set_ylabel(ylabel)
        if idx==0:
            plt.title(outputfile)
        # relabel x ticks from the dummy timestamps to lap numbers
        locs, labels = plt.xticks()
        start_loc = locs[0]
        offset = range(0, gvar.maxlap, 5)
        new_locs = [start_loc + x for x in offset]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[idx])
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            # NOTE(review): reuses ``xl`` from the previous iteration here.
            xlim_h = len(ts_entry[idx])
            ax.set_xlim((xl+0,xl+xlim_h))
        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)
        #add racestatus
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcar(carno):
    """Plot rank forecasts for one car (globals: alldata, gvar.rankdata)."""
    series_list, forecast_list = alldata[carno]
    status = get_racestatus(carno, gvar.rankdata)
    pits, caution, pitstop = status[0], status[2], status[3]
    print(np.where(pitstop == 1))
    labels = ['observed', 'SVR', 'RF', 'Arima', 'RrankNet-Oracle', 'RrankNet-MLP']
    ploth(series_list[:5], forecast_list[:5], pits, caution, pitstop,
          'ranknet-rf-rank-forecast-%d' % carno,
          colors=['y', 'c', 'g', 'm', 'r'],
          plabels=labels)
def plotcar_laptime(carno):
    """Plot laptime forecasts for one car (globals: alldata, gvar.rankdata)."""
    series_list, forecast_list = alldata[carno]
    status = get_racestatus(carno, gvar.rankdata)
    pits, caution, pitstop = status[0], status[2], status[3]
    print(np.where(pitstop == 1))
    ploth(series_list, forecast_list, pits, caution, pitstop,
          'ranknet-oracle-laptime-forecast-%d' % carno,
          colors=['m', 'r'],
          plabels=['observed', 'RrankNet-Oracle', 'RrankNet-MLP'],
          ylabel='LapTime')
def plotrank(outputfile, mode='RANK' ):
    """
    One subplot per car showing the observed rank (or laptime) trace with
    pit-stop markers and the caution/pit colour strip.

    input:
        alldata, rankdata; global data
        mode: 'RANK' -> rank trace; anything else -> laptime trace
    Side effects: shows the figure and writes ``outputfile + '.pdf'``.
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        tsss, targets = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, gvar.rankdata)
        print(np.where(pitstop==1))
        ax = plt.subplot(figcnt, 1, idx+1)
        if mode == 'RANK':
            ax.plot(ranks, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='Rank')
            ax.set_ylim((-5,+35))
            # pit markers at (lap-1, rank)
            ax.plot(pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            #add racestatus
            add_status(ax,0, caution, pitstop)
        else:
            ax.plot(laptimes, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='LapTime')
            ax.set_ylim((30,140))
            # pit markers at (lap-1, laptime)
            ax.plot(pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            #add racestatus
            add_status(ax,0, caution, pitstop,y=32, height=5)
        ax.set_xlim((0,gvar.maxlap))
        ax.set_ylabel('car-%d'%carno)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcarx(carno):
    """Like plotcar, but subplot 2 is replaced by the weighted-oracle result
    (globals: alldata, oracledata, gvar.rankdata)."""
    series_list, forecast_list = alldata[carno]
    oracle_series, oracle_forecasts = oracledata[carno]
    series_list[2] = oracle_series[1]
    forecast_list[2] = oracle_forecasts[1]
    status = get_racestatus(carno, gvar.rankdata)
    pits, caution, pitstop = status[0], status[2], status[3]
    print(np.where(pitstop == 1))
    labels = ['observed', 'SVR', 'RF', 'Weighted-Oracle', 'RrankNet-Oracle', 'RrankNet-MLP']
    ploth(series_list[:5], forecast_list[:5], pits, caution, pitstop,
          'ranknet-rf-rank-forecast-%d' % carno,
          colors=['y', 'c', 'g', 'm', 'r'],
          plabels=labels)
def plotoracle(alldata, carno, destdir):
    """Plot the oracle long-forecast variants for one car into ``destdir``."""
    outputfile = destdir + 'ranknet-oracle-forecast-%d' % carno
    series_list, forecast_list = alldata[carno]
    status = get_racestatus(carno, gvar.rankdata)
    pits, caution, pitstop = status[0], status[2], status[3]
    print(np.where(pitstop == 1))
    ploth(series_list, forecast_list, pits, caution, pitstop,
          outputfile,
          colors=['y', 'c', 'g', 'm', 'r'],
          plabels=['observed', '1run-samples', '1run-df', 'multimean',
                   'norerank-multimean', 'mrun-samples'])
def plotallcars(alldata, outputfile, drawid = 0,
                colors = ['g','c','m','r','y'],
                plabels= ['observed','1run-samples','1run-df','multimean','norerank-multimean','mrun-samples'],
                ylabel='RANK'):
    """
    plot a single fig for all cars
    input:
        prediction_length,freq ; global var
        alldata, rankdata; global data
        drawid : long prediction result index in alldata[carno] to draw
    Side effects: shows the figure and writes ``outputfile + '.pdf'``.
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    prediction_intervals = [90.0]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    font = {'family': 'serif',
            'color':  'darkred',
            'weight': 'normal',
            'size': 12,
            }
    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        ts_entry, forecast_entry = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, gvar.rankdata)
        print(np.where(pitstop==1))
        ax = plt.subplot(figcnt, 1, idx+1)
        # observed
        ts_entry[drawid].iloc[:,0].plot(linewidth=1, color='b',
                marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # currank: observed series delayed by prediction_length laps
        sv = ts_entry[drawid].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq=gvar.freq) + gvar.prediction_length
        date_index = pd.date_range(start, periods = len(sv) - gvar.prediction_length, freq = gvar.freq)
        df2 = pd.DataFrame(sv[:- gvar.prediction_length], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
                marker='+', alpha=0.7, zorder=-1, label='CurRank')
        #forecast
        forecast_entry[drawid].copy_dim(0).plot(prediction_intervals=prediction_intervals,
                color=colors[drawid],label=plabels[drawid+1], zorder=10)
        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        ax.set_ylabel(ylabel)
        # relabel x ticks from the dummy timestamps to lap numbers
        locs, labels = plt.xticks()
        start_loc = locs[0]
        offset = range(0, gvar.maxlap, 5)
        new_locs = [start_loc + x for x in offset]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[drawid])
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            # NOTE(review): reuses ``xl`` from the previous iteration here.
            xlim_h = len(ts_entry[drawid])
            ax.set_xlim((xl+0,xl+xlim_h))
        # per-subplot car label in the top-right corner
        plt.text(xl + xlim_h - 15, 35, f'car-{carno}',fontdict=font)
        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)
        #add racestatus
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def get_racestatus_all(rankdata):
    """Return sorted lists of laps that saw any pit stop / any caution flag."""
    frame = rankdata
    data = frame[['completed_laps','rank','last_laptime','time_behind_leader']].values
    # 'P' lap_status marks a pit stop; 'Y' track_status marks a caution
    pit_flags = np.array([1 if v == 'P' else 0 for v in frame[['lap_status']].values])
    caution_flags = np.array([1 if v == 'Y' else 0 for v in frame[['track_status']].values])
    pit_rows = data[np.where(pit_flags == 1)]
    caution_rows = data[np.where(caution_flags == 1)]
    pitlaps = sorted(set(list(pit_rows[:,0].astype(int))))
    cautionlaps = sorted(set(list(caution_rows[:,0].astype(int))))
    return pitlaps, cautionlaps
def get_racestatus(carno, rankdata):
    """Extract race-status arrays for one car.

    completed_laps starts at 0; array-mode consumers index lap 1 at 0,
    hence the trailing ``[1:]`` slices on the per-lap arrays.
    Returns (pits, cautions, caution[1:], pitstop[1:], ranks[1:], laptimes[1:]).
    """
    car_df = rankdata[rankdata['car_number'] == carno]
    data = car_df[['completed_laps','rank','last_laptime','time_behind_leader']].values
    # 'P' lap_status marks a pit stop; 'Y' track_status marks a caution
    pitstop = np.array([1 if v == 'P' else 0 for v in car_df[['lap_status']].values])
    caution = np.array([1 if v == 'Y' else 0 for v in car_df[['track_status']].values])
    pits = data[np.where(pitstop == 1)]
    cautions = data[np.where(caution == 1)]
    ranks = car_df[['rank']].values
    laptimes = car_df[['last_laptime']].values
    return pits, cautions, caution[1:], pitstop[1:], ranks[1:], laptimes[1:]
# Colour palette for the race-status strip drawn by add_status():
# red = pit stop, yellow = caution, green = otherwise.
# The commented-out hex values are earlier alternatives.
#red = '#ff8080'
red = 'red'
#yellow = '#8080ff'
yellow = 'yellow'
#green = '#80ff80'
green = 'green'
def add_status(axs, xl, caution, pitstop, y=-4, height=2):
    """
    Draw a per-lap status strip on ``axs``: red where pitstop==1, yellow
    where caution==1, green otherwise (pit wins when both flags are set).

    input:
        caution, pitstop : race status
    """
    n_laps = min(len(caution), len(pitstop))
    for lap in range(n_laps):
        if pitstop[lap] == 1:
            color = red
        elif caution[lap] == 1:
            color = yellow
        else:
            color = green
        patch = plt.Rectangle((lap + xl - 0.5, y), 1, height, fc=color, ec=color)
        axs.add_patch(patch)
# In[ ]:
#
# stint evaluation
#
def eval_bydf(testdf, bydf, forcematch=True, force2int=False):
    """Evaluate ``testdf`` restricted to the <carno, startlap> keys of ``bydf``.

    With ``forcematch`` on, records whose (startrank, endrank) disagree
    between the two frames are dropped before evaluation.
    Returns (sorted result df, accuracy from stint.get_evalret).
    """
    kept = []
    for car in set(bydf.carno.values):
        for startlap in set(bydf[bydf['carno'] == car].startlap.values):
            dfrec = testdf[(testdf['carno'] == car) & (testdf['startlap'] == startlap)]
            if forcematch:
                a = testdf[(testdf['carno']==car) & (testdf['startlap']==startlap)].to_numpy().astype(int)
                b = bydf[(bydf['carno']==car) & (bydf['startlap']==startlap)].to_numpy().astype(int)
                if len(a) != 0 and len(b) != 0:
                    # skip records whose startrank/endrank disagree
                    if not ((a[0][2] == b[0][2]) and (a[0][3] == b[0][3])):
                        continue
            kept.append(dfrec)
    dfout = pd.concat(kept)
    if force2int:
        dfout = pd.DataFrame(dfout.to_numpy().astype(int),
                             columns=['carno', 'startlap', 'startrank',
                                      'endrank', 'diff', 'sign',
                                      'pred_endrank', 'pred_diff', 'pred_sign',
                                      'endlap', 'pred_endlap'])
    dfout = dfout.sort_values(by=['carno','startlap'])
    print('df size:', len(dfout))
    #return acc
    accret = stint.get_evalret(dfout)[0]
    return dfout, accret
def eval_sync(testdf, errlist, force2int=False):
    """
    eval df result by sync with the errlist detected
    remove the records in errlist

    ``errlist`` holds [carno, startlap] pairs to exclude before scoring.
    Returns (sorted result df, accuracy from stint.get_evalret).
    """
    kept = []
    for car in set(testdf.carno.values):
        for startlap in set(testdf[testdf['carno'] == car].startlap.values):
            if [car, startlap] in errlist:
                continue
            kept.append(testdf[(testdf['carno'] == car) & (testdf['startlap'] == startlap)])
    dfout = pd.concat(kept)
    if force2int:
        dfout = pd.DataFrame(dfout.to_numpy().astype(int),
                             columns=['carno', 'startlap', 'startrank',
                                      'endrank', 'diff', 'sign',
                                      'pred_endrank', 'pred_diff', 'pred_sign',
                                      'endlap', 'pred_endlap'])
    dfout = dfout.sort_values(by=['carno','startlap'])
    print('df size:', len(dfout))
    #return acc
    accret = stint.get_evalret(dfout)[0]
    return dfout, accret
def cmp_df(testdf, bydf):
    """
    df can be different, minor difference for the rank when RankNet removes short ts

    Compare two result frames record-by-record on <carno, startlap> and
    report records whose (startrank, endrank) disagree or are missing.
    Returns (error count, list of [carno, startlap] mismatches).
    """
    err_list = []
    errcnt = 0
    for car in set(bydf.carno.values):
        for startlap in set(bydf[bydf['carno'] == car].startlap.values):
            a = testdf[(testdf['carno']==car) & (testdf['startlap']==startlap)].to_numpy().astype(int)
            b = bydf[(bydf['carno']==car) & (bydf['startlap']==startlap)].to_numpy().astype(int)
            if len(a) == 0 or len(b) == 0:
                errcnt += 1
                print('mismatch empty:', a, b)
                err_list.append([car, startlap])
            elif not ((a[0][2] == b[0][2]) and (a[0][3] == b[0][3])):
                # startrank or endrank disagree
                print('mismatch:', a, b)
                errcnt += 1
                err_list.append([car, startlap])
    print('errcnt:', errcnt)
    return errcnt, err_list
def df2samples(dfall, prediction_len=2, samplecnt=1):
    """
    convert a df into <samples, tss> format
    this version works for the output of ml modles which contains only 1 sample

    The single prediction per <carno, startlap> is stored at
    startlap + prediction_len and broadcast to all ``samplecnt`` rows.
    """
    carlist = set(dfall.carno.values)
    full_samples, full_tss = {}, {}
    for carno in carlist:
        samples = np.full((samplecnt, gvar.maxlap), np.nan)
        tss = np.full(gvar.maxlap, np.nan)
        car_df = dfall[dfall['carno'] == carno]
        for startlap in set(car_df.startlap.values):
            rec = car_df[car_df['startlap'] == startlap]
            lap = int(rec.startlap.values[0] + prediction_len)
            tss[lap] = rec.endrank.values[0]
            samples[:, lap] = rec.pred_endrank.values[0]
        full_samples[carno] = samples
        full_tss[carno] = tss
    return full_samples, full_tss
def df2samples_ex(dfall, samplecnt=100, errlist=[]):
    """
    for stint results only
    get samples from the runs
    input:
        dfall   ; concatenated result df <carno,startlap,startrank,endrank,diff,sign,pred_endrank,pred_diff,pred_sign,endlap,pred_endlap>
        errlist ; <car, startlap> list to skip
    return:
        samples, tss  (values stored at the stint's end lap)
    """
    full_samples, full_tss = {}, {}
    carlist = set(dfall.carno.values)
    for carno in carlist:
        samples = np.full((samplecnt, gvar.maxlap), np.nan)
        tss = np.full(gvar.maxlap, np.nan)
        car_df = dfall[dfall['carno'] == carno]
        for startlap in set(car_df.startlap.values):
            if [carno, startlap] in errlist:
                continue
            rec = car_df[car_df['startlap'] == startlap]
            # save to the endlap (not the startlap)
            lap = int(rec.endlap.values[0])
            tss[lap] = rec.endrank.values[0]
            # one forecast value per run/row at this startlap
            samples[:, lap] = rec.pred_endrank.to_numpy()
        full_samples[carno] = samples
        full_tss[carno] = tss
    return full_samples, full_tss
def runs2samples(runret, errlist):
    """Collect per-run stint forecasts into <samples, tss> arrays.

    input:
        runret  ; list of result dfs <carno,startlap,startrank,endrank,diff,
                  sign,pred_endrank,pred_diff,pred_sign,endlap,pred_endlap>,
                  one df per simulation run
        errlist ; list of [car, startlap] records to exclude
    return:
        samples, tss
    """
    samplecnt = len(runret)
    dfall = pd.concat(runret)
    cars = set(runret[0].carno.values)
    laps_of = {car: set(dfall[dfall['carno'] == car].startlap.values) for car in cars}
    full_samples = {}
    full_tss = {}
    for car in cars:
        # NaN-initialized containers covering the whole race
        full_tss[car] = np.full(gvar.maxlap, np.nan)
        full_samples[car] = np.full((samplecnt, gvar.maxlap), np.nan)
        for lap0 in laps_of[car]:
            if [car, lap0] in errlist:
                continue
            rec = dfall[(dfall['carno'] == car) & (dfall['startlap'] == lap0)]
            start_lap = int(rec.startlap.values[0])
            # one prediction per run -> one sample row per run
            full_samples[car][:, start_lap] = rec.pred_endrank.to_numpy()
            full_tss[car][start_lap] = rec.endrank.values[0]
    return full_samples, full_tss
# In[ ]:
def get_config():
    """Snapshot the module-level experiment settings as an ordered list."""
    return [
        _savedata, _skip_overwrite, _inlap_status, _feature_mode,
        _featureCnt, freq, _train_len, prediction_length,
        context_ratio, context_length, contextlen, dataset,
        epochs, gpuid, _use_weighted_model, trainmodel,
        _use_cate_feature, use_feat_static, distroutput, batch_size,
        loopcnt, _test_event, testmodel, pitmodel, year,
    ]
def test_global():
    """Bump the shared counter in the globals module (visibility check)."""
    gvar._hi = gvar._hi + 200
def get_event_info(event):
    """Return the race-info record registered for *event* in gvar."""
    #eid = event.split('-')[0]
    race_info = gvar._race_info
    return race_info[event]
| 104,909 | 32.592699 | 199 | py |
rankpredictor | rankpredictor-master/src/indycar/model/save/before_multiple_datasets/RankNet-QuickTest-Slim-beforemultidataset.py | #!/usr/bin/env python
# coding: utf-8
# ## QuickTest Slim
#
# based on : RankNet-QuickTest-Joint
#
# makedb laptime
# makedb gluonts
# train model
# evaluate model
#
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import random
import mxnet as mx
from mxnet import gluon
import pickle
import json
import copy
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from pathlib import Path
import configparser
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWeightEstimator
#import indycar.model.stint_simulator_shortterm_pitmodel as stint
import indycar.model.quicktest_simulator as stint
# import all functions
#from indycar.model.global_variables import _hi
import indycar.model.global_variables as gvar
from indycar.model.quicktest_modules import *
# ## run
# In[2]:
### run
# --- logging and command-line parsing for the QuickTest driver script ---
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
# logging configure
# NOTE(review): mid-file import; `logging` itself is already imported above.
import logging.config
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
# cmd argument parser
usage = 'RankNet-QuickTest.py <configfile> [options]'
parser = OptionParser(usage)
# empty-string / -1 defaults mean "not set on the command line"; the
# override block below only applies options that differ from these defaults
parser.add_option("--forecast_mode", dest="forecast_mode", default="")
parser.add_option("--trainmodel", default='', dest="trainmodel")
parser.add_option("--testmodel", default='', dest="testmodel")
parser.add_option("--joint_train", action="store_true", default=False, dest="joint_train")
parser.add_option("--debug", action="store_true", default=False, dest="debug")
parser.add_option("--loopcnt", default=-1,type='int', dest="loopcnt")
parser.add_option("--gpuid", default=-1,type='int', dest="gpuid")
parser.add_option("--pitmodel_bias", default=-1, type='int', dest="pitmodel_bias")
parser.add_option("--year", default='', dest="year")
parser.add_option("--test_event", default='', dest="test_event")
opt, args = parser.parse_args()
print(len(args), opt.joint_train)
#check validation
# exactly one positional argument (the config file) is required
if len(args) != 1:
    logger.error(globals()['__doc__'] % locals())
    sys.exit(-1)
# --- read all experiment settings from the .ini config file ---
configfile = args[0]
base=os.path.basename(configfile)
configname = os.path.splitext(base)[0]
WorkRootDir = 'QuickTestOutput'
#configname = 'weighted-noinlap-nopitage-nocate-c60-drank'
#configfile = f'{configname}.ini'
if not os.path.exists(configfile):
    print('config file not exists error:', configfile)
    sys.exit(-1)
if configfile != '':
    config = configparser.RawConfigParser()
    #config.read(WorkRootDir + '/' + configfile)
    config.read(configfile)
    #set them back
    # every option lives under a single section; inline comments show
    # typical values
    section = "RankNet-QuickTest"
    _savedata = config.getboolean(section, "_savedata")
    _skip_overwrite = config.getboolean(section, "_skip_overwrite")
    _inlap_status = config.getint(section, "_inlap_status") #0
    _feature_mode = config.getint(section, "_feature_mode") #FEATURE_STATUS
    _featureCnt = config.getint(section, "_featureCnt") #9
    freq = config.get(section, "freq") #"1min"
    _train_len = config.getint(section, "_train_len") #40
    prediction_length = config.getint(section, "prediction_length") #2
    context_ratio = config.getfloat(section, "context_ratio") #0.
    context_length = config.getint(section, "context_length") #40
    dataset= config.get(section, "dataset") #'rank'
    epochs = config.getint(section, "epochs") #1000
    gpuid = config.getint(section, "gpuid") #5
    _use_weighted_model = config.getboolean(section, "_use_weighted_model")
    trainmodel = config.get(section, "trainmodel") #'deepARW-Oracle' if _use_weighted_model else 'deepAR-Oracle'
    _use_cate_feature = config.getboolean(section, "_use_cate_feature")
    distroutput = config.get(section, "distroutput") #'student'
    batch_size = config.getint(section, "batch_size") #32
    loopcnt = config.getint(section, "loopcnt") #2
    _test_event = config.get(section, "_test_event") #'Indy500-2018'
    testmodel = config.get(section, "testmodel") #'oracle'
    pitmodel = config.get(section, "pitmodel") #'oracle'
    year = config.get(section, "year") #'2018'
    # aliases used by the rest of the script
    contextlen = context_length
    use_feat_static = _use_cate_feature
    #config1 = get_config()
else:
    print('Warning, please use config file')
    sys.exit(0)
# In[3]:
# debug test
#_skip_overwrite = False
# --- defaults for run-mode parameters, then command-line overrides ---
if opt.debug:
    _debugstr = '-debug'
else:
    _debugstr = ''
#gpuid = 5
#epochs = 1000
# new added parameters
_test_train_len = 40
_joint_train = False
_pitmodel_bias = 0
#_test_event = 'Indy500-2019'
#year = '2019'
#shortterm, stint
#_forecast_mode = 'stint'
_forecast_mode = 'shortterm'
# bias of the pitmodel
#_pitmodel_bias = 4
#train model: [deepARW-Oracle, deepAR]
# test the standard deepAR model training and testing
# DeepAR
#trainmodel = 'deepAR'
#testmodel = 'standard'
# Joint
#trainmodel = 'deepAR-multi'
#testmodel = 'joint'
#_joint_train = True
#loopcnt = 2
# transformer
#trainmodel = 'Transformer-Oracle'
#testmodel = 'Transformer-Oracle'
#trainmodel = 'Transformer'
#testmodel = 'Transformer'
#_joint_train = False
#loopcnt = 2
#load arguments overwites
# a command-line option only takes effect when it differs from the
# parser defaults ('' / -1 / False) declared above
if opt.forecast_mode != '':
    _forecast_mode = opt.forecast_mode
if opt.trainmodel != '':
    trainmodel = opt.trainmodel
if opt.testmodel != '':
    testmodel = opt.testmodel
if opt.joint_train != False:
    _joint_train = True
if opt.gpuid > 0:
    gpuid = opt.gpuid
if opt.loopcnt > 0:
    loopcnt = opt.loopcnt
if opt.pitmodel_bias >= 0:
    _pitmodel_bias = opt.pitmodel_bias
if opt.year != '':
    year = opt.year
if opt.test_event != '':
    _test_event = opt.test_event
# encode a non-zero bias into the pitmodel test-model name
if testmodel == 'pitmodel':
    testmodel = 'pitmodel%s'%(_pitmodel_bias if _pitmodel_bias!=0 else '')
#featurestr = {FEATURE_STATUS:'nopitage',FEATURE_PITAGE:'pitage',FEATURE_LEADERPITCNT:'leaderpitcnt'}
#cur_featurestr = featurestr[_feature_mode]
print('current configfile:', configfile)
cur_featurestr = decode_feature_mode(_feature_mode)
print('feature_mode:', _feature_mode, cur_featurestr)
print('testmodel:', testmodel)
print('pitmodel:', pitmodel)
print('year:', year)
print('test_event:', _test_event)
# In[4]:
#
# string map
#
# --- derived experiment identifiers, output paths, and global state ---
inlapstr = {0:'noinlap',1:'inlap',2:'outlap'}
weightstr = {True:'weighted',False:'noweighted'}
catestr = {True:'cate',False:'nocate'}
#
# input data parameters
#
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}_v{_featureCnt}_p{_inlap_status}'
_dataset_id = '%s-%s'%(inlapstr[_inlap_status], cur_featurestr)
#
# internal parameters
#
distr_outputs ={'student':StudentTOutput(),
                'negbin':NegativeBinomialOutput()
               }
distr_output = distr_outputs[distroutput]
#
#
#
experimentid = f'{weightstr[_use_weighted_model]}-{inlapstr[_inlap_status]}-{cur_featurestr}-{catestr[_use_cate_feature]}-c{context_length}{_debugstr}'
#
#
#
outputRoot = f"{WorkRootDir}/{experimentid}/"
# standard output file names
LAPTIME_DATASET = f'{outputRoot}/laptime_rank_timediff_pit-oracle-{dbid}.pickle'
STAGE_DATASET = f'{outputRoot}/stagedata-{dbid}.pickle'
# year related
SIMULATION_OUTFILE = f'{outputRoot}/{_test_event}/{_forecast_mode}-dfout-{trainmodel}-indy500-{dataset}-{inlapstr[_inlap_status]}-{cur_featurestr}-{testmodel}-l{loopcnt}-alldata.pickle'
EVALUATION_RESULT_DF = f'{outputRoot}/{_test_event}/{_forecast_mode}-evaluation_result_d{dataset}_m{testmodel}.csv'
LONG_FORECASTING_DFS = f'{outputRoot}/{_test_event}/{_forecast_mode}-long_forecasting_dfs_d{dataset}_m{testmodel}.pickle'
FORECAST_FIGS_DIR = f'{outputRoot}/{_test_event}/{_forecast_mode}-forecast-figs-d{dataset}_m{testmodel}/'
# In[5]:
# set global vars
# mirror every local setting into the shared globals module so that the
# imported quicktest modules see the same configuration
gvar._savedata = _savedata
gvar._skip_overwrite = _skip_overwrite
gvar._inlap_status = _inlap_status
gvar._feature_mode = _feature_mode
gvar._featureCnt = _featureCnt
gvar.freq = freq
gvar._train_len = _train_len
gvar.prediction_length = prediction_length
gvar.context_ratio = context_ratio
gvar.context_length = context_length
gvar.contextlen = contextlen
gvar.dataset = dataset
gvar.epochs = epochs
gvar.gpuid = gpuid
gvar._use_weighted_model = _use_weighted_model
gvar.trainmodel = trainmodel
gvar._use_cate_feature = _use_cate_feature
gvar.use_feat_static = use_feat_static
gvar.distroutput = distroutput
gvar.batch_size = batch_size
gvar.loopcnt = loopcnt
gvar._test_event = _test_event
gvar.testmodel = testmodel
gvar.pitmodel = pitmodel
gvar.year = year
gvar._forecast_mode = _forecast_mode
gvar._test_train_len = _test_train_len
gvar._joint_train = _joint_train
gvar._pitmodel_bias = _pitmodel_bias
gvar.events = events
gvar.events_id = events_id
# ### 1. make laptime dataset
# In[6]:
# --- step 1: build (or reload) the laptime/rank/stage dataset ---
stagedata = {}
global_carids = {}
os.makedirs(outputRoot, exist_ok=True)
os.makedirs(f'{outputRoot}/{_test_event}', exist_ok=True)
#check the dest files first
if _skip_overwrite and os.path.exists(LAPTIME_DATASET) and os.path.exists(STAGE_DATASET):
    #
    # load data
    #
    print('Load laptime and stage dataset:',LAPTIME_DATASET, STAGE_DATASET)
    with open(LAPTIME_DATASET, 'rb') as f:
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    with open(STAGE_DATASET, 'rb') as f:
        stagedata = pickle.load(f, encoding='latin1')
else:
    # rebuild from the raw per-event CSVs
    cur_carid = 0
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        print('%s: carno=%d, lapnum=%d'%(event, len(carlist), len(laplist)))
        #build the carid map
        # assign a globally unique id to every car number seen in any event
        for car in carlist:
            if car not in global_carids:
                global_carids[car] = cur_carid
                cur_carid += 1
    laptime_data = get_laptime_dataset(stagedata, inlap_status = _inlap_status)
    if _savedata:
        # NOTE(review): redundant import; pickle is already imported at file top
        import pickle
        #stintdf.to_csv('laptime-%s.csv'%year)
        #savefile = outputRoot + f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
        savefile = LAPTIME_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            #pack [global_carids, laptime_data]
            savedata = [global_carids, laptime_data]
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        #savefile = outputRoot + f'stagedata-{dbid}.pickle'
        savefile = STAGE_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            #pack [global_carids, laptime_data]
            savedata = stagedata
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
#update global var
gvar.global_carids = global_carids
# ### 2. make gluonts db
# In[7]:
# --- step 2: build (or reload) the GluonTS train/test dataset ---
outdir = outputRoot + _dataset_id
os.makedirs(outdir, exist_ok=True)
# pick the target time series column by dataset type
if dataset == 'laptime':
    subdir = 'laptime-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_LAPTIME
elif dataset == 'timediff':
    subdir = 'timediff-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_TIMEDIFF
elif dataset == 'rank':
    subdir = 'rank-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_RANK
else:
    print('error, dataset not support: ', dataset)
_task_dir = f'{outdir}/{subdir}/'
#
#dbname, train_ds, test_ds = makedbs()
#
useeid = False
interpolate = False
#ipstr = '-ip' if interpolate else '-noip'
ipstr = '%s-%s'%('ip' if interpolate else 'noip', 'eid' if useeid else 'noeid')
jointstr = '-joint' if _joint_train else ''
dbname = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}{jointstr}.pickle'
laptimedb = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}-newlaptimedata.pickle'
#check the dest files first
if _skip_overwrite and os.path.exists(dbname) and os.path.exists(laptimedb):
    print('Load Gluonts Dataset:',dbname)
    with open(dbname, 'rb') as f:
        freq, prediction_length, cardinality, train_ds, test_ds = pickle.load(f, encoding='latin1')
        print('.......loaded data, freq=', freq, 'prediction_length=', prediction_length)
    print('Load New Laptime Dataset:',laptimedb)
    with open(laptimedb, 'rb') as f:
        prepared_laptimedata = pickle.load(f, encoding='latin1')
else:
    if useeid:
        cardinality = [len(global_carids), len(laptime_data)]
    else:
        cardinality = [len(global_carids)]
    prepared_laptimedata = prepare_laptimedata(laptime_data,
            prediction_length, freq, test_event = _test_event,
            train_ratio=0, context_ratio = 0.,shift_len = prediction_length)
    train_ds, test_ds,_,_ = make_dataset_byevent(prepared_laptimedata,
            prediction_length,freq,
            useeid=useeid, run_ts=_run_ts,
            test_event=_test_event, log_transform =False,
            context_ratio=0, train_ratio = 0, joint_train = _joint_train)
    if _savedata:
        print('Save Gluonts Dataset:',dbname)
        with open(dbname, 'wb') as f:
            savedata = [freq, prediction_length, cardinality, train_ds, test_ds]
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        print('Save preprocessed laptime Dataset:',laptimedb)
        with open(laptimedb, 'wb') as f:
            pickle.dump(prepared_laptimedata, f, pickle.HIGHEST_PROTOCOL)
# ### 3. train the model
# In[8]:
# --- step 3: train the forecasting model (skipped when a checkpoint exists) ---
# NOTE(review): `id` shadows the builtin; harmless here but rename on refactor.
id='oracle'
run=1
runid=f'{trainmodel}-{dataset}-all-indy-f1min-t{prediction_length}-e{epochs}-r{run}_{id}_t{prediction_length}'
modelfile = _task_dir + runid
if _skip_overwrite and os.path.exists(modelfile):
    print('Model checkpoint found at:',modelfile)
else:
    #get target dim
    entry = next(iter(train_ds))
    target_dim = entry['target'].shape
    target_dim = target_dim[0] if len(target_dim) > 1 else 1
    # NOTE(review): print() does not interpolate '%s'; likely meant % or f-string
    print('target_dim:%s', target_dim)
    estimator = init_estimator(trainmodel, gpuid,
            epochs, batch_size,target_dim, distr_output = distr_output,use_feat_static = use_feat_static)
    predictor = estimator.train(train_ds)
    if _savedata:
        os.makedirs(modelfile, exist_ok=True)
        print('Start to save the model to %s', modelfile)
        predictor.serialize(Path(modelfile))
        print('End of saving the model.')
#
# ### 4. evaluate the model
# In[9]:
# --- step 4: run (or reload) the race simulation for evaluation ---
lapmode = _inlap_status
fmode = _feature_mode
runts = dataset
# result-dictionary key for this model/dataset/year/feature combination
mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], cur_featurestr)
datasetid = outputRoot + _dataset_id
if _skip_overwrite and os.path.exists(SIMULATION_OUTFILE):
    print('Load Simulation Results:',SIMULATION_OUTFILE)
    with open(SIMULATION_OUTFILE, 'rb') as f:
        dfs,acc,ret,pret = pickle.load(f, encoding='latin1')
        print('.......loaded data, ret keys=', ret.keys())
    # init the stint module
    #
    # in test mode, set all train_len = 40 to unify the evaluation results
    #
    init_simulation(datasetid, _test_event, 'rank',stint.COL_RANK,'rank',prediction_length,
            pitmodel=pitmodel, inlapmode=lapmode,featuremode =fmode,
            train_len = _test_train_len, pitmodel_bias= _pitmodel_bias)
else:
    #run simulation
    acc, ret, pret = {}, {}, {}
    #lapmode = _inlap_status
    #fmode = _feature_mode
    #runts = dataset
    #mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], featurestr[fmode])
    # 'rank' forecasts the rank series directly; anything else forecasts
    # time differences and converts them to ranks
    if runts == 'rank':
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                'rank',stint.COL_RANK,'rank',
                prediction_length, stint.MODE_ORACLE,loopcnt,
                pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
                train_len = _test_train_len, forecastmode = _forecast_mode, joint_train = _joint_train,
                pitmodel_bias= _pitmodel_bias, prepared_laptimedata = prepared_laptimedata,
                epochs = epochs)
    else:
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                'timediff',stint.COL_TIMEDIFF,'timediff2rank',
                prediction_length, stint.MODE_ORACLE,loopcnt,
                pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
                train_len = _test_train_len, forecastmode = _forecast_mode, joint_train = _joint_train,
                pitmodel_bias= _pitmodel_bias, prepared_laptimedata = prepared_laptimedata,
                epochs = epochs)
    if _forecast_mode == 'shortterm':
        allsamples, alltss = get_allsamples(ret[mid], year=year)
        _, pret[mid]= prisk_direct_bysamples(allsamples, alltss)
        print(pret[mid])
    # aggregate per-run results into one dataframe (mode=1 -> mean)
    dfs={}
    mode=1
    df = get_alldf_mode(ret[mid], year=year,mode=mode, forecast_mode = _forecast_mode)
    name = '%s_%s'%(testmodel, 'mean' if mode==1 else ('mode' if mode==0 else 'median'))
    if year not in dfs:
        dfs[year] = {}
    dfs[year][name] = df
    _trim = 0
    _include_final = True
    _include_stintlen = True
    include_str = '1' if _include_final else '0'
    stint_str = '1' if _include_stintlen else ''
    #simulation_outfile=outputRoot + f'shortterm-dfout-oracle-indy500-{dataset}-{inlapstr[_inlap_status]}-{featurestr[_feature_mode]}-2018-oracle-l{loopcnt}-alldata-weighted.pickle'
    with open(SIMULATION_OUTFILE, 'wb') as f:
        savedata = [dfs,acc,ret,pret]
        pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
#alias
ranknetdf = dfs
ranknet_ret = ret
# ### 5. final evaluation
# In[10]:
# --- step 5: final evaluation table (accuracy / MAE / quantile risks) ---
if _skip_overwrite and os.path.exists(EVALUATION_RESULT_DF):
    print('Load Evaluation Results:',EVALUATION_RESULT_DF)
    oracle_eval_result = pd.read_csv(EVALUATION_RESULT_DF)
else:
    ##-------------------------------------------------------------------------------
    if _forecast_mode == 'shortterm':
        # get pit laps, pit-covered-laps
        # pitdata[year] = [pitlaps, pitcoveredlaps]
        with open('pitcoveredlaps-g1.pickle', 'rb') as f:
            # The protocol version used is detected automatically, so we do not
            # have to specify it.
            pitdata = pickle.load(f, encoding='latin1')
        #
        # Model,SignAcc,MAE,50-Risk,90-Risk
        #
        cols = ['Year','Model','ExpID','laptype','Top1Acc','MAE','50-Risk','90-Risk']
        plen = prediction_length
        usemeanstr='mean'
        #load data
        # dfs,acc,ret,pret
        retdata = []
        #oracle
        # whole-race metrics first
        dfx = ret[mid]
        allsamples, alltss = get_allsamples(dfx, year=year)
        #_, pret[mid]= prisk_direct_bysamples(ret[mid][0][1], ret[mid][0][2])
        _, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
        dfout = do_rerank(ranknetdf[year][f'{testmodel}_mean'])
        accret = stint.get_evalret_shortterm(dfout)[0]
        #fsamples, ftss = runs2samples_ex(ranknet_ret[f'oracle-RANK-{year}-inlap-nopitage'],[])
        #_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
        retdata.append([year,f'{testmodel}',configname,'all', accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
        # then split metrics by normal laps vs pit-covered laps
        for laptype in ['normal','pit']:
            # select the set
            pitcoveredlaps = pitdata[year][1]
            normallaps = set([x for x in range(1,201)]) - pitcoveredlaps
            if laptype == 'normal':
                sellaps = normallaps
                clearlaps = pitcoveredlaps
            else:
                sellaps = pitcoveredlaps
                clearlaps = normallaps
            # pitcoveredlaps start idx = 1
            startlaps = [x-plen-1 for x in sellaps]
            #sellapidx = np.array([x-1 for x in sellaps])
            clearidx = np.array([x-1 for x in clearlaps])
            print('sellaps:', len(sellaps), 'clearlaps:',len(clearlaps))
            #oracle
            #outfile=f'shortterm-dfout-ranknet-indy500-rank-inlap-nopitage-20182019-oracle-l10-alldata-weighted.pickle'
            #_all = load_dfout_all(outfile)[0]
            #ranknetdf, acc, ret, pret = _all[0],_all[1],_all[2],_all[3]
            dfout = do_rerank(ranknetdf[year][f'{testmodel}_mean'])
            allsamples, alltss = get_allsamples(dfx, year=year)
            allsamples, alltss = clear_samples(allsamples, alltss,clearidx)
            _, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
            dfout = dfout[dfout['startlap'].isin(startlaps)]
            accret = stint.get_evalret_shortterm(dfout)[0]
            print(year, laptype,f'RankNet-{testmodel}',accret[0], accret[1], prisk_vals[1], prisk_vals[2])
            retdata.append([year, f'{testmodel}',configname,laptype, accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
    ##-------------------------------------------------------------------------------
    elif _forecast_mode == 'stint':
        # compare against the pre-computed ML-model baselines
        if testmodel == 'oracle':
            datafile=f'stint-dfout-mlmodels-indy500-tr2013_2017-te2018_2019-end1-oracle-t0-tuned.pickle'
        else:
            datafile=f'stint-dfout-mlmodels-indy500-tr2013_2017-te2018_2019-end1-normal-t0-tuned.pickle'
        #preddf = load_dfout(outfile)
        with open(datafile, 'rb') as f:
            preddf = pickle.load(f, encoding='latin1')[0]
        #preddf_oracle = load_dfout(outfile)
        ranknet_ret = ret
        errlist = {}
        # records that mismatch between RankNet and baseline output are
        # excluded from both sides to keep the comparison synchronized
        errcnt, errlist[year] = cmp_df(ranknetdf[year][f'{testmodel}_mean'], preddf[year]['lasso'])
        retdata = []
        #
        # Model,SignAcc,MAE,50-Risk,90-Risk
        #
        cols = ['Year','Model','ExpID','laptype','SignAcc','MAE','50-Risk','90-Risk']
        models = {'currank':'CurRank','rf':'RandomForest','svr_lin':'SVM','xgb':'XGBoost'}
        for clf in ['currank','rf','svr_lin','xgb']:
            print('year:',year,'clf:',clf)
            dfout, accret = eval_sync(preddf[year][clf],errlist[year])
            fsamples, ftss = df2samples_ex(dfout)
            _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
            retdata.append([year,models[clf],configname,'all', accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
        #ml models -oracle
        #for clf in ['rf','svr_lin','xgb']:
        # print('year:',year,'clf:',clf)
        # dfout, accret = eval_sync(preddf_oracle[year][clf],errlist[year])
        # fsamples, ftss = df2samples(dfout)
        # _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
        # retdata.append([year,models[clf]+'-Oracle',configname,'all',accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
        dfout, accret = eval_sync(ranknetdf[year][f'{testmodel}_mean'], errlist[year],force2int=True)
        #fsamples, ftss = df2samples(dfout)
        fsamples, ftss = runs2samples(ranknet_ret[mid],errlist[f'{year}'])
        _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
        retdata.append([year,f'{testmodel}',configname,'all',accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
        #dfout, accret = eval_sync(ranknetdf[year]['oracle_mean'], errlist[year],force2int=True)
        ##fsamples, ftss = df2samples(dfout)
        #fsamples, ftss = runs2samples(ranknet_ret[f'oracle-TIMEDIFF-{year}-noinlap-nopitage'],errlist[f'{year}'])
        #_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
        #retdata.append([year,'RankNet-Oracle',accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
    oracle_eval_result = pd.DataFrame(data=retdata, columns=cols)
    if _savedata:
        oracle_eval_result.to_csv(EVALUATION_RESULT_DF)
# ### 6. Draw forecasting results
# In[11]:
# --- step 6: per-car long-horizon forecasting traces (shortterm mode only) ---
if _forecast_mode == 'shortterm' and _joint_train == False:
    if _skip_overwrite and os.path.exists(LONG_FORECASTING_DFS):
        fname = LONG_FORECASTING_DFS
        print('Load Long Forecasting Data:',fname)
        with open(fname, 'rb') as f:
            alldata = pickle.load(f, encoding='latin1')
            print('.......loaded data, alldata keys=', alldata.keys())
    else:
        oracle_ret = ret
        mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], cur_featurestr)
        print('eval mid:', mid, f'{testmodel}_ret keys:', ret.keys())
        ## init predictor
        _predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
        oracle_dfout = do_rerank(dfs[year][f'{testmodel}_mean'])
        carlist = set(list(oracle_dfout.carno.values))
        carlist = [int(x) for x in carlist]
        print('carlist:', carlist,'len:',len(carlist))
        #carlist = [13, 7, 3, 12]
        #carlist = [13]
        retdata = {}
        for carno in carlist:
            print("*"*40)
            print('Run models for carno=', carno)
            # create the test_ds first
            test_cars = [carno]
            #train_ds, test_ds, trainset, testset = stint.make_dataset_byevent(events_id[_test_event],
            # prediction_length,freq,
            # oracle_mode=stint.MODE_ORACLE,
            # run_ts = _run_ts,
            # test_event = _test_event,
            # test_cars=test_cars,
            # half_moving_win = 0,
            # train_ratio = 0.01)
            train_ds, test_ds, trainset, testset = make_dataset_byevent(prepared_laptimedata, prediction_length,freq,
                    useeid=useeid, run_ts=_run_ts,
                    test_event=_test_event, log_transform =False,
                    context_ratio=0, train_ratio = 0,
                    joint_train = _joint_train,
                    test_cars = test_cars)
            if (len(testset) <= 10 + prediction_length):
                print('ts too short, skip ', len(testset))
                continue
            # four reconstructions of the same car's forecast, for comparison
            #by first run samples
            samples = oracle_ret[mid][0][1][test_cars[0]]
            tss = oracle_ret[mid][0][2][test_cars[0]]
            target_oracle1, tss_oracle1 = long_predict_bysamples('1run-samples', samples, tss, test_ds, _predictor)
            #by first run output df(_use_mean = true, already reranked)
            df = oracle_ret[mid][0][0]
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle2, tss_oracle2 = long_predict_bydf(f'{testmodel}-1run-dfout', dfin_oracle, test_ds, _predictor)
            #by multi-run mean at oracle_dfout
            df = oracle_dfout
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle3, tss_oracle3 = long_predict_bydf(f'{testmodel}-multimean', dfin_oracle, test_ds, _predictor)
            #no rerank
            df = ranknetdf[year][f'{testmodel}_mean']
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle4, tss_oracle4 = long_predict_bydf(f'{testmodel}-norerank-multimean', dfin_oracle, test_ds, _predictor)
            #by multiple runs
            target_oracle_multirun, tss_oracle_multirun = get_ranknet_multirun(
                    oracle_ret[mid],
                    test_cars[0], test_ds, _predictor,sampleCnt=loopcnt)
            retdata[carno] = [[tss_oracle1,tss_oracle2,tss_oracle3,tss_oracle4,tss_oracle_multirun],
                    [target_oracle1,target_oracle2,target_oracle3,target_oracle4,target_oracle_multirun]]
        alldata = retdata
        if _savedata:
            with open(LONG_FORECASTING_DFS, 'wb') as f:
                pickle.dump(alldata, f, pickle.HIGHEST_PROTOCOL)
# In[12]:
# --- step 7: figure plotting (disabled) and final result printout ---
# NOTE(review): plotting is gated behind `if False:`, i.e. intentionally off.
if False:
    if _forecast_mode == 'shortterm' and _joint_train == False:
        destdir = FORECAST_FIGS_DIR
        if _skip_overwrite and os.path.exists(destdir):
            print('Long Forecasting Figures at:',destdir)
        else:
            with open('stagedata-Indy500_2013_2019_v9_p0.pickle', 'rb') as f:
                stagedata = pickle.load(f, encoding='latin1')
            _alldata, rankdata, _acldata, _flagdata = stagedata[_test_event]
            #destdir = outputRoot + 'oracle-forecast-figs/'
            os.makedirs(destdir, exist_ok=True)
            for carno in alldata:
                plotoracle(alldata, carno, destdir)
            #draw summary result
            outputfile = destdir + f'{configname}'
            plotallcars(alldata, outputfile, drawid = 0)
# final output
pd.set_option("display.max_rows", None, "display.max_columns", None)
print(oracle_eval_result)
rankpredictor | rankpredictor-master/src/indycar/model/save/before_multiple_datasets/quicktest_modules.py | #!/usr/bin/env python
# coding: utf-8
"""
RankNet QuickTest goes through the following steps
makedb laptime
makedb gluonts
train model
evaluate model
draw figures
version 0.4
supported features:
forecast_mode: shortterm, stint
trainmodel : deepAR , deepARW-Oracle, deepAR-multi
testmodel : standard, oracle,pitmodel, joint
Usage: RankNet-QuickTest.py <configfile> [options]
options override the configuration-file values for quick experiments; they include:
_forecast_mode ;
trainmodel ;
testmodel ;
_joint_train ; False/True
loopcnt ; 100/2
_pitmodel_bias ; 0/2,4
year ; 2018/2019
_test_event ; Indy500-2018, Indy500-2019
"""
import logging
from optparse import OptionParser
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import random
import mxnet as mx
from mxnet import gluon
import pickle
import json
import copy
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from pathlib import Path
import configparser
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWeightEstimator
#import indycar.model.stint_simulator_shortterm_pitmodel as stint
import indycar.model.quicktest_simulator as stint
#from indycar.model.global_variables import _hi
import indycar.model.global_variables as gvar
from gluonts.model.transformer import TransformerEstimator
logger = logging.getLogger(__name__)
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-(car, lap) completed-laps table from raw timing records.

    Keeps the first record (by elapsed time) for every (car_number,
    completed_laps) pair, drops bookkeeping columns, and adds per-car
    lap-over-lap deltas.

    Args:
        dataset: raw timing DataFrame; must contain car_number,
            completed_laps, rank, elapsed_time, track_status, lap_status
            plus the bookkeeping columns dropped below.

    Returns:
        DataFrame with columns [car_number, completed_laps, time_diff,
        rank, track_status, lap_status, elapsed_time], sorted by
        (car_number, completed_laps).
    """
    # pick up data with valid rank: first record per (car, lap) by elapsed time
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade", "laps_behind_prec", "overall_rank", "pit_stop_count",
                          "last_pitted_lap", "start_position", "laps_led"], axis=1)
    carnumber = set(uni_ds['car_number'])
    print('cars:', carnumber)
    print('#cars=', len(carnumber))
    # Per-car deltas: groupby().diff() replaces the previous global diff()
    # followed by chained assignment (`uni_ds['x'][mask] = 0`), which raised
    # SettingWithCopyWarning and could silently write to a copy. fillna(0)
    # zeros the first lap of each car, matching the old masking behavior.
    uni_ds['rank_diff'] = uni_ds.groupby('car_number')['rank'].diff().fillna(0)
    uni_ds['time_diff'] = uni_ds.groupby('car_number')['elapsed_time'].diff().fillna(0)
    df = uni_ds[['car_number', 'completed_laps', 'time_diff', 'rank', 'track_status', 'lap_status', 'elapsed_time']]
    return df
def make_lapstatus_data(dataset):
    """Return the per-lap track_status series of one race finisher.

    Picks the first car that completed the final lap and returns its
    deduplicated ['completed_laps', 'track_status'] records, which serve
    as the race-wide flag (caution) timeline.
    """
    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1

    # cars that reached the final lap are the finishers
    finishers = dataset[dataset.completed_laps == final_lap].car_number.values
    print('count of completed cars:', len(finishers))
    print('completed cars:', finishers)

    # any finisher carries the full flag history; take the first one
    reference_car = dataset[dataset['car_number'] == finishers[0]]
    reference_car = reference_car.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    return reference_car[['completed_laps', 'track_status']]
def load_data(event, year=0):
    """Load one race from ../data/final/ and derive the standard views.

    Args:
        event: race name, e.g. 'Indy500' (or 'Indy500-2018' when year == 0).
        year: optional int year appended to the file name; 0 means the
            event string already carries it.

    Returns:
        (alldata, rankdata, acldata, flagdata):
        alldata  - raw records;
        rankdata - first record per (car, lap) ordered by elapsed time;
        acldata  - make_cl_data() view over all cars;
        flagdata - per-lap track_status of one finisher.
    """
    if year > 0:
        # BUGFIX: `year` is an int here (guaranteed by the `year > 0` test),
        # so it must be converted before concatenation; the original raised
        # TypeError for any year > 0.
        inputfile = '../data/final/C_' + event + '-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'

    dataset = pd.read_csv(inputfile)

    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1

    # get records for the cars that finish the race
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)
    print('count of completed cars:', completed_car_count)
    print('completed cars:', completed_car_numbers)

    # make a copy; `dataset` is narrowed to finishers below
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # NOTE: cldata (finishers only) is computed but not returned; acldata
    # covers all cars — preserved from the original behavior.
    cldata = make_cl_data(dataset)
    flagdata = make_lapstatus_data(dataset)
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata, flagdata
def nan_helper(y):
    """Locate NaNs in a 1-d array.

    Returns a boolean NaN mask together with a converter that maps any
    logical mask to integer indices, e.g. for interpolation-based filling:

        >>> nans, index = nan_helper(y)
        >>> y[nans] = np.interp(index(nans), index(~nans), y[~nans])
    """
    nan_mask = np.isnan(y)

    def to_indices(logical_mask):
        return logical_mask.nonzero()[0]

    return nan_mask, to_indices
def get_lap2nextpit(lap_status, maxlap):
    """Compute, for every lap, how many laps remain until the next pit stop.

    Args:
        lap_status: array of 0/1 pit flags per lap; NaN entries mark an
            incomplete race (car retired early).
        maxlap: the max lap number of the race.

    Returns:
        Array where entry[lap] is the gap to the next pit stop, NaN once
        there is no further pit stop.
    """
    pitstops = list(np.where(lap_status == 1)[0])

    # IMPROVED: use np.isnan directly instead of nan_helper(), whose index
    # converter was computed and discarded here.
    if not np.any(np.isnan(lap_status)):
        # complete cars: treat the finish line as a final "stop" so the
        # last stint gets a countdown too
        pitstops.append(maxlap)

    lap2nextpit = np.zeros_like(lap_status)
    lap2nextpit[:] = np.nan
    # guard: no pit stop at all
    if len(pitstops) == 0:
        return lap2nextpit

    idx = 0
    for lap in range(len(lap_status)):
        if lap < pitstops[idx]:
            lap2nextpit[lap] = pitstops[idx] - lap
        else:
            # on the pit lap itself, count down to the following stop
            idx += 1
            if idx < len(pitstops):
                lap2nextpit[lap] = pitstops[idx] - lap
            else:
                break
    return lap2nextpit
def get_lapdata(acldata):
"""
input:
acldata['car_number','completed_laps','time_diff','rank','track_status', 'lap_status','elapsed_time']
timediff: [car_number, completed_laps] -> elapsed time diff to leader
output:
lapdata = acldata[['car_number','completed_laps',
'time_diff','rank','track_status', 'lap_status','time_behind']].to_numpy()
"""
COL_COMPLETED_LAPS = 1
COL_ELAPSED_TIME = 6
maxlap = np.max(acldata['completed_laps'].values)
#'car_number','completed_laps','time_diff','rank','track_status', 'lap_status','time_behind'
time_behind = []
for lap in range(1, maxlap+1):
this_lap = acldata[acldata['completed_laps']==lap][
['car_number','completed_laps','time_diff','rank',
'track_status', 'lap_status','elapsed_time']].values
min_elapsed_time = np.nanmin(this_lap[:,COL_ELAPSED_TIME].astype(np.float))
#print(f'lap:{lap}, min_elapsed_time:{min_elapsed_time}')
for row in this_lap:
car_number = int(row[0])
time_diff = row[2]
rank = row[3]
track_status = row[4]
lap_status = row[5]
timebehind = float(row[COL_ELAPSED_TIME]) - min_elapsed_time
#
time_behind.append([car_number, lap, time_diff,rank,track_status, lap_status,
timebehind, float(row[COL_ELAPSED_TIME])])
#return
lapdata = np.array(time_behind)
return lapdata
# Row indices of the per-car feature matrix built by get_laptime_dataset
# (datalist[car, feature, lap]).
LAPTIME = 0                 # lap time (time_diff of elapsed_time)
RANK = 1                    # position at the end of the lap
TRACK_STATUS = 2            # 1 = caution/yellow lap ('Y'), else 0
LAP_STATUS = 3              # 1 = pit lap ('P'), else 0
TIME_BEHIND = 4             # elapsed-time gap to the lap leader
CAUTION_LAPS_INSTINT = 5    # caution laps accumulated in the current stint
LAPS_INSTINT = 6            # laps since the last pit stop (pit age)
ELAPSED_TIME = 7            # cumulative race time at the end of the lap
LAP2NEXTPIT = 8             # laps remaining until the next pit stop
_featureCnt = 9             # total number of feature rows above
def get_laptime_dataset(stagedata, inlap_status = 0):
    """Convert per-event race views into per-car feature tensors.

    Args:
        stagedata: dict event_name -> (alldata, rankdata, acldata, flagdata)
            as produced by load_data().
        inlap_status: 0 keeps pit flags as-is; 1 additionally marks the lap
            BEFORE each pit lap; any other non-zero value marks the lap AFTER.

    Returns:
        list of [eventid, decode_carids, datalist] where
        decode_carids maps row id -> car number and
        datalist is a (#cars, _featureCnt, #laps-1) array padded with NaN.
    """
    laptime_data = []
    for event in stagedata.keys():
        print(f'start event: {event}')
        laptime_rec = []  # NOTE(review): appears unused — confirm before removing
        eventid = gvar.events_id[event]
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        totalcars = len(carlist)
        totallaps = len(laplist)
        #carnumber -> carid
        carids={key:idx for idx, key in enumerate(carlist)}
        decode_carids={idx:key for idx, key in enumerate(carlist)}
        # per-car stint counters, reset on every pit stop
        lap_instint = {carids[x]:0 for x in carlist}
        caution_instint = {carids[x]:0 for x in carlist}
        #array: car_number x lap
        #laptime = np.zeros((totalcars, totallaps-1))
        #rank = np.zeros((totalcars, totallaps-1))
        laptime = np.empty((totalcars, totallaps-1))
        rank = np.empty((totalcars, totallaps-1))
        laptime[:] = np.NaN
        rank[:] = np.NaN
        # main output tensor: (car, feature, lap), NaN-padded
        datalist = np.empty((totalcars, _featureCnt, totallaps-1))
        datalist[:] = np.NaN
        #lapdata = acldata[['car_number','completed_laps',
        #        'time_diff','rank','track_status', 'lap_status','elapsed_time']].to_numpy()
        #'car_number','completed_laps','time_diff','rank','track_status', 'lap_status','time_behind'
        lapdata = get_lapdata(acldata)
        for row in lapdata:
            # skip lap 0: diffs are undefined before the first completed lap
            if int(row[1]) == 0:
                continue
            #add to data array
            car_number = carids[int(row[0])]
            completed_laps = int(row[1])-1
            time_diff = float(row[2])
            # NOTE(review): this rebinds `rank` (the array allocated above) to a
            # scalar; the laptime/rank arrays look unused — confirm before removing
            rank = int(row[3])
            track_status = 1 if row[4]=='Y' else 0
            lap_status = 1 if row[5]=='P' else 0
            time_behind = float(row[6])
            datalist[car_number, LAPTIME, completed_laps] = time_diff
            datalist[car_number, RANK, completed_laps] = rank
            datalist[car_number, TRACK_STATUS, completed_laps] = track_status
            datalist[car_number, LAP_STATUS, completed_laps] = lap_status
            datalist[car_number, TIME_BEHIND, completed_laps] = time_behind
            datalist[car_number, ELAPSED_TIME, completed_laps] = float(row[7])
            # stint counters: advance every lap, reset on a pit lap
            if track_status == 1:
                caution_instint[car_number] += 1
            lap_instint[car_number] += 1
            if lap_status == 1:
                #new stint
                lap_instint[car_number] = 0
                caution_instint[car_number] = 0
                # add inlap feature into lap_status:
                # mark the neighbor lap of each pit lap as well
                if (inlap_status!=0):
                    if inlap_status == 1:
                        # set the previous lap of 'P'
                        if completed_laps > 0:
                            #datalist[car_number, LAP_STATUS, completed_laps-1] = INLAP_STATUS
                            datalist[car_number, LAP_STATUS, completed_laps-1] = 1
                    else:
                        # set the next lap of 'P'
                        if completed_laps +1 < totallaps:
                            #datalist[car_number, LAP_STATUS, completed_laps-1] = INLAP_STATUS
                            datalist[car_number, LAP_STATUS, completed_laps + 1] = 1
            datalist[car_number, LAPS_INSTINT, completed_laps] = lap_instint[car_number]
            datalist[car_number, CAUTION_LAPS_INSTINT, completed_laps] = caution_instint[car_number]
        # second pass: derive laps-to-next-pit from the finished pit flags
        for caridx in range(datalist.shape[0]):
            lap_status = datalist[caridx, LAP_STATUS, :]
            #pit status
            lap2nextpit = get_lap2nextpit(lap_status, totallaps-1)
            datalist[caridx, LAP2NEXTPIT, :] = lap2nextpit
        #add one record
        laptime_data.append([eventid, decode_carids, datalist])
        # push this event into stage dataframe
        print('event=%s, records=%s'%(event, datalist.shape))
    return laptime_data
# In[ ]:
def nan_helper(y):
    """Identify NaN positions in a 1-d array.

    NOTE: duplicate of the nan_helper defined earlier in this module;
    this later definition is the one that wins at import time.

    Returns the boolean NaN mask and a callable turning a logical mask
    into integer indices (handy with np.interp for gap filling).
    """
    mask = np.isnan(y)
    return mask, (lambda logical: logical.nonzero()[0])
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# Column (feature-row) layout of the prepared laptime tensors.
# NaNs appear only as tail padding; there should be no NaNs in the
# middle of a time series.
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSED_TIME= 7
COL_LAP2NEXTPIT = 8
#_featureCnt = 9
# rows appended by prepare_laptimedata (add_leader_cnt / add_allpit_cnt /
# add_shift_feature)
COL_LEADER_PITCNT = 9
COL_TOTAL_PITCNT = 10
COL_SHIFT_TRACKSTATUS = 11
COL_SHIFT_LAPSTATUS = 12
COL_SHIFT_LEADER_PITCNT = 13
COL_SHIFT_TOTAL_PITCNT = 14
COL_LASTFEATURE = 14
# dynamically extended scratch space used during simulation
COL_TRACKSTATUS_SAVE = COL_LASTFEATURE+1
COL_LAPSTATUS_SAVE = COL_LASTFEATURE+2
COL_CAUTION_LAPS_INSTINT_SAVE = COL_LASTFEATURE+3
COL_LAPS_INSTINT_SAVE= COL_LASTFEATURE+4
COL_ENDPOS = COL_LASTFEATURE+5

# Bit flags selecting which dynamic-real features feed the model
# (see get_real_features / decode_feature_mode).
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
FEATURE_LEADER_PITCNT = 8
FEATURE_TOTAL_PITCNT = 16
FEATURE_SHIFT_TRACKSTATUS = 32
FEATURE_SHIFT_LAPSTATUS = 64
FEATURE_SHIFT_LEADER_PITCNT = 128
FEATURE_SHIFT_TOTAL_PITCNT = 256

# flag -> (long name, one-char signature code) used by decode_feature_mode
_feature2str= {
        FEATURE_STATUS : ("FEATURE_STATUS",'S'),
        FEATURE_PITAGE : ("FEATURE_PITAGE",'A'),
        FEATURE_LEADER_PITCNT : ("FEATURE_LEADER_PITCNT",'L'),
        FEATURE_TOTAL_PITCNT : ("FEATURE_TOTAL_PITCNT",'T'),
        FEATURE_SHIFT_TRACKSTATUS : ("FEATURE_SHIFT_TRACKSTATUS",'Y'),
        FEATURE_SHIFT_LAPSTATUS : ("FEATURE_SHIFT_LAPSTATUS",'P'),
        FEATURE_SHIFT_LEADER_PITCNT : ("FEATURE_SHIFT_LEADER_PITCNT",'L'),
        FEATURE_SHIFT_TOTAL_PITCNT : ("FEATURE_SHIFT_TOTAL_PITCNT",'T')
        }

# Bit flags controlling covariate visibility in make_dataset_byevent
# (oracle keeps everything; NOLAP/NOTRACK zero the respective status rows).
MODE_ORACLE = 0
MODE_NOLAP = 1
MODE_NOTRACK = 2
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
#MODE_STR={MODE_ORACLE:'oracle', MODE_NOLAP:'nolap',MODE_NOTRACK:'notrack',MODE_TEST:'test'}
#_feature_mode = FEATURE_STATUS
def decode_feature_mode(feature_mode):
    """Print the long names of the enabled feature flags and return a
    compact per-flag signature string ('0' marks a disabled flag)."""
    long_names = []
    signature = []
    for flag, (long_name, short_code) in _feature2str.items():
        if test_flag(feature_mode, flag):
            long_names.append(long_name)
            signature.append(short_code)
        else:
            signature.append('0')
    print(' '.join(long_names))
    return ''.join(signature)
def add_leader_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS, shift_len = 0,
                   dest_col = COL_LEADER_PITCNT,
                   verbose = False):
    """Add/update the 'leader pit count' feature in mat(car, feature, lap).

    For each lap, counts the pit stops of cars ranked strictly ahead of a
    car, where the ordering may come from `shift_len` laps earlier.

    Args:
        selmat: laptime_data array [car, feature, lap].
        rank_col: feature row holding the rank used for ordering.
        pit_col: feature row holding the 0/1 pit indicator.
        shift_len: lap offset of the rank ordering used to gather pits.
        dest_col: destination feature row; -1 appends a new row.
        verbose: print intermediate arrays for debugging.

    Returns:
        The array with the feature written into dest_col (a new array only
        when dest_col == -1, otherwise selmat mutated in place).
    """
    dim1, dim2, dim3 = selmat.shape

    # per-lap rank ordering; idx[:, lap] lists car rows from leader to last.
    # BUGFIX: dropped the unused `true_rank = ....astype(np.float)` line —
    # np.float was removed in NumPy 1.24 and the value was never read.
    idx = np.argsort(selmat[:, rank_col, :], axis=0)

    # gather pit indicators in (shifted) rank order
    pits = np.zeros((dim1, dim3))
    for lap in range(shift_len, dim3):
        col = idx[:, lap-shift_len]
        pits[:, lap] = selmat[col, pit_col, lap]

    # cumulative pits of everyone ranked ahead (exclusive of the car itself)
    leaderCnt = np.nancumsum(pits, axis=0) - pits
    if verbose:
        print('pits:\n')
        print(pits[:,190:])
        print('leaderCnt raw:\n')
        print(leaderCnt[:,190:])

    # NaNs come from retired cars; treat them as "no pit"
    leaderCnt[np.isnan(leaderCnt)] = 0
    if verbose:
        print('leaderCnt after remove nan:\n')
        print(leaderCnt[:,190:])

    if dest_col == -1:
        # create a new array with one extra feature row
        newmat = np.zeros((dim1, dim2+1, dim3))
        dest_col = dim2
        newmat[:, :dim2, :] = selmat.copy()
    else:
        # update mode: write into the existing row
        newmat = selmat

    # scatter the counts back to the original car rows
    for lap in range(dim3):
        col = idx[:, lap]
        newmat[col, dest_col, lap] = leaderCnt[:, lap]

    # pad the new feature with NaN wherever the rank series itself is NaN
    for rec in newmat:
        nan_count = int(np.sum(np.isnan(rec[rank_col, :])))
        if nan_count > 0:
            rec[dest_col, -nan_count:] = np.nan
    return newmat
def add_allpit_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS,
                   dest_col = COL_TOTAL_PITCNT, verbose = False):
    """Add/update the 'total pit count' feature in mat(car, feature, lap):
    the number of cars pitting on each lap, broadcast to every car.

    Args:
        selmat: laptime_data array [car, feature, lap].
        rank_col: feature row whose NaN tail defines each car's valid laps.
        pit_col: feature row holding the 0/1 pit indicator.
        dest_col: destination feature row; -1 appends a new row.
        verbose: print intermediate arrays for debugging.

    Returns:
        The array with the feature written into dest_col (a new array only
        when dest_col == -1, otherwise selmat mutated in place).
    """
    dim1, dim2, dim3 = selmat.shape

    # total pits per lap, summed over all cars (NaNs treated as 0 by nansum)
    totalCnt = np.nansum(selmat[:, pit_col, :], axis=0).reshape((-1))
    if verbose:
        # BUGFIX: the original printed an undefined name `pits` here,
        # raising NameError whenever verbose=True.
        print('totalCnt raw:\n')
        print(totalCnt[190:])

    # remove any remaining NaNs defensively
    totalCnt[np.isnan(totalCnt)] = 0
    if verbose:
        print('totalCnt after remove nan:\n')
        print(totalCnt[190:])

    if dest_col == -1:
        # create a new array with one extra feature row
        newmat = np.zeros((dim1, dim2+1, dim3))
        dest_col = dim2
        newmat[:, :dim2, :] = selmat.copy()
    else:
        # update mode: write into the existing row
        newmat = selmat

    for car in range(dim1):
        newmat[car, dest_col, :] = totalCnt

    # pad with NaN wherever the rank series is NaN (retired cars)
    for rec in newmat:
        nan_count = int(np.sum(np.isnan(rec[rank_col, :])))
        if nan_count > 0:
            rec[dest_col, -nan_count:] = np.nan
    return newmat
def add_shift_feature(selmat, rank_col=COL_RANK, shift_col=COL_LAPSTATUS, shift_len = 2,
                      dest_col = -1, verbose = False):
    """Add/update a left-shifted copy of `shift_col` in mat(car, feature, lap).

    Each car's valid laps (non-NaN in `rank_col`) are copied shifted left by
    `shift_len`; the freed tail stays 0 and invalid laps stay NaN.

    warning: these are oracle features — be careful not to leak future
    rank positions into training.

    Args:
        selmat: laptime_data array [car, feature, lap].
        rank_col: feature row whose NaN tail defines each car's valid laps.
        shift_col: feature row to shift.
        shift_len: number of laps to shift left.
        dest_col: destination feature row; -1 appends a new row.
        verbose: unused, kept for interface compatibility.

    Returns:
        The array with the feature written into dest_col (a new array only
        when dest_col == -1, otherwise selmat mutated in place).
    """
    dim1, dim2, dim3 = selmat.shape

    if dest_col == -1:
        # create a new array with one extra feature row
        newmat = np.zeros((dim1, dim2+1, dim3))
        dest_col = dim2
        newmat[:, :dim2, :] = selmat.copy()
    else:
        # update mode: write into the existing row
        newmat = selmat

    for car in range(dim1):
        # NaN everywhere by default, then 0 over the car's valid laps
        newmat[car, dest_col, :] = np.nan
        # IMPROVED: removed the dead nan_helper()/nan_count computation —
        # its results were never used.
        rec = selmat[car]
        recnnz = rec[shift_col, ~np.isnan(rec[rank_col, :])]
        reclen = len(recnnz)
        newmat[car, dest_col, :reclen] = 0
        # shift left by shift_len; the last shift_len valid laps remain 0
        newmat[car, dest_col, :reclen-shift_len] = recnnz[shift_len:]
    return newmat
def prepare_laptimedata(laptime_data,
                       prediction_length, freq,
                       test_event = 'Indy500-2018',
                       train_ratio=0.8,
                       context_ratio = 0.,
                       shift_len = -1):
    """Prepare the laptime data for training.

    1. remove short time series (shorter than train_len + prediction_length)
    2. rerank the remaining series per lap
    3. append the derived features (leader/total pit counts, shifted copies)

    Args:
        laptime_data: list of [eventid, carids, datalist] records.
        prediction_length: forecast horizon; also the default shift length.
        freq: unused here, kept for interface compatibility.
        test_event: event treated as the test set (events after it are skipped).
        train_ratio: fraction of the longest series used as training length.
        context_ratio: optional extra context fraction added to the train set.
        shift_len: lap offset for the shifted oracle features; <0 means
            use prediction_length.

    Returns:
        New list of [eventid, new_carids, datalist] with re-ranked series
        and the extra feature rows filled in.
    """
    _laptime_data = laptime_data.copy()
    test_eventid = gvar.events_id[test_event]
    run_ts = COL_RANK
    # check shift len
    if shift_len < 0:
        shift_len = prediction_length
    print('prepare_laptimedata shift len:', shift_len)

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    new_data = []
    for _data in _laptime_data:
        # skip events after the test event
        if _data[0] > test_eventid:
            print('skip this event:', _data[0])
            break
        if _data[0] == test_eventid:
            test_mode = True
        else:
            test_mode = False

        # statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            # use global train_len
            train_len = gvar._train_len if not test_mode else gvar._test_train_len
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        print(f'before ====event:{gvar.events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # rerank because short ts get removed
        if True:
            sel_rows = []
            # use to check the dimension of features: create vs update mode
            input_feature_cnt = _data[2].shape[1]
            if input_feature_cnt < COL_LASTFEATURE + 1:
                print('create new features mode, feature_cnt:', input_feature_cnt)
            else:
                print('update features mode, feature_cnt:', input_feature_cnt)

            for rowid in range(_data[2].shape[0]):
                rec = _data[2][rowid].copy()
                # remove nan (tails only)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    print(f'rerank a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                else:
                    sel_rows.append(rowid)

            # get selected matrix and rerank it per lap
            sel_idx = np.array(sel_rows)
            selmat = _data[2][sel_idx]
            mask = np.isnan(selmat[:,COL_RANK,:])
            idx = np.argsort(selmat[:,COL_RANK,:], axis=0)
            # BUGFIX: np.float was removed in NumPy 1.24; the builtin float
            # is the documented replacement.
            true_rank = np.argsort(idx, axis=0).astype(float)
            true_rank[mask] = np.nan

            if test_mode:
                # for historical code mismatch: simulation does not rerank,
                # so test ranks stay 1-based
                _data[2][sel_idx,COL_RANK,:] = true_rank + 1
            else:
                _data[2][sel_idx,COL_RANK,:] = true_rank

            # update the carno dict to the surviving rows
            new_carids = {}
            for rowid in range(len(sel_idx)):
                carid = sel_idx[rowid]
                carno = _data[1][carid]
                new_carids[rowid] = carno

            # add new features; verbose debug output on the first event only
            if _data[0]==0:
                verbose = True
            else:
                verbose = False

            # add leaderPitCnt
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_LEADER_PITCNT
            data2_intermediate = add_leader_cnt(_data[2][sel_idx], shift_len = shift_len, dest_col=dest_col, verbose = verbose)
            # add totalPit
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_TOTAL_PITCNT
            data2_intermediate = add_allpit_cnt(data2_intermediate, dest_col=dest_col)

            # add shift features in a fixed order (see the COL_SHIFT_* macros)
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TRACKSTATUS
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                    shift_col=COL_TRACKSTATUS, shift_len = shift_len)
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LAPSTATUS
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                    shift_col=COL_LAPSTATUS, shift_len = shift_len)
            # leader_pitcnt cannot be shifted without target leaking; just do not use it
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LEADER_PITCNT
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                    shift_col=COL_LEADER_PITCNT, shift_len = shift_len)
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TOTAL_PITCNT
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                    shift_col=COL_TOTAL_PITCNT, shift_len = shift_len)

            # final
            data2_newfeature = data2_intermediate

        new_data.append([_data[0], new_carids, data2_newfeature])
    return new_data
def get_real_features(feature_mode, rec, endpos):
    """Assemble the dynamic-real feature rows selected by *feature_mode*.

    Args:
        feature_mode: bitwise OR of FEATURE_* flags.
        rec: per-car feature matrix [feature, lap].
        endpos: exclusive lap cutoff; <= 0 means use the full length.

    Returns:
        List of 1-d arrays, one per enabled feature, each truncated to
        endpos. FEATURE_STATUS contributes two rows (track then lap status);
        all other flags contribute one row in the fixed order below.
    """
    if endpos <= 0:
        endpos = rec.shape[1]

    rows = []
    # FEATURE_STATUS is special: it selects two columns at once
    if test_flag(feature_mode, FEATURE_STATUS):
        rows.append(rec[COL_TRACKSTATUS, :endpos])
        rows.append(rec[COL_LAPSTATUS, :endpos])

    # remaining flags each map to a single feature row, in fixed order
    single_row_flags = (
        (FEATURE_PITAGE, COL_LAPS_INSTINT),
        (FEATURE_LEADER_PITCNT, COL_LEADER_PITCNT),
        (FEATURE_TOTAL_PITCNT, COL_TOTAL_PITCNT),
        (FEATURE_SHIFT_TRACKSTATUS, COL_SHIFT_TRACKSTATUS),
        (FEATURE_SHIFT_LAPSTATUS, COL_SHIFT_LAPSTATUS),
        (FEATURE_SHIFT_LEADER_PITCNT, COL_SHIFT_LEADER_PITCNT),
        (FEATURE_SHIFT_TOTAL_PITCNT, COL_SHIFT_TOTAL_PITCNT),
    )
    for flag, col in single_row_flags:
        if test_flag(feature_mode, flag):
            rows.append(rec[col, :endpos])
    return rows
def make_dataset_byevent(_laptime_data,
                       prediction_length, freq,
                       useeid = False,
                       run_ts=COL_LAPTIME,
                       test_event = 'Indy500-2018',
                       use_global_dict = True,
                       oracle_mode = MODE_ORACLE,
                       half_moving_win = True,
                       train_ratio=0.8,
                       log_transform = False,
                       context_ratio = 0.,
                       dorerank = True,
                       joint_train = 0,
                       test_cars = []
                ):
    """
    Split the time series into GluonTS train and test sets by event.

    Non-test events go entirely to the train set; the test event produces
    rolling-window test records (step -1 over endpos).

    oracle_mode: bitwise MODE_* flags; MODE_NOTRACK/MODE_NOLAP zero the
        respective status covariates to simulate prediction without oracles.

    Returns:
        (train_ds, test_ds, train_set, test_set) — ListDatasets plus the
        underlying record lists.

    NOTE(review): `test_cars=[]` is a mutable default argument — harmless
    while it is only read, but confirm no caller mutates it.
    """
    # global setting: which dynamic-real features to attach
    feature_mode = gvar._feature_mode
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    train_set = []
    test_set = []
    totalTSCnt = 0
    totalTSLen = 0
    test_eventid = gvar.events_id[test_event]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        #if events[_data[0]] == test_event:
        if _data[0] == test_eventid:
            test_mode = True
        else:
            test_mode = False
        # statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            # use global train_len
            train_len = gvar._train_len if not test_mode else gvar._test_train_len
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10
        print(f'after ====event:{gvar.events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process each per-car time series
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            # remove nan (tails only)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            totalTSCnt += 1
            totalTSLen += totallen
            if ( totallen < train_len + prediction_length):
                print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = gvar.global_carids[_data[1][rowid]]
            else:
                # simulation dataset; todo: fix the carids as decoder
                carno = rowid
                carid = rowid
            # restrict to test_cars if a filter list was given
            if len(test_cars)>0 and carno not in test_cars:
                continue
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            # first, get a copy of the target
            # target can be COL_XXSTATUS
            if joint_train:
                target_cols = [run_ts, COL_LAPSTATUS]
                target_val = rec[target_cols].copy().astype(np.float32)
            else:
                target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # selection of features: optionally blind the status covariates
            if test_flag(oracle_mode, MODE_NOTRACK):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                real_features = get_real_features(feature_mode, rec, -1)
                _train.append({'target': target_val,
                        'start': start,
                        'feat_static_cat': static_cat,
                        'feat_dynamic_real': real_features
                         })
            else:
                # reset train_len
                if context_ratio != 0.:
                    # add [0, context_len] to the train set as extra context
                    if joint_train:
                        _train.append({'target': target_val[:,:context_len],
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': get_real_features(feature_mode, rec, context_len)
                                 })
                    else:
                        _train.append({'target': target_val[:context_len],
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': get_real_features(feature_mode, rec, context_len)
                                 })
                # testset: multiple test ts via a rolling window
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                step = -1
                for endpos in range(totallen, context_len+prediction_length,
                                    step):
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    real_features = get_real_features(feature_mode, rec, endpos)
                    if joint_train:
                        _test.append({'target': target_val[:,:endpos],
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': real_features
                                 })
                    else:
                        _test.append({'target': target_val[:endpos],
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': real_features
                                 })
                    test_rec_cnt += 1
            # check feature cnt
            # NOTE(review): if test_mode is True with context_ratio == 0 and the
            # rolling-window loop runs zero times, `real_features` is unbound
            # here — confirm input lengths guarantee at least one window.
            featureCnt = len(real_features)
            # add one ts
            print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt},featureCnt:{featureCnt}')
        train_set.extend(_train)
        test_set.extend(_test)
    print(f'train len:{len(train_set)}, test len:{len(test_set)}, totsl TsCnt:{totalTSCnt}, total ts len:{totalTSLen}')
    train_ds = ListDataset(train_set, freq=freq,one_dim_target= False if joint_train else True)
    test_ds = ListDataset(test_set, freq=freq,one_dim_target= False if joint_train else True)
    return train_ds, test_ds, train_set, test_set
# In[ ]:
def init_estimator(model, gpuid, epochs=100, batch_size = 32,
        target_dim = 3, distr_output = None, use_feat_static = True):
    """Build a GluonTS estimator (or R/naive predictor) by model name.

    Args:
        model: one of 'deepAR', 'deepAR-Oracle', 'deepARW-Oracle',
            'Transformer', 'Transformer-Oracle', 'deepAR-multi', 'simpleFF',
            'deepFactor', 'deepState', 'ets', 'prophet', 'arima', 'naive'.
            The '-Oracle' variants enable dynamic-real covariates.
        gpuid: GPU index; negative selects CPU.
        epochs: training epochs.
        batch_size: trainer batch size.
        target_dim: target dimension for the multivariate 'deepAR-multi'.
        distr_output: optional distribution output passed to the estimator.
        use_feat_static: enable the static car-id categorical feature.

    Returns:
        The configured estimator/predictor; exits the process on an
        unknown model name.

    NOTE(review): `cardinality` is read from module scope and is not defined
    in this portion of the file — confirm it is set before calling with
    use_feat_static=True.
    """
    if int(gpuid) < 0:
        ctx = "cpu"
    else:
        ctx = "gpu(%s)"%gpuid

    # global hyper-parameters shared across all models
    prediction_length = gvar.prediction_length
    context_length = gvar.context_length
    freq = gvar.freq

    if model == 'deepAR':
        # plain DeepAR, no dynamic (oracle) covariates
        if use_feat_static:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=True,
                cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
            )
        else:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=False,
                #cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
            )
    elif model == 'deepAR-Oracle':
        # DeepAR with dynamic-real (oracle) covariates enabled
        if use_feat_static:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
            )
        else:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
            )
    elif model == 'deepARW-Oracle':
        # weighted DeepAR variant (DeepARWeightEstimator) with oracle covariates
        if use_feat_static:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            #hybridize=False,
                            num_batches_per_epoch=100
                           )
            )
        else:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            #hybridize=False,
                            num_batches_per_epoch=100
                           )
            )
    elif model == 'Transformer':
        # Transformer without dynamic covariates
        if use_feat_static:
            estimator = TransformerEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            #hybridize=False,
                            num_batches_per_epoch=100
                           )
            )
        else:
            estimator = TransformerEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            #hybridize=False,
                            num_batches_per_epoch=100
                           )
            )
    elif model == 'Transformer-Oracle':
        # Transformer with dynamic-real (oracle) covariates enabled
        if use_feat_static:
            estimator = TransformerEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            #hybridize=False,
                            num_batches_per_epoch=100
                           )
            )
        else:
            estimator = TransformerEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            #hybridize=False,
                            num_batches_per_epoch=100
                           )
            )
    elif model == 'deepAR-multi':
        # multivariate DeepAR with a Gaussian output of dimension target_dim
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=use_feat_static,
            #cardinality=cardinality,
            use_feat_dynamic_real=False,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                        batch_size = batch_size,
                        epochs=epochs,
                        learning_rate=1e-3,
                        num_batches_per_epoch=100
                       ),
            distr_output=MultivariateGaussianOutput(dim=target_dim),
        )
    elif model == 'simpleFF':
        # simple feed-forward baseline
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                        batch_size = batch_size,
                        epochs=epochs,
                        learning_rate=1e-3,
                        hybridize=False,
                        num_batches_per_epoch=100
                       )
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                        batch_size = batch_size,
                        epochs=epochs,
                        learning_rate=1e-3,
                        num_batches_per_epoch=100
                       )
        )
    elif model == 'deepState':
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                        batch_size = batch_size,
                        epochs=epochs,
                        learning_rate=1e-3,
                        num_batches_per_epoch=100
                       )
        )
    elif model == 'ets':
        # classical baselines below are predictors, not trainable estimators
        estimator = RForecastPredictor(method_name='ets',freq= freq, prediction_length = prediction_length)
    elif model == 'prophet':
        estimator = ProphetPredictor(freq= freq, prediction_length = prediction_length)
    elif model == 'arima':
        estimator = RForecastPredictor(method_name='arima',freq= freq, prediction_length = prediction_length, trunc_length = context_length)
    elif model == 'naive':
        estimator = NaivePredictor(freq= freq, prediction_length = prediction_length)
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)
    return estimator
# In[ ]:
#
# simulation engine general
#
def init_simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
        featuremode = stint.FEATURE_STATUS,
        pitmodel = 0,
        inlapmode=0,
        train_len = 40,test_train_len=40,
        joint_train = False,
        pitmodel_bias= 0, prepared_laptimedata = None):
    """Configure the stint simulator module for a run (side effects only).

    Sets module-level knobs on `stint`, initializes it from
    gvar.LAPTIME_DATASET, then installs the prepared laptime data.

    Args:
        datasetid: model path id, pattern <_dataset_id>/<_task_id>-<trainid>/.
        testevent: event to simulate, e.g. 'Indy500-2019'.
        taskid: the trained model's task (rank / laptime).
        runts: target series index (COL_LAPTIME or COL_RANK).
        expid: experiment id (rank, laptime, laptime2rank, timediff2rank...).
        predictionlen: forecast horizon (currently unused here).
        featuremode / pitmodel / inlapmode / pitmodel_bias: simulator knobs.
        train_len, test_train_len: per-series training lengths.
        joint_train: joint target training switch.
        prepared_laptimedata: output of prepare_laptimedata().
    """
    #
    # configuration
    #
    # model path: <_dataset_id>/<_task_id>-<trainid>/
    #_dataset_id = 'indy2013-2018-nocarid'
    # NOTE: _inlap_status must be set BEFORE stint.init() — keep this order
    stint._inlap_status = inlapmode
    stint.init(gvar.LAPTIME_DATASET, pitmodel, pitmodel_bias= pitmodel_bias)
    stint._dataset_id = datasetid
    stint._test_event = testevent
    #_test_event = 'Indy500-2019'
    stint._feature_mode = featuremode
    stint._context_ratio = 0.
    stint._task_id = taskid # rank,laptime, the trained model's task
    stint._run_ts = runts #COL_LAPTIME,COL_RANK
    stint._exp_id=expid #rank, laptime, laptim2rank, timediff2rank...
    stint._use_mean = True
    stint._train_len = train_len
    stint._test_train_len = test_train_len
    stint._joint_train = joint_train
    # install the laptime data that already carries the new features
    stint.set_laptimedata(prepared_laptimedata)
def simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
               datamode, loopcnt, featuremode = stint.FEATURE_STATUS,
               pitmodel = 0, model = 'oracle', inlapmode=0, train_len = 40, test_train_len=40,
               forecastmode = 'shortterm', joint_train = False,
               pitmodel_bias= 0, prepared_laptimedata = None,
               epochs = 1000):
    """
    Configure the stint engine (same global setup as init_simulation), load a
    trained model, run `loopcnt` simulation passes and evaluate each pass.

    Parameters mirror init_simulation() plus:
        datamode: data mode forwarded to the stint run_simulation_* call
        loopcnt: number of independent simulation runs
        model: model name passed to stint.load_model() (default 'oracle')
        forecastmode: 'shortterm' or 'stint'; anything else aborts the loop
        epochs: training epochs tag used to locate the saved model

    Returns:
        (b, ret2): b is an ndarray of per-run evaluation metrics; ret2 maps
        run index -> raw simulation result (for 'shortterm' a tuple whose
        first element is the result DataFrame).
    """
    # --- global engine configuration (order matters; see init_simulation) ---
    stint._inlap_status = inlapmode
    stint.init(gvar.LAPTIME_DATASET, pitmodel, pitmodel_bias= pitmodel_bias)
    stint._dataset_id = datasetid
    stint._test_event = testevent
    stint._feature_mode = featuremode
    stint._context_ratio = 0.
    stint._task_id = taskid      # rank, laptime; the trained model's task
    stint._run_ts = runts        # COL_LAPTIME, COL_RANK
    stint._exp_id=expid          # rank, laptime, laptim2rank, timediff2rank...
    stint._use_mean = True
    stint._train_len = train_len
    stint._test_train_len = test_train_len
    stint._joint_train = joint_train
    if forecastmode == 'stint':
        # stint-level forecasting: no trimming, no forced end-pit alignment
        stint._trim = 0
        stint._debug_carlist=[]
        stint._force_endpit_align = False
        stint._include_endpit = True
    # install the laptime data with new features (global side effect)
    stint.set_laptimedata(prepared_laptimedata)
    predictor = stint.load_model(predictionlen, model,trainid='indy500',epochs = epochs, exproot='./')
    ret2 = {}
    for i in range(loopcnt):
        # each entry for 'shortterm': (df, full_samples, full_tss)
        if forecastmode == 'shortterm':
            ret2[i] = stint.run_simulation_shortterm(predictor, predictionlen, stint.freq, datamode=datamode)
        elif forecastmode == 'stint':
            ret2[i] = stint.run_simulation_pred(predictor, predictionlen, stint.freq, datamode=datamode)
        else:
            print('forecastmode not support:', forecastmode)
            break
    acc = []
    for i in ret2.keys():
        if forecastmode == 'shortterm':
            df = ret2[i][0]
            _x = stint.get_evalret_shortterm(df)
        elif forecastmode == 'stint':
            df = ret2[i]
            _x = stint.get_evalret(df)
        acc.append(_x)
    b = np.array(acc)
    print(np.mean(b, axis=0))
    return b, ret2
def long_predict(predictor, sampleCnt = 100):
    """
    Stitch a race-long forecast track from rolling-window predictions,
    keeping only the farthest (last-step) sample of each window.

    input:
        test_ds ; global var holding the rolling test dataset
        predictor ; trained gluonts predictor
        sampleCnt ; number of sample paths per window
    output:
        (target, tss[0]) ; a SampleForecast whose samples span the whole race
        (NaN where no window's last step lands) and the first ground-truth
        series
    """
    def get_start(idx):
        # offset (in minutes == laps) of window idx relative to series start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,        # test dataset
        predictor=predictor,    # predictor
        num_samples=sampleCnt,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # forecasts[-1] is the earliest window -- inferred from the
    # first_start/last_start usage below (TODO confirm ordering)
    first_start = get_start(-1)
    last_start = get_start(0)
    print(first_start, last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    # samples shape: (sampleCnt, prediction_length)
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # one wide buffer spanning every window; NaN marks "no forecast here"
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        start_pos = get_start(idx)
        pos = start_pos - first_start
        # keep only the last (farthest-ahead) step of each prediction window
        newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
    target.samples = newsamples
    return target, tss[0]
def get_alldf(dfx, year=2018, forecast_mode = 'shortterm'):
    """
    Concatenate the per-run result frames stored in *dfx* into one DataFrame.

    For 'shortterm' each dfx value is a tuple whose first element is the
    frame; otherwise the value is the frame itself. *year* is unused and kept
    for interface compatibility.
    """
    if forecast_mode == 'shortterm':
        frames = [dfx[run_id][0] for run_id in dfx.keys()]
    else:
        frames = [dfx[run_id] for run_id in dfx.keys()]
    # single-run input is returned as-is (no concat copy)
    return pd.concat(frames) if len(frames) > 1 else frames[0]
def get_alldf_mode(dfx, year=2018, mode=0, forecast_mode = 'shortterm'):
    """
    Aggregate multi-run predictions into one row per (carno, startlap).

    mode:
        0 ; mode (most frequent pred_endrank across runs)
        1 ; mean
        2 ; median

    Fix: an unsupported *mode* previously fell through all branches and
    raised NameError on `pred_endrank`; it now raises ValueError up front.

    Returns a DataFrame in the 9-column shortterm format, or the 11-column
    format (with endlap / pred_endlap) otherwise.
    """
    if mode not in (0, 1, 2):
        raise ValueError('unsupported aggregation mode: %r' % (mode,))
    dfall = get_alldf(dfx, year=year, forecast_mode = forecast_mode)
    cars = set(dfall.carno.values)
    startlaps = {car: set(dfall[dfall['carno'] == car].startlap.values)
                 for car in cars}
    retdf = []
    for car in cars:
        for startlap in startlaps[car]:
            dfrec = dfall[(dfall['carno'] == car) & (dfall['startlap'] == startlap)]
            if mode == 0:
                # most frequent predicted end rank across runs
                pred_endrank = stats.mode(dfrec.pred_endrank.values).mode[0]
            elif mode == 1:
                pred_endrank = np.mean(dfrec.pred_endrank.values)
            else:
                pred_endrank = np.median(dfrec.pred_endrank.values)
            # take the first run's record and overwrite the prediction columns
            firstrec = dfrec.to_numpy()[0, :]
            firstrec[6] = pred_endrank                 # pred_endrank
            firstrec[7] = pred_endrank - firstrec[2]   # pred_diff = pred - startrank
            if firstrec[7] == 0:
                firstrec[8] = 0
            elif firstrec[7] > 0:
                firstrec[8] = 1
            else:
                firstrec[8] = -1
            retdf.append(firstrec)
    if forecast_mode == 'shortterm':
        dfout = pd.DataFrame(retdf, columns =['carno', 'startlap', 'startrank',
                                              'endrank', 'diff', 'sign',
                                              'pred_endrank', 'pred_diff', 'pred_sign',
                                              ])
    else:
        dfout = pd.DataFrame(retdf, columns =['carno', 'startlap', 'startrank',
                                              'endrank', 'diff', 'sign',
                                              'pred_endrank', 'pred_diff', 'pred_sign',
                                              'endlap','pred_endlap'
                                              ])
    print('df size:', len(dfout))
    return dfout
def get_allsamples(dfx, year=2018):
    """
    Collapse every run's forecast samples to their per-lap mean, producing one
    sample row per run.

    dfx maps run index -> (df, forecast_dict, tss_dict); the run indices are
    also used as row indices, so they must be 0..n_runs-1. *year* is unused.

    Returns:
        (full_samples, full_tss): full_samples[carno] is (n_runs, n_laps);
        full_tss is the ground-truth dict of the first run.
    """
    run_ids = list(dfx.keys())
    n_runs = len(run_ids)
    full_tss = dfx[run_ids[0]][2]
    carlist = list(full_tss.keys())
    n_samples, n_laps = dfx[run_ids[0]][1][carlist[0]].shape
    print('sacmplecnt:', n_samples, 'lapcnt:', n_laps, 'runcnt:', n_runs)
    # pre-allocate one row per run for every car
    full_samples = {carno: np.zeros((n_runs, n_laps)) for carno in carlist}
    for run_id in run_ids:
        forecast = dfx[run_id][1]
        for carno in carlist:
            # mean over this run's sample paths, ignoring NaN laps
            full_samples[carno][run_id, :] = np.nanmean(forecast[carno], axis=0)
    return full_samples, full_tss
#straight implementation of prisk
def quantile_loss(target, quantile_forecast, q):
    """
    Pinball (quantile) loss between *target* and a forecast at level *q*,
    summed over all entries with NaNs ignored (gluonts convention, x2).
    """
    indicator = (target <= quantile_forecast) - q
    deviations = np.abs((quantile_forecast - target) * indicator)
    return 2.0 * np.nansum(deviations)
def abs_target_sum(target):
    """Sum of |target| with NaN entries ignored (wQuantileLoss denominator)."""
    magnitudes = np.absolute(target)
    return np.nansum(magnitudes)
def prisk(full_samples, full_tss, verbose = False):
    """
    Compute weighted quantile losses with the gluonts Evaluator by wrapping
    the raw sample/target dicts into SampleForecast / DataFrame objects.

    The hard-coded 12 skips warm-up laps (matches startid=12 used in
    prisk_direct_bysamples). NOTE(review): `start + 12` relies on adding
    periods through the Timestamp freq attribute, deprecated in newer
    pandas -- confirm against the pinned version.

    Returns the gluonts agg_metrics dict (wQuantileLoss[0.1/0.5/0.9], ...).
    """
    carlist = full_tss.keys()
    tss = []
    forecasts = []
    forecasts_mean = []
    freq = '1min'
    start = pd.Timestamp("01-01-2019", freq=freq)
    for car in carlist:
        testcar = car
        # probabilistic forecast from all sample rows (laps 12..end)
        fc = SampleForecast(samples = full_samples[testcar][:, 12:], freq=freq, start_date=start + 12)
        # degenerate one-sample forecast holding the sample mean (collected
        # into forecasts_mean but not evaluated below)
        samples = np.mean(full_samples[testcar][:, 12:], axis =0, keepdims=True)
        fc_mean = SampleForecast(samples = samples, freq=freq, start_date=start + 12)
        index = pd.date_range(start='2019-01-01 00:00:00', freq = 'T', periods = len(full_tss[testcar]))
        ts = pd.DataFrame(index = index, data = full_tss[testcar])
        tss.append(ts)
        forecasts.append(fc)
        forecasts_mean.append(fc_mean)
    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(tss))
    if verbose:
        print(json.dumps(agg_metrics, indent=4))
    print(agg_metrics["wQuantileLoss[0.1]"], agg_metrics["wQuantileLoss[0.5]"],agg_metrics["wQuantileLoss[0.9]"])
    return agg_metrics
def prisk_direct_bysamples2(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    Deprecated duplicate of prisk_direct_bysamples(); the two bodies were
    verbatim copies, so this one now delegates to the canonical
    implementation. Kept (with an identical signature) for backward
    compatibility with existing callers.

    Returns:
        (agg_metrics, aggrisk) -- see prisk_direct_bysamples().
    """
    return prisk_direct_bysamples(full_samples, full_tss, quantiles=quantiles,
                                  startid=startid, verbose=verbose)
# In[ ]:
def prisk_direct_bysamples(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    Compute weighted quantile loss (p-risk) directly from raw samples,
    matching the gluonts Evaluator implementation.

    full_samples[carno]: (n_samples, n_laps) forecast samples
    full_tss[carno]: per-lap target array; laps before *startid* are skipped

    Returns:
        (agg_metrics, aggrisk): dict 'wQuantileLoss[q]' -> value, and the
        same values as an ndarray aligned with *quantiles*.
    """
    carnos = full_tss.keys()
    losses = np.zeros((len(carnos), len(quantiles)))   # per-car pinball losses
    denominators = np.zeros((len(carnos)))             # per-car |target| sums
    aggrisk = np.zeros((len(quantiles)))
    for row, carno in enumerate(carnos):
        sample_matrix = full_samples[carno]
        target = full_tss[carno]
        # len(quantiles) x n_laps
        q_forecasts = np.quantile(sample_matrix, quantiles, axis=0)
        for col, q in enumerate(quantiles):
            losses[row, col] = quantile_loss(target[startid:], q_forecasts[col][startid:], q)
        denominators[row] = abs_target_sum(target[startid:])
        if verbose==True and carno==3:
            # debug dump for car 3 (q_forecasts[col] is the last quantile level)
            print('target:', target[startid:])
            print('forecast:', q_forecasts[col][startid:])
            print('target_sum:', denominators[row])
            print('quantile_forecasts:', q_forecasts[:,startid:])
    loss_totals = np.nansum(losses, axis=0)
    if verbose==True:
        print('prisk:',losses)
        print('prisk_sum:',loss_totals)
        print('target_sum:',denominators)
    denom_total = np.sum(denominators)
    for col, q in enumerate(quantiles):
        aggrisk[col] = np.divide(loss_totals[col], denom_total)
    agg_metrics = {f'wQuantileLoss[{q}]': aggrisk[col]
                   for col, q in enumerate(quantiles)}
    print(agg_metrics.values())
    return agg_metrics, aggrisk
def clear_samples(full_samples, full_tss, clearidx):
    """
    Return deep copies of (full_samples, full_tss) with the laps listed in
    *clearidx* blanked out (set to NaN) for every car; inputs are untouched.
    """
    import copy
    masked_samples = copy.deepcopy(full_samples)
    masked_tss = copy.deepcopy(full_tss)
    for carno in masked_tss.keys():
        # NaN-out the selected lap columns for this car
        masked_samples[carno][:, clearidx] = np.nan
        masked_tss[carno][clearidx] = np.nan
    return masked_samples, masked_tss
def do_rerank(dfout, short=True):
    """
    Re-sort predicted end ranks globally within each start lap.

    The model's 'pred_endrank' may be float; within every startlap group the
    cars are re-ranked 0..n-1 by sorting those predictions, then
    'pred_diff' / 'pred_sign' are recomputed against the true 'endrank'.

    short=True  -> 9-column shortterm format (endlap columns absent)
    short=False -> 11-column format including endlap / pred_endlap
    """
    cols = ['carno','startlap','startrank','endrank','diff','sign',
            'pred_endrank','pred_diff','pred_sign','endlap','pred_endlap']
    colid = {name: pos for pos, name in enumerate(cols)}
    print('rerank...')
    chunks = []
    for lap in set(dfout.startlap.values):
        block = dfout[dfout['startlap']==lap].to_numpy()
        # double argsort turns sort positions into dense ranks 0..n-1
        order = np.argsort(block[:, colid['pred_endrank']], axis=0)
        block[:, colid['pred_endrank']] = np.argsort(order, axis=0)
        block[:, colid['pred_diff']] = block[:, colid['pred_endrank']] - block[:, colid['endrank']]
        for rec in block:
            d = rec[colid['pred_diff']]
            rec[colid['pred_sign']] = 0 if d == 0 else (1 if d > 0 else -1)
        chunks.append(block)
    stacked = np.vstack(chunks) if chunks else []
    if short:
        return pd.DataFrame(stacked.astype(int), columns = cols[:-2])
    return pd.DataFrame(stacked.astype(int), columns = cols)
# In[ ]:
def long_predict_bymloutput_multirun(output, dfin, test_ds, predictor, sampleCnt=100):
    """
    Assemble a race-long forecast whose values come from ML output rows in
    *dfin* (pred_endrank, multiple rows/runs per startlap) rather than from
    the predictor's own samples; the predictor run only supplies window
    start dates and a SampleForecast template.

    input:
        output ; label string (only used by the commented plot call)
        dfin ; DataFrame with 'startlap' / 'pred_endrank' columns
        test_ds ; rolling test dataset
        predictor ; trained gluonts predictor
        sampleCnt ; sample paths per window -- assumed to equal the number of
            runs per startlap in dfin so the column assignment broadcasts
            (TODO confirm)
    """
    def get_start(idx):
        # offset in minutes (== laps) of window idx from the series start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds,       # test dataset
        predictor = predictor,   # predictor
        num_samples=sampleCnt,   # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # wide buffer covering all windows; NaN where no prediction lands
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        start_pos = get_start(idx)
        # column where this window's final prediction step lands
        pos = start_pos - first_start + npredict - 1
        # map window start to dfin's startlap convention:
        #   1 ... 10 | 11 <- start pos in forecasts
        #   0 ... 9  | 10 <- 9 is the startlap
        startlap = start_pos - 2
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank starts from 1 for visualization, hence the +1;
            # one value per run fills the whole sample column
            pred_val = _rec.pred_endrank.values
            newsamples[:, pos] = pred_val + 1
    target.samples = newsamples
    print('multirun target samples:', target.samples.shape)
    return target,tss[0]
def long_predict_bymloutput(output, dfin, test_ds, predictor):
    """
    Single-run variant of long_predict_bymloutput_multirun(): each startlap
    contributes one scalar prediction (the first matching row's
    pred_endrank), replicated across all 100 sample paths.

    input:
        output ; label string (only used by the commented plot call)
        dfin ; DataFrame with 'startlap' / 'pred_endrank' columns
        test_ds ; rolling test dataset
        predictor ; trained gluonts predictor
    """
    def get_start(idx):
        # offset in minutes (== laps) of window idx from the series start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds,      # test dataset
        predictor = predictor,  # predictor
        num_samples=100,        # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # wide buffer covering all windows; NaN where no prediction lands
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        start_pos = get_start(idx)
        # column where this window's final prediction step lands
        pos = start_pos - first_start + npredict - 1
        # map window start to dfin's startlap convention:
        #   1 ... 10 | 11 <- start pos in forecasts
        #   0 ... 9  | 10 <- 9 is the startlap
        startlap = start_pos - 2
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank starts from 1 for visualization, hence the +1
            pred_val = _rec.pred_endrank.values[0]
            newsamples[:, pos] = pred_val + 1
    target.samples = newsamples
    print('target samples:', target.samples.shape)
    return target,tss[0]
def long_predict_bysamples(output, samples, tss, test_ds, predictor):
    """
    Build a race-long forecast directly from a precomputed sample matrix.
    The predictor is run only to obtain window positions and a forecast
    template; *samples* supplies the actual values (+1 shifts ranks to the
    1-based scale used for visualization).

    input:
        output ; label string (only used by the commented plot call)
        samples ; ndarray (n_samples, n_laps) of precomputed predictions
        tss ; unused -- immediately overwritten by the local ground-truth list
    """
    def get_start(idx):
        # offset in minutes (== laps) of window idx from the series start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds,      # test dataset
        predictor= predictor,   # predictor
        num_samples=100,        # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print(first_start, last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # sample array size: last_start - first_start + npredict
    arraysize = last_start - first_start + npredict
    # slice the covered lap range out of the precomputed samples
    target.samples = samples[:, first_start:first_start + arraysize] + 1
    print('long_predict_bysamples==>target samples shape:', target.samples.shape)
    return target, tss[0]
#
# different idx format to bymloutput
#
def long_predict_bydf(output, dfin, test_ds, predictor):
    """
    Same as long_predict_bymloutput() but with a different startlap index
    convention: here startlap = start_pos - 1 (vs. start_pos - 2 there), for
    DataFrames whose startlap column follows the shortterm format.

    input:
        output ; label string (only used by the commented plot call)
        dfin ; DataFrame with 'startlap' / 'pred_endrank' columns
        test_ds ; rolling test dataset
        predictor ; trained gluonts predictor
    """
    def get_start(idx):
        # offset in minutes (== laps) of window idx from the series start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds,      # test dataset
        predictor = predictor,  # predictor
        num_samples=100,        # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # wide buffer covering all windows; NaN where no prediction lands
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        start_pos = get_start(idx)
        # column where this window's final prediction step lands
        pos = start_pos - first_start + npredict - 1
        # get prediction from ml output
        # pos = laps
        # 1 ... 10 | 11 <- start pos in forecasts
        # 0 ... 9  | 10 <- 9 is the startlap
        startlap = start_pos - 1
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank starts from 1 for visualization, hence the +1
            pred_val = _rec.pred_endrank.values[0]
            newsamples[:, pos] = pred_val + 1
    target.samples = newsamples
    print('target samples:', target.samples.shape)
    return target,tss[0]
def get_ranknet_multirun(retdata, testcar, test_ds, predictor, sampleCnt=100):
    """
    Collect every run's shortterm prediction rows for *testcar*, shift the
    lap/rank columns to the 0-based ML-model convention, and build a long
    forecast track via long_predict_bymloutput_multirun().

    retdata maps run index -> simulation result whose first element is the
    prediction DataFrame.
    """
    per_run = [retdata[run_id][0] for run_id in retdata.keys()]
    filtered = [df[df['carno'] == testcar] for df in per_run]
    dfin_ranknet = pd.concat(filtered)
    print('dfin_ranknet size:', len(dfin_ranknet))
    # shift to the ml model's 0-based format
    for col in ('startlap', 'startrank', 'endrank'):
        dfin_ranknet[col] = dfin_ranknet[col] - 1
    return long_predict_bymloutput_multirun('ranknet-rank', dfin_ranknet,
                                            test_ds, predictor, sampleCnt=sampleCnt)
# In[ ]:
def ploth(ts_entry, forecast_entry, pits,caution, pitstop,outputfile,
          colors = ['r','g','m'],
          plabels= ['observed','svr','arima','ranknet'],
          ylabel = 'RANK'):
    """
    Plot one subplot per forecast: the observed series, a 2-lap-lagged
    'CurRank' baseline, the forecast with a 90% interval, pit-stop markers
    and a per-lap race-status strip. Saves to <outputfile>.pdf.

    Args:
        ts_entry: list of ground-truth DataFrames, one per subplot
        forecast_entry: list of gluonts SampleForecast objects
        pits: rows (completed_laps, rank, laptime, ...) at pit-stop laps
        caution, pitstop: 0/1 per-lap status arrays (see add_status)
        outputfile: figure title and .pdf file stem
        colors: line color per forecast
        plabels: plabels[0] labels observations; plabels[i+1] labels
            forecast_entry[i]
        ylabel: 'RANK' or a laptime label; switches y-limits and pit markers

    NOTE: the mutable list defaults are never mutated, so sharing across
    calls is harmless here.
    """
    prediction_intervals = [90.0]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    figcnt = len(forecast_entry)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    for idx in range(figcnt):
        ax = plt.subplot(figcnt, 1, idx+1)
        # observed series
        ts_entry[idx].iloc[:,0].plot(linewidth=1, color='b',
                                     marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # 'CurRank' baseline: the observed value shifted 2 laps forward
        sv = ts_entry[idx].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq='1min') + 2
        date_index = pd.date_range(start, periods = len(sv)-2, freq='1min')
        df2 = pd.DataFrame(sv[:-2], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
                           marker='+', alpha=0.7, zorder=-1, label='CurRank')
        # forecast with its 90% interval
        forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals,
                                             color=colors[idx],label=plabels[idx+1], zorder=10)
        # for subplots beyond the 4th also overlay the mean of the samples
        # (compare against the plotted median)
        if idx>3:
            mean_forecast = copy.deepcopy(forecast_entry[idx])
            mean_forecast.samples = np.mean(mean_forecast.samples, axis=0).reshape((1,-1))
            mean_forecast.copy_dim(0).plot(prediction_intervals=prediction_intervals,
                                           color='g',label='use-mean', zorder=10)
        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        ax.set_ylabel(ylabel)
        if idx==0:
            plt.title(outputfile)
        # relabel x ticks to show lap numbers instead of timestamps
        locs, labels = plt.xticks()
        start_loc = locs[0]
        offset = range(0, gvar.maxlap, 5)
        new_locs = [start_loc + x for x in offset]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[idx])
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            # NOTE(review): reuses xl from the previous iteration -- works
            # because an earlier subplot is always visited first
            xlim_h = len(ts_entry[idx])
            ax.set_xlim((xl+0,xl+xlim_h))
        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)
        # pit markers and per-lap status strip (column 1 = rank, 2 = laptime)
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcar(carno):
    """
    Draw the first five rank forecasts for one car using the globally
    prepared `alldata` and `gvar.rankdata`.
    """
    ts_list, target_list = alldata[carno]
    pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, gvar.rankdata)
    print(np.where(pitstop == 1))
    ploth(ts_list[:5], target_list[:5], pits, caution, pitstop,
          'ranknet-rf-rank-forecast-%d' % carno,
          colors=['y', 'c', 'g', 'm', 'r'],
          plabels=['observed', 'SVR', 'RF', 'Arima', 'RrankNet-Oracle', 'RrankNet-MLP'])
def plotcar_laptime(carno):
    """
    Draw laptime forecasts for one car from the globally prepared `alldata`
    and `gvar.rankdata`.
    """
    ts_list, target_list = alldata[carno]
    pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, gvar.rankdata)
    print(np.where(pitstop == 1))
    ploth(ts_list, target_list, pits, caution, pitstop,
          'ranknet-oracle-laptime-forecast-%d' % carno,
          colors=['m', 'r'],
          plabels=['observed', 'RrankNet-Oracle', 'RrankNet-MLP'],
          ylabel='LapTime')
def plotrank(outputfile, mode='RANK' ):
    """
    Plot the observed rank (or laptime) trace of every car in the global
    `alldata`, one subplot per car, with pit-stop markers and a per-lap
    status strip. Saves to <outputfile>.pdf.

    input:
        alldata, gvar.rankdata ; global data
        mode ; 'RANK' plots rank, anything else plots laptime
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        # forecast entries are unused here; only the race status is plotted
        tsss, targets = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, gvar.rankdata)
        print(np.where(pitstop==1))
        ax = plt.subplot(figcnt, 1, idx+1)
        if mode == 'RANK':
            ax.plot(ranks, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='Rank')
            ax.set_ylim((-5,+35))
            # pits[:,0] is completed_laps (1-based); pits[:,1] the rank at the stop
            ax.plot(pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,0, caution, pitstop)
        else:
            ax.plot(laptimes, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='LapTime')
            ax.set_ylim((30,140))
            # pits[:,2] is the laptime at the stop
            ax.plot(pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,0, caution, pitstop,y=32, height=5)
        ax.set_xlim((0,gvar.maxlap))
        ax.set_ylabel('car-%d'%carno)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcarx(carno):
    """
    Like plotcar(), but splices the weighted-oracle result from the global
    `oracledata` into slot 2 of the car's series/forecast lists (mutating
    `alldata[carno]` in place, as the original did) before plotting.
    """
    ts_list, target_list = alldata[carno]
    oracle_ts, oracle_target = oracledata[carno]
    ts_list[2] = oracle_ts[1]
    target_list[2] = oracle_target[1]
    pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, gvar.rankdata)
    print(np.where(pitstop == 1))
    ploth(ts_list[:5], target_list[:5], pits, caution, pitstop,
          'ranknet-rf-rank-forecast-%d' % carno,
          colors=['y', 'c', 'g', 'm', 'r'],
          plabels=['observed', 'SVR', 'RF', 'Weighted-Oracle', 'RrankNet-Oracle', 'RrankNet-MLP'])
def plotoracle(alldata, carno, destdir):
    """
    Plot the oracle long-forecast variants for one car and save the figure
    under *destdir*.
    """
    outfile = destdir + 'ranknet-oracle-forecast-%d' % carno
    ts_list, target_list = alldata[carno]
    pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, gvar.rankdata)
    print(np.where(pitstop == 1))
    ploth(ts_list, target_list, pits, caution, pitstop,
          outfile,
          colors=['y', 'c', 'g', 'm', 'r'],
          plabels=['observed', '1run-samples', '1run-df', 'multimean', 'norerank-multimean', 'mrun-samples'])
def plotallcars(alldata, outputfile, drawid = 0,
                colors = ['g','c','m','r','y'],
                plabels= ['observed','1run-samples','1run-df','multimean','norerank-multimean','mrun-samples'],
                ylabel='RANK'):
    """
    Plot a single figure with one subplot per car: observed series, a
    prediction_length-lagged 'CurRank' baseline, and one selected long
    forecast. Saves to <outputfile>.pdf.

    input:
        gvar.prediction_length, gvar.freq, gvar.maxlap, gvar.rankdata ; globals
        alldata ; dict carno -> (ts_entry list, forecast_entry list)
        drawid : index into alldata[carno] lists selecting which long
            prediction to draw
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    prediction_intervals = [90.0]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    font = {'family': 'serif',
            'color': 'darkred',
            'weight': 'normal',
            'size': 12,
            }
    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        ts_entry, forecast_entry = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, gvar.rankdata)
        print(np.where(pitstop==1))
        ax = plt.subplot(figcnt, 1, idx+1)
        # observed
        ts_entry[drawid].iloc[:,0].plot(linewidth=1, color='b',
                                        marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # currank: the observation shifted prediction_length laps forward
        sv = ts_entry[drawid].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq=gvar.freq) + gvar.prediction_length
        date_index = pd.date_range(start, periods = len(sv) - gvar.prediction_length, freq = gvar.freq)
        df2 = pd.DataFrame(sv[:- gvar.prediction_length], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
                           marker='+', alpha=0.7, zorder=-1, label='CurRank')
        # forecast selected by drawid
        forecast_entry[drawid].copy_dim(0).plot(prediction_intervals=prediction_intervals,
                                                color=colors[drawid],label=plabels[drawid+1], zorder=10)
        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        ax.set_ylabel(ylabel)
        # relabel x ticks to show lap numbers instead of timestamps
        locs, labels = plt.xticks()
        start_loc = locs[0]
        offset = range(0, gvar.maxlap, 5)
        new_locs = [start_loc + x for x in offset]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[drawid])
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            # NOTE(review): reuses xl from the previous iteration -- works
            # because an earlier subplot is always visited first
            xlim_h = len(ts_entry[drawid])
            ax.set_xlim((xl+0,xl+xlim_h))
        # per-subplot car label in the upper right corner
        plt.text(xl + xlim_h - 15, 35, f'car-{carno}',fontdict=font)
        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)
        # pit markers and per-lap status strip (column 1 = rank, 2 = laptime)
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def get_racestatus(carno, rankdata):
    """
    Extract pit/caution status and rank/laptime traces for one car from the
    race DataFrame.

    completed_laps starts at 0; the trailing [1:] slices drop the lap-0
    record so the status arrays align with per-lap arrays used elsewhere.

    Returns:
        pits     : (k, 4) rows of (completed_laps, rank, last_laptime,
                   time_behind_leader) at pit-stop laps
        cautions : same row layout at yellow-flag laps
        caution  : 0/1 yellow-flag indicator per lap (lap 0 dropped)
        pitstop  : 0/1 pit-stop indicator per lap (lap 0 dropped)
        ranks    : (n, 1) rank column (lap 0 dropped)
        laptimes : (n, 1) last_laptime column (lap 0 dropped)
    """
    cardf = rankdata[rankdata['car_number'] == carno]
    data = cardf[['completed_laps', 'rank', 'last_laptime', 'time_behind_leader']].values
    # 'P' marks a pit lap, 'Y' a caution lap
    pitstop = np.array([1 if s == 'P' else 0 for s in cardf['lap_status'].values])
    caution = np.array([1 if s == 'Y' else 0 for s in cardf['track_status'].values])
    pits = data[np.where(pitstop == 1)]
    cautions = data[np.where(caution == 1)]
    ranks = cardf[['rank']].values
    laptimes = cardf[['last_laptime']].values
    return pits, cautions, caution[1:], pitstop[1:], ranks[1:], laptimes[1:]
# Colors used by add_status() to paint the per-lap race-status strip.
# (Lighter pastel alternatives kept for reference: '#ff8080' red,
#  '#8080ff' yellow, '#80ff80' green.)
red = 'red'        # pit-stop lap
yellow = 'yellow'  # caution / yellow-flag lap
green = 'green'    # normal racing lap
def add_status(axs, xl, caution, pitstop, y=-4, height=2):
    """
    Paint a one-rectangle-per-lap status strip onto *axs*:
    green = racing, yellow = caution, red = pit stop (pit wins over caution).

    input:
        xl ; the axis' left x-limit (rectangles are placed relative to it)
        caution, pitstop ; 0/1 per-lap race status arrays
        y, height ; vertical placement of the strip
    """
    n_laps = min(len(caution), len(pitstop))
    for lap in range(n_laps):
        if pitstop[lap] == 1:
            color = red
        elif caution[lap] == 1:
            color = yellow
        else:
            color = green
        patch = plt.Rectangle((lap + xl - 0.5, y), 1, height, fc=color, ec=color)
        axs.add_patch(patch)
# In[ ]:
#
# stint evaluation
#
def eval_bydf(testdf, bydf, forcematch=True, force2int=False):
    """
    Evaluate *testdf* restricted to the (carno, startlap) records present in
    *bydf*, so two models are compared on a common record set.

    forcematch: additionally require that startrank (column 2) and endrank
        (column 3) agree between the two frames; mismatching records are
        silently dropped.
    force2int: rebuild the output with all columns cast to int (assumes the
        11-column stint format).

    Returns:
        (dfout, accret) ; the filtered, sorted frame and the first element
        of stint.get_evalret() computed on it.
    """
    # collect only records in bydf <carno and startlap>
    cars = set(bydf.carno.values)
    startlaps = {}
    for car in cars:
        startlaps[car] = set(bydf[bydf['carno']==car].startlap.values)
    retdf = []
    for car in cars:
        for startlap in startlaps[car]:
            dfrec = testdf[(testdf['carno']==car) & (testdf['startlap']==startlap)]
            # check that both frames agree on this record
            if forcematch:
                a = testdf[(testdf['carno']==car) & (testdf['startlap']==startlap)].to_numpy().astype(int)
                b = bydf[(bydf['carno']==car) & (bydf['startlap']==startlap)].to_numpy().astype(int)
                if len(a)!=0 and len(b)!=0:
                    # compare startrank (col 2) and endrank (col 3)
                    if not ((a[0][2] == b[0][2]) and (a[0][3] == b[0][3])):
                        continue
            retdf.append(dfrec)
    dfout = pd.concat(retdf)
    if force2int:
        dfdata = dfout.to_numpy().astype(int)
        dfout = pd.DataFrame(dfdata, columns =['carno', 'startlap', 'startrank',
                                               'endrank', 'diff', 'sign',
                                               'pred_endrank', 'pred_diff', 'pred_sign',
                                               'endlap','pred_endlap'
                                               ])
    dfout = dfout.sort_values(by=['carno','startlap'])
    print('df size:', len(dfout))
    # accuracy metrics computed by the stint module
    accret = stint.get_evalret(dfout)[0]
    return dfout , accret
def eval_sync(testdf, errlist, force2int=False):
    """Evaluate testdf after removing the (carno, startlap) records in errlist.

    input:
        testdf   : result df <carno, startlap, startrank, endrank, ...>
        errlist  : list of [carno, startlap] pairs to exclude
        force2int: cast the result back to int columns
    return:
        (dfout, accret) : filtered/sorted df and the accuracy from stint.get_evalret
    """
    kept = []
    for carno in set(testdf.carno.values):
        for lap in set(testdf[testdf['carno'] == carno].startlap.values):
            if [carno, lap] in errlist:
                continue
            kept.append(testdf[(testdf['carno'] == carno) & (testdf['startlap'] == lap)])
    dfout = pd.concat(kept)
    if force2int:
        dfout = pd.DataFrame(dfout.to_numpy().astype(int),
                             columns=['carno', 'startlap', 'startrank',
                                      'endrank', 'diff', 'sign',
                                      'pred_endrank', 'pred_diff', 'pred_sign',
                                      'endlap', 'pred_endlap'])
    dfout = dfout.sort_values(by=['carno', 'startlap'])
    print('df size:', len(dfout))
    accret = stint.get_evalret(dfout)[0]
    return dfout, accret
def cmp_df(testdf, bydf):
    """Compare two result dfs record by record over bydf's (carno, startlap) pairs.

    The dfs may differ slightly (RankNet drops short time series), so each
    record existing in both is checked on startrank/endrank (columns 2/3);
    missing or disagreeing records are reported.

    return:
        (errcnt, err_list) : number of mismatches and their [carno, startlap] pairs
    """
    err_list = []
    errcnt = 0
    for carno in set(bydf.carno.values):
        for lap in set(bydf[bydf['carno'] == carno].startlap.values):
            a = testdf[(testdf['carno'] == carno) & (testdf['startlap'] == lap)].to_numpy().astype(int)
            b = bydf[(bydf['carno'] == carno) & (bydf['startlap'] == lap)].to_numpy().astype(int)
            if len(a) == 0 or len(b) == 0:
                errcnt += 1
                print('mismatch empty:', a, b)
                err_list.append([carno, lap])
            elif not (a[0][2] == b[0][2] and a[0][3] == b[0][3]):
                print('mismatch:', a, b)
                errcnt += 1
                err_list.append([carno, lap])
    print('errcnt:', errcnt)
    return errcnt, err_list
def df2samples(dfall, prediction_len=2, samplecnt=1):
    """Convert a result df into the <samples, tss> array format.

    This variant is for ML-model output, which carries a single point
    forecast per record; the forecast is replicated across *samplecnt* rows.
    Arrays are indexed by lap (length gvar.maxlap) and NaN everywhere a
    prediction/target is absent; the value is placed at startlap + prediction_len.

    return:
        (full_samples, full_tss) : dicts carno -> (samplecnt, maxlap) forecasts
        and carno -> (maxlap,) targets
    """
    full_samples, full_tss = {}, {}
    for carno in set(dfall.carno.values):
        targets = np.full(gvar.maxlap, np.nan)
        forecasts = np.full((samplecnt, gvar.maxlap), np.nan)
        for startlap in set(dfall[dfall['carno'] == carno].startlap.values):
            rec = dfall[(dfall['carno'] == carno) & (dfall['startlap'] == startlap)]
            lap = int(rec.startlap.values[0] + prediction_len)
            forecasts[:, lap] = rec.pred_endrank.values[0]
            targets[lap] = rec.endrank.values[0]
        full_samples[carno] = forecasts
        full_tss[carno] = targets
    return full_samples, full_tss
def df2samples_ex(dfall, samplecnt=100, errlist=None):
    """Convert stint results into the <samples, tss> array format.

    input:
        dfall    : result df <carno, startlap, startrank, endrank, diff, sign,
                   pred_endrank, pred_diff, pred_sign, endlap, pred_endlap>,
                   where pred_endrank holds *samplecnt* samples per record
        samplecnt: number of forecast samples per record
        errlist  : optional list of [carno, startlap] pairs to skip
                   (default None; fixed from a mutable-default-argument `[]`,
                   which is shared across calls)
    return:
        (full_samples, full_tss) : dicts carno -> (samplecnt, maxlap) forecasts
        and carno -> (maxlap,) targets, NaN where absent
    """
    if errlist is None:
        errlist = []
    full_samples = {}
    full_tss = {}
    carlist = set(dfall.carno.values)
    startlaps = {car: set(dfall[dfall['carno'] == car].startlap.values) for car in carlist}
    # NaN-initialized per-car arrays, filled at each valid startlap
    for carno in carlist:
        full_tss[carno] = np.full(gvar.maxlap, np.nan)
        full_samples[carno] = np.full((samplecnt, gvar.maxlap), np.nan)
        for startlap in startlaps[carno]:
            if [carno, startlap] in errlist:
                continue
            dfrec = dfall[(dfall['carno'] == carno) & (dfall['startlap'] == startlap)]
            curlap = int(dfrec.startlap.values[0])
            full_samples[carno][:, curlap] = dfrec.pred_endrank.to_numpy()
            full_tss[carno][curlap] = dfrec.endrank.values[0]
    return full_samples, full_tss
def runs2samples(runret, errlist):
    """Collect forecast samples across multiple stint runs.

    input:
        runret  : list of result dfs <carno, startlap, ..., pred_endrank, ...>,
                  one per run; the number of runs becomes the sample count
        errlist : list of [carno, startlap] pairs to skip
    return:
        (full_samples, full_tss) : dicts carno -> (len(runret), maxlap)
        forecasts and carno -> (maxlap,) targets, NaN where absent
    """
    samplecnt = len(runret)
    carlist = set(runret[0].carno.values)
    dfall = pd.concat(runret)
    full_samples, full_tss = {}, {}
    for carno in carlist:
        targets = np.full(gvar.maxlap, np.nan)
        forecasts = np.full((samplecnt, gvar.maxlap), np.nan)
        for startlap in set(dfall[dfall['carno'] == carno].startlap.values):
            if [carno, startlap] in errlist:
                continue
            dfrec = dfall[(dfall['carno'] == carno) & (dfall['startlap'] == startlap)]
            curlap = int(dfrec.startlap.values[0])
            forecasts[:, curlap] = dfrec.pred_endrank.to_numpy()
            targets[curlap] = dfrec.endrank.values[0]
        full_samples[carno] = forecasts
        full_tss[carno] = targets
    return full_samples, full_tss
# In[ ]:
def get_config():
    """Snapshot the module-level experiment settings as a flat list.

    All entries are module globals set by the option parser / init code.
    The order is positional and consumed by callers, so new settings must
    be appended at the end, never inserted.
    """
    config = [
        _savedata,
        _skip_overwrite,
        _inlap_status,
        _feature_mode,
        _featureCnt,
        freq ,
        _train_len,
        prediction_length,
        context_ratio,
        context_length,
        contextlen,
        dataset,
        epochs,
        gpuid,
        _use_weighted_model,
        trainmodel,
        _use_cate_feature,
        use_feat_static,
        distroutput,
        batch_size,
        loopcnt,
        _test_event,
        testmodel,
        pitmodel,
        year
    ]
    return config
def test_global():
    """Debug helper: mutate gvar._hi to verify shared global state is writable."""
    gvar._hi += 200
def get_event_info(event):
    """Look up the event-info record for an event key such as 'Indy500-2018'.

    The portion before the first '-' is used as the id into gvar.events_info.
    """
    eid, _, _ = event.partition('-')
    return gvar.events_info[eid]
| 99,564 | 32.808149 | 194 | py |
rankpredictor | rankpredictor-master/src/indycar/model/save/before_multiple_datasets/quicktest_simulator.py | #!/usr/bin/env python
# coding: utf-8
# ### stint simulator
# based on: stint_simulator_shortterm_pitmodel.py
#
#
# long term predictor by continuously regressive forecasting at each pitstop
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random, math
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from indycar.model.pitmodel import PitModelSimple, PitModelMLP
from indycar.model.deeparw import DeepARWeightEstimator
import indycar.model.global_variables as gvar
import os
random.seed()
os.getcwd()
#GPUID = 1
# ### global constants
# In[3]:
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
#
# COL_* indices address the feature axis of the laptime_data arrays
# [car, feature, lap].
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8
# alias: the predicted-target column shares slot 8 with lap2nextpit
COL_TARGET_PREDICTED = 8
# added new features
COL_LEADER_PITCNT = 9
COL_TOTAL_PITCNT = 10
# SHIFT_* columns hold the same signals shifted left by the forecast horizon
COL_SHIFT_TRACKSTATUS = 11
COL_SHIFT_LAPSTATUS = 12
COL_SHIFT_LEADER_PITCNT = 13
COL_SHIFT_TOTAL_PITCNT = 14
COL_LASTFEATURE = 14
# dynamically extended space in simulation: scratch copies of the oracle
# status columns so the simulator can overwrite and later restore them
COL_TRACKSTATUS_SAVE = COL_LASTFEATURE+1
COL_LAPSTATUS_SAVE = COL_LASTFEATURE+2
COL_CAUTION_LAPS_INSTINT_SAVE = COL_LASTFEATURE+3
COL_LAPS_INSTINT_SAVE= COL_LASTFEATURE+4
COL_ENDPOS = COL_LASTFEATURE+5
# FEATURE_* are bit flags combined into a feature_mode bitmask
# (tested with test_flag / decoded by decode_feature_mode)
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
FEATURE_LEADER_PITCNT = 8
FEATURE_TOTAL_PITCNT = 16
FEATURE_SHIFT_TRACKSTATUS = 32
FEATURE_SHIFT_LAPSTATUS = 64
FEATURE_SHIFT_LEADER_PITCNT = 128
FEATURE_SHIFT_TOTAL_PITCNT = 256
# flag -> (long name, one-letter code for the short config string)
_feature2str= {
        FEATURE_STATUS : ("FEATURE_STATUS",'S'),
        FEATURE_PITAGE : ("FEATURE_PITAGE",'A'),
        FEATURE_LEADER_PITCNT : ("FEATURE_LEADER_PITCNT",'L'),
        FEATURE_TOTAL_PITCNT : ("FEATURE_TOTAL_PITCNT",'T'),
        FEATURE_SHIFT_TRACKSTATUS : ("FEATURE_SHIFT_TRACKSTATUS",'Y'),
        FEATURE_SHIFT_LAPSTATUS : ("FEATURE_SHIFT_LAPSTATUS",'P'),
        FEATURE_SHIFT_LEADER_PITCNT : ("FEATURE_SHIFT_LEADER_PITCNT",'L'),
        FEATURE_SHIFT_TOTAL_PITCNT : ("FEATURE_SHIFT_TOTAL_PITCNT",'T')
        }
# oracle mode
MODE_ORACLE = 1024  # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2
# oracle mode for training
MODE_NOLAP = 1
MODE_NOTRACK = 2
# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32
# disturbe analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256
# mode flag -> printable name (used for logging/run labels)
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
        MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
        MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
        MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
        MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
        MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
def nan_helper(y):
    """Locate NaNs in a 1-D array.

    Returns a boolean NaN mask plus a converter that turns a logical mask
    into integer indices, handy for interpolation-based gap filling:

    >>> nans, x = nan_helper(y)
    >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    def to_indices(flags):
        return flags.nonzero()[0]
    return np.isnan(y), to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#_feature_mode = FEATURE_STATUS
def decode_feature_mode(feature_mode):
    """Print the long names of the enabled feature flags and return the
    short code string (one character per flag, '0' when disabled),
    following the fixed ordering of _feature2str."""
    enabled = []
    code = []
    for flag, (longname, shortname) in _feature2str.items():
        if test_flag(feature_mode, flag):
            enabled.append(longname)
            code.append(shortname)
        else:
            code.append('0')
    print(' '.join(enabled))
    return ''.join(code)
def add_leader_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS, shift_len = 0,
        dest_col = COL_LEADER_PITCNT,
        verbose = False):
    """
    Add the 'pit stops by cars ahead this lap' feature to mat(car, feature, lap).

    Cars are ordered per lap by rank_col (read shift_len laps back so future
    rank positions are not leaked); each car's value is the cumulative count
    of pit stops taken on that lap by cars ranked ahead of it.  Trailing NaN
    laps of retired cars are re-applied so the feature matches rank_col.

    input:
        selmat   : laptime_data array [car, feature, lap]
        rank_col : column used to order the cars per lap
        pit_col  : column holding the per-lap pit-stop indicator
        shift_len: how many laps back to read the ranking from
        dest_col : column to write into; -1 appends a new feature column
        verbose  : print debug slices (laps 190+) of the computed counts
    return:
        the updated (or extended) array
    """
    dim1, dim2, dim3 = selmat.shape
    # rerank by the rank_col
    idx = np.argsort(selmat[:, rank_col,:], axis=0)
    # bugfix: np.float was removed in numpy>=1.24; use the builtin float.
    # (true_rank itself is currently unused; kept for reference.)
    true_rank = np.argsort(idx, axis=0).astype(float)
    # get leaderCnt by sorted pits
    pits = np.zeros((dim1,dim3))
    for lap in range(shift_len, dim3):
        col = idx[:, lap-shift_len]
        pits[:, lap] = selmat[col, pit_col, lap]
    # cumulative pits ahead of each (sorted) position, excluding itself
    leaderCnt = np.nancumsum(pits, axis=0) - pits
    if verbose:
        print('pits:\n')
        print(pits[:,190:])
        print('leaderCnt raw:\n')
        print(leaderCnt[:,190:])
    #remove nans
    nanidx = np.isnan(leaderCnt)
    leaderCnt[nanidx] = 0
    if verbose:
        print('leaderCnt after remove nan:\n')
        print(leaderCnt[:,190:])
    if dest_col == -1:
        #create a new data column
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: write in place
        newmat = selmat
    # scatter the sorted counts back to the original car rows
    for lap in range(dim3):
        col = idx[:, lap]
        newmat[col, dest_col, lap] = leaderCnt[:, lap]
    # sync length to rank_col: restore the NaN tail of each car
    for rec in newmat:
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            rec[dest_col, -nan_count:] = np.nan
    return newmat
def add_allpit_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS,
        dest_col = COL_TOTAL_PITCNT,verbose = False):
    """
    Add the 'total pits in a lap' feature to mat(car, feature, lap).

    The per-lap pit count summed over all cars is broadcast to every car's
    row; the NaN tail of each car (laps not run, per rank_col) is restored
    afterwards so the feature length matches the rank series.

    input:
        selmat   : laptime_data array [car, feature, lap]
        rank_col : column used to detect each car's valid lap range
        pit_col  : column holding the per-lap pit-stop indicator
        dest_col : column to write into; -1 appends a new feature column
        verbose  : print debug slices (laps 190+) of the computed counts
    return:
        the updated (or extended) array
    """
    dim1, dim2, dim3 = selmat.shape
    #calc totalCnt vector for
    totalCnt = np.nansum(selmat[:, pit_col, :], axis=0).reshape((-1))
    if verbose:
        # bugfix: this branch referenced an undefined 'pits' variable
        # (copy-paste from add_leader_cnt) and raised NameError; print the
        # pit column directly instead.
        print('pits:\n')
        print(selmat[:, pit_col, 190:])
        print('totalCnt raw:\n')
        print(totalCnt[190:])
    #remove nans
    nanidx = np.isnan(totalCnt)
    totalCnt[nanidx] = 0
    if verbose:
        print('totalCnt after remove nan:\n')
        print(totalCnt[190:])
    if dest_col == -1:
        #create a new data column
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: write in place
        newmat = selmat
    for car in range(dim1):
        newmat[car, dest_col, :] = totalCnt
    # sync length to rank_col: restore the NaN tail of each car
    for rec in newmat:
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            rec[dest_col, -nan_count:] = np.nan
    return newmat
def add_shift_feature(selmat, rank_col=COL_RANK, shift_col=COL_LAPSTATUS, shift_len = 2,
        dest_col = -1,verbose = False):
    """
    add a new feature into mat(car, feature, lap):
    the values of shift_col shifted *left* by shift_len laps, i.e. at lap t
    the feature holds the status of lap t + shift_len.

    warning: these are oracle features (they expose the future), be careful
    not to let future rank positions leak into training inputs.

    input:
        selmat    : laptime_data array [car, feature, lap]
        rank_col  : column whose non-NaN span defines each car's valid laps
        shift_col : column to shift
        shift_len : laps to shift left
        dest_col  : column to write into; -1 appends a new feature column
        verbose   : unused debug switch
    return:
        the updated (or extended) array
    """
    dim1, dim2, dim3 = selmat.shape
    if dest_col == -1:
        #create a new data column
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: write in place
        newmat = selmat
    for car in range(dim1):
        # set empty status by default (NaN everywhere, incl. the tail)
        newmat[car, dest_col, :] = np.nan
        # get valid laps: compress shift_col to the car's non-NaN rank span
        rec = selmat[car]
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        recnnz = rec[shift_col, ~np.isnan(rec[rank_col,:])]
        reclen = len(recnnz)
        #shift copy: zero the valid span, then copy the left-shifted values;
        # the last shift_len valid laps stay 0 (no future data available)
        newmat[car, dest_col, :reclen] = 0
        #newmat[car, dim2, :-shift_len] = selmat[car, shift_col, shift_len:]
        newmat[car, dest_col, :reclen-shift_len] = recnnz[shift_len:]
    # sync length to COL_RANK
    #for rec in newmat:
    #    nans, x= nan_helper(rec[rank_col,:])
    #    nan_count = np.sum(nans)
    #    if nan_count > 0:
    #        #todo, some invalid nan, remove them
    #        #rec[dim2, np.isnan(rec[dim2,:])] = 0
    #        rec[dim2, -nan_count:] = np.nan
    return newmat
def update_laptimedata(prediction_length, freq,
                test_event = 'Indy500-2018',
                train_ratio=0.8,
                context_ratio = 0.,
                shift_len = -1,
                #target_pred = None,
                rank_col = COL_RANK,
                verbose = False):
    """
    update the derived features of the test event in the global laptime_data,
    in place: leader pit count, total pit count, and the four left-shifted
    oracle columns (track/lap status and both pit counts).

    NOTE(review): the test_event/train_ratio/context_ratio parameters are not
    used by this body — the event is selected via the module global
    _test_event; confirm before relying on them.

    input:
        prediction_length : forecast horizon; default shift length
        shift_len         : laps to shift the oracle features; < 0 means
                            use prediction_length
        rank_col          : column used for per-lap ranking in add_leader_cnt
        laptime_data      : global var, list of
                            [eventid, carids, array(car, feature, lap)]
    output:
        the (mutated) global laptime_data
    """
    global laptime_data
    #inplace update
    #_laptime_data = laptime_data.copy()
    _laptime_data = laptime_data
    #get test event
    test_idx = -1
    for idx, _data in enumerate(laptime_data):
        if gvar.events[_data[0]] == _test_event:
            test_idx = idx
            break
    # check shift len
    if shift_len < 0:
        shift_len = prediction_length
    if verbose:
        print('update_laptimedata shift len:', shift_len, test_idx)
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    new_data = []
    if test_idx >= 0:
        _data = laptime_data[test_idx]
        # use to check the dimension of features: fewer than COL_LASTFEATURE+1
        # columns means the derived features must be created (appended);
        # otherwise they are updated in place at their fixed column indices
        input_feature_cnt = _data[2].shape[1]
        if verbose:
            if input_feature_cnt < COL_LASTFEATURE + 1:
                print('create new features mode, feature_cnt:', input_feature_cnt)
            else:
                print('update features mode, feature_cnt:', input_feature_cnt)
        # add new features
        # add leaderPitCnt
        #if _data[0]==0:
        #    verbose = True
        #else:
        #    verbose = False
        verbose = False
        #
        # be careful on leader_cnt for the future rank leaking
        #
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_LEADER_PITCNT
        #if not target_pred:
        #    # update leader_cnt by predicted target
        #    data2_intermediate = add_leader_cnt(_data[2], shift_len = shift_len,
        #            rank_col = COL_TARGET_PREDICTED,
        #            dest_col=dest_col, verbose = verbose)
        #else:
        #    # update leader_cnt by true target
        #    data2_intermediate = add_leader_cnt(_data[2], shift_len = shift_len, dest_col=dest_col, verbose = verbose)
        data2_intermediate = add_leader_cnt(_data[2], shift_len = shift_len,
                rank_col = rank_col,
                dest_col=dest_col, verbose = verbose)
        # add totalPit
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_TOTAL_PITCNT
        data2_intermediate = add_allpit_cnt(data2_intermediate, dest_col=dest_col)
        #
        # add shift features, a fixed order, see the MACROS
        #COL_SHIFT_TRACKSTATUS = 11
        #COL_SHIFT_LAPSTATUS = 12
        #COL_SHIFT_LEADER_PITCNT = 13
        #COL_SHIFT_TOTAL_PITCNT = 14
        #
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TRACKSTATUS
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_TRACKSTATUS, shift_len = shift_len)
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LAPSTATUS
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_LAPSTATUS, shift_len = shift_len)
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LEADER_PITCNT
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_LEADER_PITCNT, shift_len = shift_len)
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TOTAL_PITCNT
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_TOTAL_PITCNT, shift_len = shift_len)
        # final: write the enriched array back into the global structure
        data2_newfeature = data2_intermediate
        #new_data.append([_data[0], _data[1], data2_newfeature])
        laptime_data[test_idx][2] = data2_newfeature
    return laptime_data
def get_real_features(feature_mode, rec, endpos):
    """Build the list of real-valued feature rows selected by *feature_mode*.

    input:
        feature_mode : bitmask of FEATURE_* flags
        rec          : one car's array [feature, lap]
        endpos       : lap cutoff; <= 0 means use the full length
    return:
        list of 1-D arrays rec[col, :endpos], in the fixed flag order:
        track+lap status (FEATURE_STATUS, two rows), pit age, leader/total
        pit counts, then the four shifted variants.
    """
    if endpos <= 0:
        endpos = rec.shape[1]
    features = []
    if test_flag(feature_mode, FEATURE_STATUS):
        features.append(rec[COL_TRACKSTATUS, :endpos])
        features.append(rec[COL_LAPSTATUS, :endpos])
    # remaining flags each select a single feature column
    for flag, col in ((FEATURE_PITAGE, COL_LAPS_INSTINT),
                      (FEATURE_LEADER_PITCNT, COL_LEADER_PITCNT),
                      (FEATURE_TOTAL_PITCNT, COL_TOTAL_PITCNT),
                      (FEATURE_SHIFT_TRACKSTATUS, COL_SHIFT_TRACKSTATUS),
                      (FEATURE_SHIFT_LAPSTATUS, COL_SHIFT_LAPSTATUS),
                      (FEATURE_SHIFT_LEADER_PITCNT, COL_SHIFT_LEADER_PITCNT),
                      (FEATURE_SHIFT_TOTAL_PITCNT, COL_SHIFT_TOTAL_PITCNT)):
        if test_flag(feature_mode, flag):
            features.append(rec[col, :endpos])
    return features
def get_real_features(feature_mode, rec, endpos):
    """Select the real-valued feature rows enabled in *feature_mode*.

    NOTE(review): this is an exact functional duplicate of the
    get_real_features defined earlier in the file; being later, it is the
    definition actually bound at import time.

    input:
        feature_mode : bitmask of FEATURE_* flags
        rec          : one car's array [feature, lap]
        endpos       : lap cutoff; <= 0 means use the full length
    return:
        list of 1-D arrays rec[col, :limit] in the fixed flag order.
    """
    limit = endpos if endpos > 0 else rec.shape[1]
    selected = []
    if test_flag(feature_mode, FEATURE_STATUS):
        selected.append(rec[COL_TRACKSTATUS, :limit])
        selected.append(rec[COL_LAPSTATUS, :limit])
    single_cols = [
        (FEATURE_PITAGE, COL_LAPS_INSTINT),
        (FEATURE_LEADER_PITCNT, COL_LEADER_PITCNT),
        (FEATURE_TOTAL_PITCNT, COL_TOTAL_PITCNT),
        (FEATURE_SHIFT_TRACKSTATUS, COL_SHIFT_TRACKSTATUS),
        (FEATURE_SHIFT_LAPSTATUS, COL_SHIFT_LAPSTATUS),
        (FEATURE_SHIFT_LEADER_PITCNT, COL_SHIFT_LEADER_PITCNT),
        (FEATURE_SHIFT_TOTAL_PITCNT, COL_SHIFT_TOTAL_PITCNT),
    ]
    selected += [rec[col, :limit] for flag, col in single_cols
                 if test_flag(feature_mode, flag)]
    return selected
#
# interface with QuickTest
#
def set_laptimedata(newdata):
    """Replace the global laptime_data (QuickTest interface).

    Logs the shape of the test event's array in *newdata*; the test event
    is located in the *current* global laptime_data via gvar.events and the
    module global _test_event (-1 indexes the last entry if not found).
    """
    global laptime_data
    test_idx = next((i for i, entry in enumerate(laptime_data)
                     if gvar.events[entry[0]] == _test_event), -1)
    print('Set a new global laptime_data, test_event=%s, cnt=%d, shape=%s'%(_test_event, len(newdata), newdata[test_idx][2].shape))
    laptime_data = newdata
#
#
#
def load_data(event, year=0):
    """Load a race's final lap-by-lap csv and build derived views.

    input:
        event : event name, e.g. 'Indy500-2018'
        year  : optional int year suffix; when > 0 the file
                '../data/final/C_<event>-<year>.csv' is read instead of
                '../data/final/C_<event>.csv'
    return:
        (alldata, rankdata, acldata): the raw records, the per-(car, lap)
        rank view (first record by elapsed time), and the completed-laps
        dataset built over all cars.
    """
    # bugfix: year is an int, so the original '... + year + ...' string
    # concatenation raised TypeError; convert explicitly.
    if year > 0:
        inputfile = '../data/final/C_' + event + '-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'
    dataset = pd.read_csv(inputfile)
    final_lap = max(dataset.completed_laps)
    # cars that recorded the final lap finished the race
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    #make a copy before filtering down to the finishers
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    # per-(car, lap) rank view: first record by elapsed time wins
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
# pick up data with valid rank
rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
# resort by car_number, lap
uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
#uni_ds = uni_ds.drop(["unique_id", "best_lap", "current_status", "track_status", "lap_status",
# "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
# "last_pitted_lap","start_position","laps_led"], axis=1)
uni_ds = uni_ds.drop(["unique_id", "best_lap",
"laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
"last_pitted_lap","start_position","laps_led"], axis=1)
carnumber = set(uni_ds['car_number'])
#print('cars:', carnumber)
#print('#cars=', len(carnumber))
# faster solution , uni_ds already sorted by car_number and lap
uni_ds['rank_diff'] = uni_ds['rank'].diff()
mask = uni_ds.car_number != uni_ds.car_number.shift(1)
uni_ds['rank_diff'][mask] = 0
uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
mask = uni_ds.car_number != uni_ds.car_number.shift(1)
uni_ds['time_diff'][mask] = 0
#df = uni_ds[['car_number','completed_laps','rank','elapsed_time','rank_diff','time_diff']]
df = uni_ds[['car_number','completed_laps','rank','elapsed_time',
'rank_diff','time_diff',"current_status", "track_status", "lap_status"]]
return df
# In[5]:
# NOTE(review): exact duplicate of the nan_helper defined earlier in this
# file; as the later definition, this is the one bound at import time.
def nan_helper(y):
    """Helper to handle indices and logical indices of NaNs.
    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, logical indices of NaNs
        - index, a function, with signature indices= index(logical_indices),
          to convert logical indices of NaNs to 'equivalent' indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x= nan_helper(y)
        >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
    """
    return np.isnan(y), lambda z: z.nonzero()[0]
# NOTE(review): exact duplicate of the test_flag defined earlier in this
# file; as the later definition, this is the one bound at import time.
def test_flag(a, bitflag):
    """Return True iff every bit of *bitflag* is set in *a*."""
    return (a & bitflag) == bitflag
# pit model is separate for each car
def load_model(prediction_length, model_name,trainid,epochs=1000, exproot='../models/remote'):
    """Deserialize (or construct) the predictor for *model_name*.

    input:
        prediction_length : forecast horizon in laps (encoded in model names)
        model_name : a trained model tag ('curtrack', 'zerotrack', 'oracle',
            'pitmodel*', 'joint'/'deepAR-multi', 'transformer', ...) or a
            baseline ('naive', 'zero', 'arima')
        trainid : id suffix of the training run directory
        epochs  : training epochs encoded in the saved model name
        exproot : root directory holding the serialized models
    return:
        a gluonts Predictor
    raises:
        ValueError : if model_name is not recognized (previously this fell
            through to `return predictor` and crashed with UnboundLocalError)
    """
    # NOTE(review): gpu(7) is hard-coded; this fails on hosts with fewer GPUs.
    with mx.Context(mx.gpu(7)):
        #rootdir = f'../models/{exproot}/{_dataset_id}/{_task_id}-{trainid}/'
        rootdir = f'{exproot}/{_dataset_id}/{_task_id}-{trainid}/'
        # deepAR-Oracle
        if model_name == 'curtrack':
            model=f'deepAR-Oracle-{_task_id}-curtrack-indy-f1min-t{prediction_length}-e1000-r1_curtrack_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        elif model_name == 'zerotrack':
            model=f'deepAR-Oracle-{_task_id}-nolap-zerotrack-indy-f1min-t{prediction_length}-e1000-r1_zerotrack_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        #deeparw-oracle
        elif model_name == 'weighted-oracle':
            model=f'deepARW-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # deepAR-Oracle
        elif model_name == 'oracle' or (model_name.find('pitmodel') == 0):
            model=f'deepARW-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
        elif model_name == 'joint' or model_name == 'deepAR-multi':
            model=f'deepAR-multi-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
        # transformer
        elif model_name == 'transformer' or model_name == 'Transformer':
            model=f'Transformer-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
        elif model_name == 'Transformer-MLP' or model_name == 'Transformer-Oracle':
            model=f'Transformer-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
        # deepAR-Oracle
        elif model_name == 'oracle-laponly':
            model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-laponly_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # deepAR-Oracle
        elif model_name == 'oracle-trackonly':
            model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle-trackonly_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # deepAR
        elif model_name == 'deepAR' or model_name == 'standard':
            model=f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # naive
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
        # arima
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                                    prediction_length = prediction_length,trunc_length=60)
        else:
            print(f'error: model {model_name} not support yet!')
            # bugfix: fail loudly instead of hitting UnboundLocalError below
            raise ValueError(f'model {model_name} not supported')
        return predictor
#
# simulator
#
def get_pitlaps(verbose = True, prediction_length=2):
    """
    collect pitlaps info from COL_LAPSTATUS of the test event

    input (module globals):
        laptime_data   ; list of [eventid, carids, array(car, feature, lap)]
        _test_event    ; event to process (matched via gvar.events)
        _train_len     ; minimum laps for a ts (otherwise, discard)
        _inlap_status  ; 0: keep all pit laps; 1: drop the lap before each
                         stop (inlap first); 2: drop the lap after each stop
        _include_endpit; also treat each car's last lap as a pit lap
        global_carids  ; carno -> carid mapping
    return:
        ret_pitlaps ; sorted unique laps at which some car pits
        all_pitlaps ; dict carno -> list of that car's pit laps
        max_lap     ; longest ts length in the event
    """
    run_ts = _run_ts
    #all_pitlaps = [] # carno -> pitstops
    all_pitlaps = {} # carno -> pitstops
    max_lap = 0
    for _data in laptime_data:
        if gvar.events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < _train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            #print(f'carno:{carno}, totallen={totallen}')
            #first, get target a copy
            # target can be COL_XXSTATUS
            lap_status = rec[COL_LAPSTATUS, :]
            pitstops = np.where(lap_status == 1)[0]
            # filter out inlaps (when _inlap_status > 0); pit laps come in
            # (inlap, stop) pairs, so keep every other entry
            if _inlap_status !=0:
                if _inlap_status == 1:
                    #remove inlaps in previous of pit stops
                    pitstops_tmp = [pitstops[x] for x in range(1, len(pitstops), 2)]
                    pitstops = pitstops_tmp
                elif _inlap_status == 2:
                    #remove inlaps in next lap of pit stops
                    pitstops_tmp = [pitstops[x] for x in range(0, len(pitstops), 2)]
                    pitstops = pitstops_tmp
            #all_pitlaps.append(list(pitstops))
            all_pitlaps[carno] = list(pitstops)
            # append the end lap
            if _include_endpit:
                all_pitlaps[carno].append(totallen-1)
    #retrurn: union of all cars' pit laps, sorted
    allset = []
    for l in all_pitlaps.keys():
        allset.extend(all_pitlaps[l])
    ret_pitlaps = sorted(list(set(allset)))
    return ret_pitlaps, all_pitlaps, max_lap
def get_nextpit(pitlaps, startlap):
    """
    Find, for every car, the first pit lap strictly after startlap.

    input:
        pitlaps  ; dict carno -> list of pit laps for that car
        startlap ; reference lap number
    return:
        nextpit_map ; dict carno -> first pit lap > startlap
                      (cars with no later pit are omitted)
        maxpit      ; max next pit over the cars that pit exactly at
                      startlap, or -1 when none of them pits again
    """
    # cars whose pit stop falls exactly on startlap
    hit_cars = [car for car, laps in pitlaps.items() if startlap in laps]

    # first pit strictly after startlap, per car (list order preserved)
    nextpit_map = {}
    for car, laps in pitlaps.items():
        later = [lap for lap in laps if lap > startlap]
        if later:
            nextpit_map[car] = later[0]

    # furthest next-pit among the cars that hit startlap
    maxpit = -1
    for car in hit_cars:
        if car in nextpit_map:
            maxpit = max(maxpit, nextpit_map[car])
    return nextpit_map, maxpit
def sim_init():
    """
    extend laptime data space to COL_ENDPOS
    save the lapstatus in laptime_data

    Widens the test event's feature matrix to COL_ENDPOS columns (when
    needed) and snapshots TRACKSTATUS/LAPSTATUS/CAUTION_LAPS_INSTINT/
    LAPS_INSTINT into their *_SAVE columns so the simulation can reset
    to ground truth later (see update_onets).
    """
    global laptime_data
    #get test event
    test_idx = -1
    for idx, _data in enumerate(laptime_data):
        if gvar.events[_data[0]] == _test_event:
            test_idx = idx
            break
    # NOTE(review): if the test event is missing, test_idx stays -1 and this
    # print indexes the last event — confirm _test_event is always present
    print('sim_init: input laptime_data, shape=', len(laptime_data), laptime_data[test_idx][2].shape, test_idx)
    #update this laptime record
    if test_idx >= 0:
        _data = laptime_data[test_idx][2]
        dim1, dim2, dim3 = _data.shape
        if dim2 < COL_ENDPOS:
            #create a new data
            newmat = np.zeros((dim1, COL_ENDPOS, dim3))
            newmat[:,:dim2,:] = _data.copy()
        else:
            newmat = _data
        #save pit model related features
        newmat[:,COL_TRACKSTATUS_SAVE,:] = newmat[:,COL_TRACKSTATUS, :]
        newmat[:,COL_LAPSTATUS_SAVE,:] = newmat[:,COL_LAPSTATUS, :]
        newmat[:,COL_CAUTION_LAPS_INSTINT_SAVE,:] = newmat[:,COL_CAUTION_LAPS_INSTINT, :]
        newmat[:,COL_LAPS_INSTINT_SAVE, :] = newmat[:,COL_LAPS_INSTINT, :]
        # reset
        if dim2 < COL_ENDPOS:
            laptime_data[test_idx][2] = newmat
    print('sim_init: after laptime_data, shape=', len(laptime_data), laptime_data[test_idx][2].shape)
def update_lapstatus(startlap, pitmodel_trainevent = 'Indy500'):
    """
    Re-simulate pit status for every car in the test event after startlap.

    The pit model was trained on `pitmodel_trainevent`; laps-in-stint are
    rescaled by the ratio of the two events' track parameters so the model
    transfers across events. Delegates per-car work to update_onets().
    """
    # scale factor between the pit model's training event and the test event
    base_eid = _test_event.split('-')[0]
    lap_scale = gvar.events_info[pitmodel_trainevent][1] * 1.0 / gvar.events_info[base_eid][1]

    target_col = _run_ts
    for dataset in laptime_data:
        if gvar.events[dataset[0]] != _test_event:
            continue
        # ts length statistics (retained from the original flow)
        series_lengths = [entry.shape[1] for entry in dataset[2]]
        longest = int(np.max(series_lengths))
        # update every car's record in place
        for row, carno in zip(dataset[2], dataset[1]):
            update_onets(row, startlap, carno, pitscale = lap_scale)
# Pit-stop prediction model used by update_onets(); either a model object
# with a .predict(caution_laps_instint, laps_instint) method or the string
# 'oracle' (which disables simulated pit updates). Assigned elsewhere.
_pitmodel = None
def update_onets(rec, startlap, carno, pitscale = 1.):
    """
    update lapstatus after startlap based on rec by the pit prediction model
    input:
        rec ; a ts with multiple features COL_XXX (mutated in place)
        startlap ; laps up to and including startlap are restored from the
                   *_SAVE ground-truth columns; later laps are re-simulated
        carno ; car number (for debug reporting only)
        pitscale ; lap-count scale between pit-model training event and
                   the test event (see update_lapstatus)
    module globals read: _run_ts, _pitmodel, _pitmodel_bias, _inlap_status
    return:
        None ; rec is updated for COL_LAPSTATUS, COL_CAUTION_LAPS_INSTINT,
               COL_LAPS_INSTINT
    """
    # loop from startlap
    nans, x= nan_helper(rec[_run_ts,:])
    nan_count = np.sum(nans)
    recx = rec[:, ~np.isnan(rec[_run_ts,:])]
    # remove short ts
    totallen = recx.shape[1]
    if startlap >= totallen:
        return
    #totallen = tsrec.shape[1]
    #ipdb.set_trace()
    #reset status :startlap + 1
    # restore ground truth up to startlap (inclusive), clear the rest
    endpos = startlap + 1
    rec[COL_TRACKSTATUS,:] = 0
    rec[COL_LAPSTATUS,:] = 0
    rec[COL_TRACKSTATUS,:endpos] = rec[COL_TRACKSTATUS_SAVE, :endpos]
    rec[COL_LAPSTATUS,:endpos] = rec[COL_LAPSTATUS_SAVE, :endpos]
    rec[COL_CAUTION_LAPS_INSTINT,:endpos] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :endpos]
    rec[COL_LAPS_INSTINT, :endpos] = rec[COL_LAPS_INSTINT_SAVE, :endpos]
    debug_report('start update_onets', rec[COL_LAPSTATUS], startlap, carno)
    #
    #loop on predict nextpit pos
    curpos = startlap
    while True:
        caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, curpos])
        laps_instint = int(rec[COL_LAPS_INSTINT, curpos])
        #scale
        # map stint counters into the pit model's training-event scale
        if pitscale != 1.0:
            caution_laps_instint = int(caution_laps_instint / pitscale)
            laps_instint = int(laps_instint / pitscale)
        pred_pit_laps = _pitmodel.predict(caution_laps_instint, laps_instint) + _pitmodel_bias
        #update by pitscale
        # map the predicted stint length back to the test-event scale
        pred_pit_laps = int(pred_pit_laps * pitscale)
        nextpos = curpos + pred_pit_laps - laps_instint
        #debug
        #if carno == 12:
        #    print('pitmodel: startlap={}, laps_instint={}, cuation_laps={}, \
        #            nextpos={}'.format(curpos, laps_instint, caution_laps_instint, nextpos))
        if nextpos >= totallen:
            # predicted pit falls beyond the race end: extend the stint
            # counters to the last lap and stop
            nextpos = totallen - 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos+1] = caution_laps_instint
            for _pos in range(curpos+1, nextpos+1):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            break
        else:
            #if (pred_pit_laps > laps_instint) and (nextpos < totallen):
            # a valid pit
            rec[COL_LAPSTATUS, nextpos] = 1
            if _inlap_status != 0:
                #inlap is 'P'
                if _inlap_status == 1 :
                    #rec[COL_LAPSTATUS, nextpos-1] = _inlap_status
                    rec[COL_LAPSTATUS, nextpos-1] = 1
                else:
                    #todo: no boudary check
                    #rec[COL_LAPSTATUS, nextpos+1] = _inlap_status
                    rec[COL_LAPSTATUS, nextpos+1] = 1
            # stint counters run up to the pit, then reset at the pit lap
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos] = caution_laps_instint
            rec[COL_CAUTION_LAPS_INSTINT, nextpos] = 0
            for _pos in range(curpos+1, nextpos):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            rec[COL_LAPS_INSTINT, nextpos] = 0
        #go forward
        curpos = nextpos
    debug_report('after update_onets', rec[COL_LAPSTATUS], startlap, carno)
    return
def debug_pitmodel(startlap, carno, laps_instint, caution_laps_instint, samplecnt=1000):
    """
    Sample the pit model repeatedly to inspect its distribution.

    ret:
        list (length samplecnt) of predicted next-pit lap numbers
    """
    return [
        startlap + _pitmodel.predict(caution_laps_instint, laps_instint) - laps_instint
        for _ in range(samplecnt)
    ]
#debug tracking status
#status matrix : laps x ( endCol x 5 features)
#features: target, lapstatus, lap_instint, caution_instint, trackstatus
# NOTE(review): nothing in this chunk writes into _status_mat; presumably
# debug_report_mat below was meant to fill it — confirm before relying on it
_status_mat = {} # stepid -> status matrix
def debug_report_mat(startlap, maxnext):
    """
    output the status of the simulation

    NOTE(review): this function looks unfinished — it walks every car's
    record into `rec` but never formats or prints anything, and the
    fixedWidth/endCol locals are unused. Treat as dead scaffolding.
    """
    fixedWidth = 5
    endCol = 4
    run_ts = _run_ts
    for _data in laptime_data:
        if gvar.events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        #header carno | lap#...
        #fixed width
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
# car numbers watched by the debug_* helpers below; empty list disables
# all debug output
_debug_carlist = []
#_debug_carlist = [12]
def debug_report_ts(msg, rec, startlap, carno, col= COL_LAPSTATUS):
    """Print one feature row of a car's record, split at startlap (watched cars only)."""
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[col, : startlap + 1])
        print('=' * 10)
        print(rec[col, startlap + 1:])
def debug_report(msg, rec, startlap, carno):
    """Print a 1-D status array split at startlap, for watched cars only."""
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[: startlap + 1])
        print('=' * 10)
        print(rec[startlap + 1:])
def debug_print(msg):
    """Print msg only when at least one car is being watched for debugging."""
    if _debug_carlist:
        print(msg)
# works on predicted lap status
def sim_onestep_pred(predictor, prediction_length, freq,
                   startlap, endlap,
                   oracle_mode = MODE_ORACLE,
                   sample_cnt = 100,
                   verbose = False
                   ):
    """
    Regressive one-step simulation: roll the predictor forward from startlap
    to endlap, prediction_length laps at a time, feeding each prediction back
    as input for the next step.

    input:
        predictor ; a gluon-ts style predictor
        prediction_length ; laps predicted per step
        freq ; pandas frequency string for the synthetic time index
        startlap ; first lap of the window
        endlap ; last lap of the window
        oracle_mode ; MODE_* flag combination selecting feature visibility
        sample_cnt ; number of sample paths per prediction
        verbose ; extra logging
    return:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus (saved ground truth)
            1,: -> true target
            2,: -> pred target (overwritten step by step)
            3, -> placeholder
            4, -> placeholder
        forecasts_samples ; {}, carno -> sample_cnt values of the target at
            the farthest predicted lap

    Side effects: writes COL_TARGET_PREDICTED into the global laptime_data
    and rebinds laptime_data via update_laptimedata().
    """
    global laptime_data
    run_ts= _run_ts
    test_event = _test_event
    feature_mode = _feature_mode
    context_ratio = _context_ratio
    train_len = _train_len
    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    test_set = []
    forecasts_et = {}
    forecasts_samples = {}
    #_laptime_data = laptime_data.copy()
    _laptime_data = laptime_data
    carno2rowid = {}
    endpos = startlap + prediction_length + 1
    #while(endpos <= endlap + prediction_length + 1):
    while(endpos <= endlap + prediction_length):
        #make the testset
        #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
        _test = []
        for _data in _laptime_data:
            if gvar.events[_data[0]] != test_event:
                #jump out
                continue
            #statistics on the ts length
            ts_len = [ _entry.shape[1] for _entry in _data[2]]
            max_len = int(np.max(ts_len))
            #ipdb.set_trace()
            if verbose:
                print(f'after ====event:{gvar.events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}, cars={_data[2].shape[0]}')
            # process for each ts
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                rec_raw = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                # remove short ts
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    if verbose:
                        print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                if endpos > totallen:
                    continue
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
                static_cat = [carid]
                #save to carno2rowid map
                if carno not in carno2rowid:
                    carno2rowid[carno] = rowid
                #first, get target a copy
                # target can be COL_XXSTATUS
                #target_val = rec[run_ts,:].copy().astype(np.float32)
                lap_status = rec[COL_LAPSTATUS, :].copy()
                track_status = rec[COL_TRACKSTATUS, :].copy()
                pitage_status = rec[COL_LAPS_INSTINT,:].copy()
                # <3, totallen>  (lazy init of the per-car result matrix)
                if carno not in forecasts_et:
                    forecasts_et[carno] = np.zeros((5, totallen))
                    forecasts_et[carno][:,:] = np.nan
                    forecasts_et[carno][0,:] = rec[COL_LAPSTATUS_SAVE, :].copy()
                    forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
                    forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
                    # for p-risk
                    forecasts_samples[carno] = np.zeros((sample_cnt))
                    # prepare TARGET_PREDICTED in laptime
                    _data[2][rowid][COL_TARGET_PREDICTED, :] = np.nan
                    _data[2][rowid][COL_TARGET_PREDICTED, :totallen] = rec[run_ts,:].copy().astype(np.float32)
                # forecasts_et will be updated by forecasts
                target_val = forecasts_et[carno][2,:]
                # selection of features
                if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
                    rec[COL_TRACKSTATUS, :] = 0
                if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
                    rec[COL_LAPSTATUS, :] = 0
                test_rec_cnt = 0
                # RUN Prediction for single record
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                # test mode
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                    #for pitage, just assume there is no pit
                    start_pitage = pitage_rec[-prediction_length - 1]
                    pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                if test_flag(oracle_mode, MODE_PREDPIT):
                    #todo
                    #lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                    #        laps_instint,prediction_length)
                    #for pitage, use the predicted lap info to update pitage
                    start_pitage = pitage_rec[-prediction_length - 1]
                    for pos in range(prediction_length):
                        if lap_rec[-prediction_length + pos]==0:
                            pitage_rec[-prediction_length + pos] = start_pitage+1
                        else:
                            #new pit
                            start_pitage = 0
                            pitage_rec[-prediction_length + pos] = start_pitage
                # add to test set
                #train real features
                real_features = get_real_features(feature_mode, rec, endpos)
                if _joint_train:
                    # ground truth in forecasts_et, (RANK only)
                    #target_cols = [run_ts, COL_LAPSTATUS]
                    target_cols = [2, 0]
                    #target_val = rec[target_cols].copy().astype(np.float32)
                    # FIX: np.float was deprecated in NumPy 1.20 and removed in
                    # 1.24; the builtin float is the exact equivalent (float64)
                    target_val = forecasts_et[carno][target_cols,:endpos].astype(float)
                    _test.append({'target': target_val,
                        'start': start,
                        'feat_static_cat': static_cat,
                        'feat_dynamic_real': real_features
                         }
                      )
                else:
                    _test.append({'target': target_val[:endpos].astype(np.float32),
                        'start': start,
                        'feat_static_cat': static_cat,
                        'feat_dynamic_real': real_features
                         }
                      )
                test_rec_cnt += 1
                #debug
                #debug_report('simu_onestep', rec, startlap, carno, col= _run_ts)
                #debug_report(f'simu_onestep: {startlap}-{endlap}, endpos={endpos}', target_val[:endpos], startlap, carno)
        # end of for each ts
        # RUN Prediction here
        test_ds = ListDataset(_test, freq=freq,one_dim_target= False if _joint_train else True)
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=test_ds,  # test dataset
            predictor=predictor,  # predictor
            num_samples=sample_cnt,  # number of sample paths we want for evaluation
        )
        forecasts = list(forecast_it)
        tss = list(ts_it)
        #save the forecast results
        ds_iter =  iter(test_ds)
        for idx in range(len(test_ds)):
            test_rec = next(ds_iter)
            #global carid
            carno = decode_carids[test_rec['feat_static_cat'][0]]
            if _joint_train:
                #
                # joint train , multi dimensional target
                # samples – Array of size (num_samples, prediction_length) (1D case) or (num_samples, prediction_length, target_dim)
                #
                if _use_mean:
                    forecast_laptime_mean = np.mean(forecasts[idx].samples[:,:,0], axis=0).reshape((prediction_length))
                else:
                    forecast_laptime_mean = np.median(forecasts[idx].samples[:,:,0], axis=0).reshape((prediction_length))
                forecasts_furtherest_samples = forecasts[idx].samples[:,-1,0].reshape(-1)
            else:
                # 1 dimensional target
                if _use_mean:
                    forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
                else:
                    forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_length))
                forecasts_furtherest_samples = forecasts[idx].samples[:,-1].reshape(-1)
            #update the forecasts , ready to use in the next prediction(regresive forecasting)
            forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
            # update laptime_data
            rowid = carno2rowid[carno]
            _data[2][rowid][COL_TARGET_PREDICTED,len(tss[idx]) - prediction_length:len(tss[idx])] = forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])]
            #debug
            if False:
            #if carno==13:
                #print('samples shape:', forecasts[idx].samples.shape)
                print('tss shape:', tss[idx].shape, 'endpos:', endpos)
                print('forecast mean:', forecast_laptime_mean, len(tss[idx]) - prediction_length)
                print('target true:', forecasts_et[carno][1, len(tss[idx]) - prediction_length:len(tss[idx])])
                print('target pred:', forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])])
            #save the samples, the farest samples
            #forecasts_samples[carno][:] = forecasts[idx].samples[:,-1].reshape(-1)
            forecasts_samples[carno][:] = forecasts_furtherest_samples
        #update laptimedata by new predictions
        #save predictions into laptime data
        # update featues inlaptime data
        laptime_data = update_laptimedata(prediction_length, freq,
                          test_event = _test_event,
                          train_ratio=0, context_ratio = 0.,shift_len = prediction_length,
                          rank_col = COL_TARGET_PREDICTED
                          )
        #go forward
        endpos += prediction_length
    return forecasts_et, forecasts_samples
# pred pit differs to true pit
def get_acc_onestint_pred(forecasts, startlap, nextpit, nextpit_pred, trim=2, currank = False):
    """
    Stint-level rank evaluation when predicted pit laps may differ from the
    true pit laps.
    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; eval for the stint start from startlap only
        nextpit  ; carno -> true next pitstop
        nextpit_pred ; carno -> predicted next pitstop
        currank ; when True, use the "rank stays the same" baseline
    output:
        rows of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign, pitpos, pitpos_pred]
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]
        #lap status condition
        # NOTE(review): lapstatus_cont is only assigned for _inlap_status in
        # {0,1,2}; any other value raises UnboundLocalError below. The
        # _inlap_status==2 branch also reads startlap+1 without a bounds check.
        if _inlap_status == 0:
            lapstatus_cont = (forecasts[carno][0, startlap] == 1)
        elif _inlap_status == 1:
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap-1] == 1))
        elif _inlap_status == 2:
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap+1] == 1))
        if carno in _debug_carlist:
            _debug_msg = 'startlap=%d, total=%d, pitstop status = %s, nextpit=%s, nextpit_pred=%s'%(startlap, lapnum, lapstatus_cont,
                    'none' if (carno not in nextpit) else nextpit[carno],
                    'none' if (carno not in nextpit_pred) else nextpit_pred[carno],
                    )
            debug_print(_debug_msg)
        # check the lap status
        #if ((startlap < lapnum) and (forecasts[carno][0, startlap] == 1)):
        if ((startlap < lapnum) and (lapstatus_cont == True)):
            startrank = true_rank[startlap-trim]
            if not carno in nextpit:
                continue
            pitpos = nextpit[carno]
            if np.isnan(pitpos):
                continue
            #todo, use the true prediction that longer than maxlap
            # when forcing alignment, fall back to the true pit lap whenever
            # a predicted one is missing or nan
            if _force_endpit_align:
                if not carno in nextpit_pred:
                    #continue
                    pitpos_pred = pitpos
                else:
                    pitpos_pred = nextpit_pred[carno]
                    if np.isnan(pitpos_pred):
                        pitpos_pred = pitpos
            else:
                if not carno in nextpit_pred:
                    continue
                pitpos_pred = nextpit_pred[carno]
                if np.isnan(pitpos_pred):
                    #set prediction to the end
                    continue
            endrank = true_rank[pitpos-trim]
            #endrank_pred = true_rank[pitpos_pred-trim]
            diff = endrank - startrank
            sign = get_sign(diff)
            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                pred_endrank = pred_rank[pitpos_pred-trim]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            rankret.append([carno, startlap, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign,
                            pitpos, pitpos_pred
                            ])
    return rankret
# pred pit differs to true pit
def get_acc_onestep_shortterm(forecasts, startlap, endlap, trim=0, currank = False):
    """
    Short-term rank evaluation between startlap and endlap.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; first lap of the evaluation window
        endlap ; last lap of the window (row skipped when nan)
        currank ; when True, use the "rank stays the same" baseline
    output:
        rows of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    records = []
    for carno, mat in forecasts.items():
        total_laps = len(mat[1, :])
        truth = mat[3, :]
        guess = mat[4, :]
        # skip cars whose ts ends before the window, or an undefined endlap
        if startlap >= total_laps:
            continue
        if np.isnan(endlap):
            continue
        rank_at_start = truth[startlap - trim]
        rank_at_end = truth[endlap - trim]
        delta = rank_at_end - rank_at_start
        # currank baseline: assume the rank does not change at all
        pred_end = rank_at_start if currank else guess[endlap - trim]
        pred_delta = pred_end - rank_at_start
        records.append([carno, startlap, rank_at_start,
                        rank_at_end, delta, get_sign(delta),
                        pred_end, pred_delta, get_sign(pred_delta)])
    return records
# works when pred pitstop == true pitstop
def get_acc_onestint(forecasts, startlap, nextpit, trim=2, currank = False):
    """
    Stint-level rank evaluation when the predicted pit lap equals the true
    pit lap.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; evaluate only stints starting with a pit at this lap
        nextpit ; carno -> next pitstop lap (ground truth)
        currank ; when True, use the "rank stays the same" baseline
    output:
        rows of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    results = []
    for carno, mat in forecasts.items():
        total_laps = len(mat[1, :])
        truth = mat[3, :]
        guess = mat[4, :]
        # only cars that actually pit at startlap and have data there
        if not (startlap < total_laps and mat[0, startlap] == 1):
            continue
        if carno not in nextpit:
            continue
        pit_lap = nextpit[carno]
        if np.isnan(pit_lap):
            continue
        rank_at_start = truth[startlap - trim]
        rank_at_end = truth[pit_lap - trim]
        delta = rank_at_end - rank_at_start
        # currank baseline: assume the rank does not change at all
        pred_end = rank_at_start if currank else guess[pit_lap - trim]
        pred_delta = pred_end - rank_at_start
        results.append([carno, startlap, rank_at_start,
                        rank_at_end, delta, get_sign(delta),
                        pred_end, pred_delta, get_sign(pred_delta)])
    return results
#
# simulation
#
def run_simulation_stint(predictor, prediction_length, freq,
                   carno, stintid, loopcnt,
                   datamode = MODE_ORACLE):
    """
    simulation for one car at specific stint, repeated loopcnt times
    input:
        carno ;
        stintid ; index into the car's pit-lap list
        loopcnt ; number of repeated runs (NOTE(review): if loopcnt == 0 the
                  final `return` references maxnext_pred before assignment)
    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation
            2. eval stint performance
    return:
        df ; per-run stint accuracy rows
        full_samples, full_tss ; accumulators (left empty here)
        maxnext_pred ; predicted next pit of the last run
    """
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    #init samples array
    full_samples = {}
    full_tss = {}
    #here, test only one stint for carno and stintid
    pitlap = pitmat[carno][stintid]
    for runid in range(loopcnt):
    #for pitlap in allpits:
        #1. update lap status
        debug_print(f'start pitlap: {pitlap}')
        # 'oracle' keeps the saved ground-truth pit status instead of
        # re-simulating it with the pit model
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        #2. get maxnext
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        #only for one car
        maxnext = nextpit[carno]
        maxnext_pred = nextpit_pred[carno]
        #debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                #print('nextpit:', nextpit[12], nextpit_pred[12], 'maxnext:', maxnext, maxnext_pred)
                #debugstr = f'nextpit: {nextpit[]}, {nextpit_pred[12]}, maxnext: {maxnext}, {maxnext_pred}'
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                    , maxnext, maxnext_pred)
                debug_print(debugstr)
        #run one step sim from pitlap to maxnext
        #to get the forecast_sample, set max = mexnext_pred only,
        #rather than max(maxnext,maxnext_pred)
        #
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, maxnext_pred,
                oracle_mode = datamode,
                sample_cnt = 100
                )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            return
        ## evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        #add endlap
        #_ = [x.append(maxnext_pred) for x in ret]
        rankret.extend(ret)
        ## add to full_samples
        #eval_full_samples(maxnext_pred,
        #            forecast_samples, forecast,
        #            full_samples, full_tss)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                    'endrank', 'diff', 'sign',
                                   'pred_endrank', 'pred_diff', 'pred_sign',
                                    'endlap','pred_endlap'
                                    ])
    return df, full_samples, full_tss, maxnext_pred
def run_simulation_pred(predictor, prediction_length, freq,
                   datamode = MODE_ORACLE, verbose = False):
    """
    Full-race stint simulation over every pit lap of the test event.

    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation
            2. eval stint performance
    return:
        df ; stint-accuracy rows (see get_acc_onestint_pred for columns)
    """
    # FIX: laptime_data is rebound below by update_laptimedata(); without this
    # declaration the assignment created a function-local, so the module-level
    # data read by get_pitlaps()/sim_onestep_pred() was never refreshed.
    # run_simulation_shortterm already declares the global for the same pattern.
    global laptime_data
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    for pitlap in allpits:
        #1. update lap status
        debug_print(f'start pitlap: {pitlap}')
        # 'oracle' keeps the ground-truth pit status; otherwise re-simulate it
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
            #update the featuers
            laptime_data = update_laptimedata(prediction_length, freq,
                        test_event = _test_event,
                        train_ratio=0, context_ratio = 0.,shift_len = prediction_length)
        debug_print(f'update lapstatus done.')
        #2. get maxnext
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        #debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                #print('nextpit:', nextpit[12], nextpit_pred[12], 'maxnext:', maxnext, maxnext_pred)
                #debugstr = f'nextpit: {nextpit[]}, {nextpit_pred[12]}, maxnext: {maxnext}, {maxnext_pred}'
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                    , maxnext, maxnext_pred)
                debug_print(debugstr)
        #run one step sim from pitlap to maxnext
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, max(maxnext, maxnext_pred),
                oracle_mode = datamode,
                sample_cnt = 100,
                verbose = verbose
                )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        rankret.extend(ret)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                    'endrank', 'diff', 'sign',
                                   'pred_endrank', 'pred_diff', 'pred_sign',
                                    'endlap','pred_endlap'
                                    ])
    return df
#prediction of shorterm + pred pit model
def run_simulation_shortterm(predictor, prediction_length, freq,
                   datamode = MODE_ORACLE,
                   sample_cnt = 100,
                   verbose = False
                   ):
    """
    Short-term simulation: slide a prediction window of prediction_length
    laps from lap 10 to the end of the race, re-simulating pit status (unless
    the pit model is 'oracle') before each step.

    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation
            2. eval stint performance
    return:
        df ; per-window accuracy rows
        full_samples ; carno -> (sample_cnt, maxlap) sampled ranks/targets
        full_tss ; carno -> (maxlap,) true ranks/targets
    """
    global laptime_data
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    #init samples array
    full_samples = {}
    full_tss = {}
    for pitlap in range(10, maxlap-prediction_length):
        #1. update lap status
        debug_print(f'start pitlap: {pitlap}')
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
            #update the featuers
            laptime_data = update_laptimedata(prediction_length, freq,
                        test_event = _test_event,
                        train_ratio=0, context_ratio = 0.,shift_len = prediction_length)
        debug_print(f'update lapstatus done.')
        #run one step sim from pitlap to maxnext
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                pitlap, pitlap + prediction_length,
                oracle_mode = datamode,
                sample_cnt = sample_cnt,
                verbose = verbose
                )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, prediction_length)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, prediction_length, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        #debug joint
        #if True:
        #    xmat = forecasts_et[13][:, pitlap:pitlap+prediction_length]
        #    print('debug forecasts_et at ', pitlap)
        #    print(xmat)
        # evaluate for this stint
        #ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred)
        ret = get_acc_onestep_shortterm(forecasts_et, pitlap, pitlap+prediction_length)
        rankret.extend(ret)
        # add to full_samples
        # laptime2rank compares elapsed-time targets directly; all other
        # experiments compare by derived rank
        evalbyrank = False if _exp_id == 'laptime2rank' else True
        eval_full_samples(pitlap + prediction_length,
                    forecast_samples, forecast,
                    full_samples, full_tss, evalbyrank=evalbyrank)
    print('evalbyrank:', evalbyrank)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                    'endrank', 'diff', 'sign',
                                   'pred_endrank', 'pred_diff', 'pred_sign',
                                    ])
    return df, full_samples, full_tss
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset):
    """
    evaluate stint rank by laptime forecasting: convert per-lap laptimes to
    cumulative elapsed time, then rank cars per lap by elapsed time.
    input:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder (filled with true rank)
            4, -> placeholder (filled with pred rank)
        start_offset[]; elapsed time for lap0, for one specific event
        prediction_length ; unused here, kept for interface symmetry
    return:
        forecasts_et ; same dict, rows 3/4 filled in place
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    maxlap = gvar.maxlap
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan
    for carno in forecasts_et.keys():
        #start_offset is global var
        # NOTE(review): offset is only assigned when start_offset is a
        # DataFrame; any other type raises NameError below — confirm callers
        # always pass the offset DataFrame
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        lapnum = len(forecasts_et[carno][1,:])
        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed
        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed
        #maxlap = max(maxlap, len(forecasts_et[carno][1,:]))
    #calculate rank, support nan
    # double argsort turns a per-lap ordering into per-car ranks
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]
    return forecasts_et
#
#
def eval_full_samples(lap, forecast_samples, forecast, full_samples, full_tss, evalbyrank = True):
    """
    Accumulate the true target and the sampled predictions for one lap into
    the running full_samples/full_tss arrays.
    input:
        lap ; lap number
        forecast_samples; {} carno -> samples of pred target
        forecast ; {}, carno -> 5 x totallen matrix
            1,: -> true target
            2,: -> pred target
        evalbyrank ; when True, convert targets to per-lap ranks before
                     storing; otherwise store the raw target values
    return:
        None ; full_samples (carno -> (samplecnt, maxlap)) and
               full_tss (carno -> (maxlap,)) are updated in place
    """
    #get car list for this lap
    carlist = list(forecast.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    samplecnt = len(forecast_samples[carlist[0]])
    #diff_time = np.zeros((len(carlist), 1))
    diff_time = np.zeros((len(carlist), gvar.maxlap))
    diff_time_hat = np.zeros((len(carlist), samplecnt))
    diff_time[:,:] = np.nan
    diff_time_hat[:,:] = np.nan
    for carno in carlist:
        #diff_time[caridmap[carno],0] = forecast[carno][1, lap]
        maxlen = len(forecast[carno][1, :])
        diff_time[caridmap[carno],:maxlen] = forecast[carno][1, :]
        diff_time_hat[caridmap[carno],:] = forecast_samples[carno]
    if evalbyrank == True:
        #calculate rank, support nan
        # double argsort: ordering -> per-car rank; sample ranks are computed
        # per sample column across cars
        idx = np.argsort(diff_time, axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(diff_time_hat, axis=0)
        pred_rank = np.argsort(idx, axis=0)
    else:
        true_rank = diff_time
        pred_rank = diff_time_hat
    # save the rank back
    for carno in carlist:
        if carno not in full_tss:
            #init
            full_tss[carno] = np.zeros((gvar.maxlap))
            full_samples[carno] = np.zeros((samplecnt, gvar.maxlap))
            full_tss[carno][:] = np.nan
            full_samples[carno][:,:] = np.nan
        full_tss[carno][:lap] = true_rank[caridmap[carno]][:lap]
        full_tss[carno][lap] = true_rank[caridmap[carno]][lap]
        full_samples[carno][:, lap] = pred_rank[caridmap[carno],:]
    return
def eval_stint_direct(forecasts_et, prediction_length):
    """
    Derive ranks directly from timediff forecasts.

    For every lap the cars are ordered by time difference, once with the
    true target (row 1) and once with the predicted target (row 2); the
    resulting ranks are written back into rows 3 (true) and 4 (pred).

    input:
        forecasts_et ; {}, carno -> 5 x totallen matrix
            1,: -> true target
            2,: -> pred target
            3, -> placeholder (true rank, filled here)
            4, -> placeholder (pred rank, filled here)
        prediction_length ; unused here, kept for interface symmetry
    return:
        forecasts_et ; same dict, rows 3/4 filled in place
    """
    cars = list(forecasts_et.keys())
    car_index = {car: pos for pos, car in enumerate(cars)}
    total_laps = gvar.maxlap
    # stack [true, pred] timediff matrices, nan-padded out to maxlap
    stacked = np.full((2, len(cars), total_laps), np.nan)
    for car in cars:
        mat = forecasts_et[car]
        n = len(mat[1, :])
        stacked[0, car_index[car], :n] = mat[1, :]
        stacked[1, car_index[car], :n] = mat[2, :]
    # double argsort turns a per-lap ordering into per-car ranks (nan last)
    true_rank = np.argsort(np.argsort(stacked[0], axis=0), axis=0)
    pred_rank = np.argsort(np.argsort(stacked[1], axis=0), axis=0)
    # write the ranks back into the placeholder rows
    for car in cars:
        n = len(forecasts_et[car][1, :])
        forecasts_et[car][3, :] = true_rank[car_index[car], :n]
        forecasts_et[car][4, :] = pred_rank[car_index[car], :n]
    return forecasts_et
#calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting
    input:
        forecasts_et  ; {carno -> 5 x totallen matrix}; row 1 is the true
                        laptime series, row 2 the predicted one; rows 3/4
                        are overwritten here with the derived ranks
        prediction_length ;
        start_offset  ; dataframe with lap0 'elapsed_time' per
                        'car_number' for one specific event
    return:
        forecasts_et  ; same dict, rows 3/4 filled with true/pred rank
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}
    # convert per-lap laptimes to elapsed time, NaN-padded to gvar.maxlap
    #todo, Indy500 - > 200 max laps
    maxlap = gvar.maxlap
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan
    for carno in forecasts_et.keys():
        # NOTE(review): `offset` is only assigned when start_offset is a
        # DataFrame; for any other type this raises NameError on the first
        # car -- confirm callers always pass the offset DataFrame
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        lapnum = len(forecasts_et[carno][1,:])
        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed
        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed
        #maxlap = max(maxlap, len(forecasts_et[carno][1,:]))
    # double argsort converts elapsed time into rank positions
    # (NaN entries sort to the end)
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]
    return forecasts_et
# In[13]:
def get_sign(diff):
    """Return the sign of *diff*: 1 if positive, -1 if negative, 0 otherwise."""
    return (diff > 0) - (diff < 0)
def init(laptimefile, pitmodel = '', pitmodel_bias = 0):
    """
    Load the laptime dataset and per-event start offsets, and configure
    the global pit model.

    input:
        laptimefile ; pickle containing (global_carids, laptime_data)
        pitmodel ; non-str -> PitModelSimple (0 selects the top8 variant);
                   'oracle' -> oracle mode; any other str -> model file
                   path for PitModelMLP
        pitmodel_bias ; bias stored into the module-level _pitmodel_bias
                        (only used with the MLP pit model)
    side effects:
        sets module globals global_carids, laptime_data,
        global_start_offset, decode_carids, _pitmodel, _pitmodel_bias
    """
    # bugfix: _pitmodel_bias added to the global list; without it the
    # assignment in the MLP branch below created a function-local and the
    # module-level value was never updated
    global global_carids, laptime_data, global_start_offset, decode_carids,_pitmodel, _pitmodel_bias
    global _inlap_status
    #dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
    stagedata = {}
    for event in gvar.events:
        #dataid = f'{event}-{year}'
        # NOTE(review): this unpacks a 3-tuple; assumes this module's
        # load_data returns (alldata, rankdata, acldata) -- confirm
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata = stagedata[event]
        # lap0 elapsed time per car, used later as the rank offset
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]
    # start from here
    import pickle
    #with open('laptime_rank_timediff_fulltest-oracle-%s.pickle'%year, 'rb') as f:
    #laptimefile = f'laptime_rank_timediff_pit-oracle-{gvar.dbid}.pickle'
    with open(laptimefile, 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    decode_carids={carid:carno for carno, carid in global_carids.items()}
    print(f'init: load dataset {laptimefile} with {len(laptime_data)} races, {len(global_carids)} cars')
    if not isinstance(pitmodel, str):
        # numeric pitmodel selects the simple model (0 -> top8 variant)
        _pitmodel = PitModelSimple(top8=(True if pitmodel==0 else False))
        print(f'init pitmodel as PitModelSimple')
    elif pitmodel=='oracle':
        _pitmodel = pitmodel
    else:
        _pitmodel_bias = pitmodel_bias
        _pitmodel = PitModelMLP(modelfile = pitmodel)
        # bugfix: interpolate the model file into the message (the
        # original f-string had no placeholder)
        print(f'init pitmodel as PitModelMLP({pitmodel})')
def get_evalret(df):
    """
    Evaluate stint rank-change predictions against the ground truth.

    input:
        df ; frame with columns 'sign','pred_sign','diff','pred_diff',
             'startrank','endrank'
    return:
        2x4 array [[acc, mae, rmse, r2],
                   [acc_naive, mae_naive, rmse_naive, r2_naive]]
        where the naive baseline predicts no rank change.
    """
    # sign accuracy: fraction of rows whose predicted change direction matches
    correct = df[df['sign']==df['pred_sign']]
    acc = len(correct)/len(df)
    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values))/len(df)
    rmse = math.sqrt(mean_squared_error(df['pred_diff'].values , df['diff'].values))
    mae = mean_absolute_error(df['pred_diff'].values , df['diff'].values)
    # NOTE(review): sklearn's r2_score signature is (y_true, y_pred);
    # predictions are passed first here and below -- confirm intended
    r2 = r2_score(df['pred_diff'].values , df['diff'].values)
    #naive result: predict zero rank change (endrank == startrank)
    n_correct = df[df['startrank']==df['endrank']]
    acc_naive = len(n_correct)/len(df)
    mae_naive = np.mean(np.abs(df['diff'].values))
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)
    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}, {mae_naive1}')
    #print(f'pred: acc={acc}, mae={mae}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, {%d}\n \
           naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2, len(df),
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
    )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    #return acc, mae, rmse, r2
def get_evalret_shortterm(df):
    """
    Evaluate short-term (per-lap) leader forecasts.

    input:
        df ; frame with columns 'startlap','startrank','endrank',
             'pred_endrank','diff'
    return:
        2x4 array [[acc, mae, rmse, r2],
                   [acc_naive, mae_naive, rmse_naive, r2_naive]]
        acc is the precision of predicting the leader (endrank==0);
        the naive baseline keeps the start rank unchanged.
    """
    maxlap = np.max(df['startlap'].values)
    minlap = np.min(df['startlap'].values)
    # leader precision: among laps predicted as leader, how many were right
    top1 = df[df['endrank']==0]
    top1_pred = df[df['pred_endrank']==0]
    correct = top1_pred[top1_pred['pred_endrank']==top1_pred['endrank']]
    #acc = len(correct)/len(top1_pred)
    acc = len(correct)/(len(top1_pred) + 1e-10)
    rmse = math.sqrt(mean_squared_error(df['pred_endrank'].values , df['endrank'].values))
    mae = mean_absolute_error(df['pred_endrank'].values , df['endrank'].values)
    r2 = r2_score(df['pred_endrank'].values , df['endrank'].values)
    # per-lap normalized absolute error (not returned, kept for debugging)
    mae1 = np.sum(np.abs(df['pred_endrank'].values - df['endrank'].values))
    mae1 = mae1/ (maxlap -minlap +1)
    #naive result: leader stays the car that started the window in front
    top1_naive = df[df['startrank']==0]
    n_correct = top1_naive[top1_naive['startrank']==top1_naive['endrank']]
    # bugfix: guard against an empty top1_naive (ZeroDivisionError),
    # consistent with the epsilon guard used for acc above
    acc_naive = len(n_correct)/(len(top1_naive) + 1e-10)
    mae_naive = np.mean(np.abs(df['diff'].values))
    mae_naive1 = np.sum(np.abs(df['diff'].values))
    mae_naive1 = mae_naive1 / (maxlap - minlap + 1)
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)
    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}, {mae_naive1}')
    #print(f'pred: acc={acc}, mae={mae}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, {%d}\n \
           naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
        acc, mae, rmse, r2, len(top1_pred),
        acc_naive, mae_naive, rmse_naive, r2_naive
        )
    )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
#
# configuration
#
#   model path:  <_dataset_id>/<_task_id>-<trainid>/
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'   # dataset identifier (model path prefix)
_test_event = 'Indy500-2018'    # event held out for testing
#_test_event = 'Indy500-2019'
_train_len = 40                 # fallback train length (laps)
_test_train_len = 40            # fallback train length for the test event
_feature_mode = FEATURE_STATUS  # feature bit flags, see FEATURE_* below
_context_ratio = 0.
#_task_id = 'timediff' # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
#
#_task_id = 'lapstatus' # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus' #rank, laptime, laptim2rank, timediff2rank...
_task_id = 'laptime' # the trained model's task: rank, laptime, ...
_run_ts = COL_LAPTIME # time series column used at run time
_exp_id='laptime2rank' # experiment id: rank, laptime, laptime2rank, timediff2rank...
_inlap_status = 1           # 1: mark the lap before a pit stop as in-lap
_force_endpit_align = False
_include_endpit = False
#_use_mean = False # mean or median to get prediction from samples
_use_mean = True # mean or median to get prediction from samples
# joint train the target of (rank, lapstatus)
_joint_train = False
_pitmodel_bias = 0          # updated by init() for the MLP pit model
# In[16]:
global_start_offset = {}    # event -> lap0 elapsed_time frame
global_carids = {}          # carno -> global car id (loaded by init())
laptime_data = None         # loaded from pickle by init()
freq = "1min"               # gluonts frequency string
decode_carids = {}          # global car id -> carno
_trim = 0
# turn to use gvar
#years = ['2013','2014','2015','2016','2017','2018','2019']
#events = [f'Indy500-{x}' for x in years]
#events_id={key:idx for idx, key in enumerate(events)}
#dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
| 83,895 | 33.811618 | 201 | py |
rankpredictor | rankpredictor-master/src/indycar/model/save/before_onelag/quicktest_modules.py | #!/usr/bin/env python
# coding: utf-8
"""
RankNet QuickTest goes through the following steps
makedb laptime
makedb gluonts
train model
evaluate model
draw figures
version 0.4
supported features:
forecast_mode: shortterm, stint
trainmodel : deepAR , deepARW-Oracle, deepAR-multi
testmodel : standard, oracle,pitmodel, joint
Usage: RankNet-QuickTest.py <configfile> [options]
options overwrite the configurations for quick experiments needs, include:
_forecast_mode ;
trainmodel ;
testmodel ;
_joint_train ; False/True
loopcnt ; 100/2
_pitmodel_bias ; 0/2,4
year ; 2018/2019
_test_event ; Indy500-2018, Indy500-2019
"""
import logging
from optparse import OptionParser
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import random
import mxnet as mx
from mxnet import gluon
import pickle
import json
import copy
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from pathlib import Path
import configparser
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWeightEstimator
from indycar.model.transformerw import TransformerWeightedEstimator
from indycar.model.transformerf import TransformerFullLossEstimator
from indycar.model.transformerwf import TransformerWeightedFullLossEstimator
from indycar.model.transformerwfm import TransformerWeightedFullLossMaskedEstimator
#import indycar.model.stint_simulator_shortterm_pitmodel as stint
import indycar.model.quicktest_simulator as stint
#from indycar.model.global_variables import _hi
import indycar.model.global_variables as gvar
from gluonts.model.transformer import TransformerEstimator
logger = logging.getLogger(__name__)
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """
    Build the per-lap completed-laps frame from a raw event dataframe.

    For every (car_number, completed_laps) pair keep the earliest record
    (by elapsed_time), then derive per-lap deltas:
        rank_diff ; rank change vs the car's previous lap (internal)
        time_diff ; elapsed_time delta vs the car's previous lap
    Both deltas are reset to 0 on each car's first lap.

    input:
        dataset ; raw event dataframe (C_<event>.csv layout)
    return:
        dataframe with columns
        ['car_number','completed_laps','time_diff','rank',
         'track_status','lap_status','elapsed_time']
    """
    # pick up data with valid rank: first record per (car, lap) in time order
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                  "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
                  "last_pitted_lap","start_position","laps_led"], axis=1)
    carnumber = set(uni_ds['car_number'])
    print('cars:', carnumber)
    print('#cars=', len(carnumber))
    # fast diff: uni_ds is already sorted by car_number and lap, so a row
    # where the car changes marks each car's first lap
    first_lap_mask = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    # bugfix: use .loc instead of chained indexing
    # (uni_ds['rank_diff'][mask] = 0) which raises SettingWithCopyWarning
    # and is unreliable under pandas copy-on-write
    uni_ds.loc[first_lap_mask, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[first_lap_mask, 'time_diff'] = 0
    df = uni_ds[['car_number','completed_laps','time_diff','rank','track_status', 'lap_status','elapsed_time']]
    return df
def make_lapstatus_data(dataset):
    """
    Extract the per-lap track_status series of one race-finishing car.

    All finishers run every lap, so any single finisher provides the
    race-wide flag (green/yellow) timeline.
    """
    last_lap = max(dataset.completed_laps)

    # cars that recorded the final lap are the finishers
    finishers = dataset[dataset.completed_laps == last_lap].car_number.values
    print('count of completed cars:', len(finishers))
    print('completed cars:', finishers)

    # take the first finisher, keeping a single record per lap
    picked = dataset[dataset['car_number'] == finishers[0]]
    picked = picked.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    return picked[['completed_laps', 'track_status']]
def load_data(event, year=0):
    """
    Load one event csv and derive the standard frames.

    input:
        event ; event name, e.g. 'Indy500-2018'
        year ; optional year (int or str); when truthy it is appended to
               the file name as '-<year>'
    return:
        (alldata, rankdata, acldata, flagdata)
    """
    # bugfix: the original built the path with `'-' + year`, which raises
    # TypeError for an int year (str + int), and the guard `year > 0`
    # raises for a str year; an f-string handles both representations
    if year:
        inputfile = f'../data/final/C_{event}-{year}.csv'
    else:
        inputfile = f'../data/final/C_{event}.csv'
    #outputprefix = year +'-' + event + '-'
    dataset = pd.read_csv(inputfile)
    #dataset.info(verbose=True)
    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1
    # get records for the cars that finish the race
    completed_car_numbers= dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)
    print('count of completed cars:', completed_car_count)
    print('completed cars:', completed_car_numbers)
    #make a copy; dataset is then restricted to finishers only
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # cldata is unused but kept for its diagnostic prints
    cldata = make_cl_data(dataset)
    flagdata = make_lapstatus_data(dataset)
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata, flagdata
def nan_helper(y):
    """Locate NaNs in a 1d array.

    Returns ``(nans, index)`` where ``nans`` is a boolean mask of NaN
    positions and ``index`` converts a logical mask to integer indices,
    the usual pattern being linear interpolation of the gaps::

        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)
    to_indices = lambda flags: flags.nonzero()[0]
    return mask, to_indices
def get_lap2nextpit(lap_status, maxlap):
    """
    For every lap, the number of laps until the car's next pit stop.

    input:
        lap_status ; 0/1 per-lap pit indicator (NaN tail marks an
                     incomplete race)
        maxlap ; max lap number of the race; finishers get a virtual pit
                 at this lap so the final stint is still counted down
    output:
        array (same shape/dtype as lap_status) of laps-to-next-pit,
        NaN where no next pit exists
    """
    stops = list(np.where(lap_status == 1)[0])

    # finishers (no NaN anywhere in the series) get a virtual pit stop at
    # the finish line so the last stint is measured too
    if np.sum(np.isnan(lap_status)) == 0:
        stops.append(maxlap)

    out = np.zeros_like(lap_status)
    out[:] = np.nan
    if not stops:
        return out

    stop_arr = np.asarray(stops)
    for lap in range(len(lap_status)):
        # first stop strictly after this lap: a pit lap itself counts
        # toward the following stop
        nxt = np.searchsorted(stop_arr, lap, side='right')
        if nxt >= len(stop_arr):
            break
        out[lap] = stop_arr[nxt] - lap
    return out
def get_lapdata(acldata):
    """
    Flatten the per-lap frame into rows carrying the gap to the leader.

    input:
        acldata ; dataframe ['car_number','completed_laps','time_diff',
                  'rank','track_status','lap_status','elapsed_time']
    return:
        np.array of rows [car_number, lap, time_diff, rank, track_status,
        lap_status, time_behind, elapsed_time]; mixed types collapse to a
        numpy string array, so consumers re-cast individual fields
    """
    COL_COMPLETED_LAPS = 1
    COL_ELAPSED_TIME = 6
    maxlap = np.max(acldata['completed_laps'].values)
    #'car_number','completed_laps','time_diff','rank','track_status', 'lap_status','time_behind'
    time_behind = []
    for lap in range(1, maxlap+1):
        this_lap = acldata[acldata['completed_laps']==lap][
                         ['car_number','completed_laps','time_diff','rank',
                          'track_status', 'lap_status','elapsed_time']].values

        # bugfix: np.float was removed in NumPy 1.24; the builtin float
        # is the documented replacement
        min_elapsed_time = np.nanmin(this_lap[:,COL_ELAPSED_TIME].astype(float))
        #print(f'lap:{lap}, min_elapsed_time:{min_elapsed_time}')
        for row in this_lap:
            car_number = int(row[0])
            time_diff = row[2]
            rank = row[3]
            track_status = row[4]
            lap_status = row[5]
            # gap to the lap leader (smallest elapsed time this lap)
            timebehind = float(row[COL_ELAPSED_TIME]) - min_elapsed_time
            time_behind.append([car_number, lap, time_diff,rank,track_status, lap_status,
                           timebehind, float(row[COL_ELAPSED_TIME])])

    lapdata = np.array(time_behind)
    return lapdata
# features: laptime, rank, track_status, lap_status, timediff
# Row indices into the per-car feature matrix datalist[car, feature, lap]
# built by get_laptime_dataset.
LAPTIME = 0
RANK = 1
TRACK_STATUS = 2
LAP_STATUS = 3
TIME_BEHIND = 4
CAUTION_LAPS_INSTINT = 5
LAPS_INSTINT = 6
ELAPSED_TIME = 7
LAP2NEXTPIT = 8
_featureCnt = 9  # total number of feature rows above
def get_laptime_dataset(stagedata, inlap_status = 0):
    """
    Build the laptime & rank tensor for every event.

    input:
        stagedata ; {event: (alldata, rankdata, acldata, flagdata)}
        inlap_status ; 0 = off; 1 = also mark the lap before each pit as
                       a pit lap; any other value marks the lap after
    output: laptime & rank data
        [(
        eventid,
        carids : rowid -> carno,
        datalist: #car_number x features x #totallaps (padded by NaN)
            feature rows: laptime, rank, track_status, lap_status,
            time_behind, caution_laps_instint, laps_instint,
            elapsed_time, lap2nextpit
        )]
    """
    laptime_data = []
    for event in stagedata.keys():
        print(f'start event: {event}')
        eventid = gvar.events_id[event]
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        totalcars = len(carlist)
        totallaps = len(laplist)

        #carnumber -> carid and back
        carids={key:idx for idx, key in enumerate(carlist)}
        decode_carids={idx:key for idx, key in enumerate(carlist)}

        # per-car stint counters (reset at every pit stop)
        lap_instint = {carids[x]:0 for x in carlist}
        caution_instint = {carids[x]:0 for x in carlist}

        # car x feature x lap tensor, NaN-padded
        # (bugfix: np.NaN was removed in NumPy 2.0 -> np.nan; the unused
        #  `laptime`/`rank` scratch arrays of the original are dropped)
        datalist = np.empty((totalcars, _featureCnt, totallaps-1))
        datalist[:] = np.nan

        #'car_number','completed_laps','time_diff','rank','track_status', 'lap_status','time_behind'
        lapdata = get_lapdata(acldata)

        for row in lapdata:
            #completed_laps: lap 0 carries no laptime
            if int(row[1]) == 0:
                continue

            #add to data array
            car_number = carids[int(row[0])]
            completed_laps = int(row[1])-1
            time_diff = float(row[2])
            # renamed from `rank` to avoid clobbering unrelated names
            rank_pos = int(row[3])
            track_status = 1 if row[4]=='Y' else 0
            lap_status = 1 if row[5]=='P' else 0
            time_behind = float(row[6])

            datalist[car_number, LAPTIME, completed_laps] = time_diff
            datalist[car_number, RANK, completed_laps] = rank_pos
            datalist[car_number, TRACK_STATUS, completed_laps] = track_status
            datalist[car_number, LAP_STATUS, completed_laps] = lap_status
            datalist[car_number, TIME_BEHIND, completed_laps] = time_behind
            datalist[car_number, ELAPSED_TIME, completed_laps] = float(row[7])

            #stint status
            if track_status == 1:
                caution_instint[car_number] += 1
            lap_instint[car_number] += 1
            if lap_status == 1:
                #new stint starts at a pit stop
                lap_instint[car_number] = 0
                caution_instint[car_number] = 0

            # extend the pit flag to the in-lap: the lap before 'P'
            # (mode 1) or the lap after (other non-zero modes)
            if (inlap_status!=0):
                if inlap_status == 1:
                    # set the previous lap of 'P'
                    if completed_laps > 0:
                        datalist[car_number, LAP_STATUS, completed_laps-1] = 1
                else:
                    # set the next lap of 'P'
                    # bugfix: valid lap indices are 0..totallaps-2, so the
                    # bound is totallaps-1 (the original `< totallaps`
                    # allowed an out-of-range index for a final-lap pit)
                    if completed_laps +1 < totallaps - 1:
                        datalist[car_number, LAP_STATUS, completed_laps + 1] = 1

            datalist[car_number, LAPS_INSTINT, completed_laps] = lap_instint[car_number]
            datalist[car_number, CAUTION_LAPS_INSTINT, completed_laps] = caution_instint[car_number]

        #update lap2nextpit in datalist
        for caridx in range(datalist.shape[0]):
            lap_status = datalist[caridx, LAP_STATUS, :]
            #pit status -> laps until the next stop
            lap2nextpit = get_lap2nextpit(lap_status, totallaps-1)
            datalist[caridx, LAP2NEXTPIT, :] = lap2nextpit

        #add one record
        laptime_data.append([eventid, decode_carids, datalist])
        # push this event into stage dataframe
        print('event=%s, records=%s'%(event, datalist.shape))
    return laptime_data
# In[ ]:
def nan_helper(y):
    """Return ``(nan_mask, index_fn)`` for a 1d array.

    ``nan_mask`` flags the NaN entries; ``index_fn`` maps a logical mask
    to the corresponding integer indices. Typical use is interpolating
    the gaps::

        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    def to_index(logical):
        return logical.nonzero()[0]
    return np.isnan(y), to_index
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
#
# Column ids of the (extended) laptime_data feature axis; the first nine
# must stay in sync with the LAPTIME..LAP2NEXTPIT row ids above.
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSED_TIME= 7
COL_LAP2NEXTPIT = 8
#_featureCnt = 9
# added new features (created by prepare_laptimedata)
COL_LEADER_PITCNT = 9
COL_TOTAL_PITCNT = 10
COL_SHIFT_TRACKSTATUS = 11
COL_SHIFT_LAPSTATUS = 12
COL_SHIFT_LEADER_PITCNT = 13
COL_SHIFT_TOTAL_PITCNT = 14
COL_LASTFEATURE = 14
# dynamically extended space in simulation (save/restore slots)
COL_TRACKSTATUS_SAVE = COL_LASTFEATURE+1
COL_LAPSTATUS_SAVE = COL_LASTFEATURE+2
COL_CAUTION_LAPS_INSTINT_SAVE = COL_LASTFEATURE+3
COL_LAPS_INSTINT_SAVE= COL_LASTFEATURE+4
COL_ENDPOS = COL_LASTFEATURE+5
# feature-selection bit flags, combined into _feature_mode
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
FEATURE_LEADER_PITCNT = 8
FEATURE_TOTAL_PITCNT = 16
FEATURE_SHIFT_TRACKSTATUS = 32
FEATURE_SHIFT_LAPSTATUS = 64
FEATURE_SHIFT_LEADER_PITCNT = 128
FEATURE_SHIFT_TOTAL_PITCNT = 256
# flag -> (long name, one-char code); used by decode_feature_mode
_feature2str= {
        FEATURE_STATUS : ("FEATURE_STATUS",'S'),
        FEATURE_PITAGE : ("FEATURE_PITAGE",'A'),
        FEATURE_LEADER_PITCNT : ("FEATURE_LEADER_PITCNT",'L'),
        FEATURE_TOTAL_PITCNT : ("FEATURE_TOTAL_PITCNT",'T'),
        FEATURE_SHIFT_TRACKSTATUS : ("FEATURE_SHIFT_TRACKSTATUS",'Y'),
        FEATURE_SHIFT_LAPSTATUS : ("FEATURE_SHIFT_LAPSTATUS",'P'),
        FEATURE_SHIFT_LEADER_PITCNT : ("FEATURE_SHIFT_LEADER_PITCNT",'L'),
        FEATURE_SHIFT_TOTAL_PITCNT : ("FEATURE_SHIFT_TOTAL_PITCNT",'T')
        }
# simulation oracle modes (bit flags)
MODE_ORACLE = 0
MODE_NOLAP = 1
MODE_NOTRACK = 2
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
#MODE_STR={MODE_ORACLE:'oracle', MODE_NOLAP:'nolap',MODE_NOTRACK:'notrack',MODE_TEST:'test'}
#_feature_mode = FEATURE_STATUS
def decode_feature_mode(feature_mode):
    """Print the names of the features enabled in *feature_mode* and
    return the compact flag string (one char per known feature, '0' when
    that feature is off)."""
    names = []
    compact = []
    for flag, (long_name, short_name) in _feature2str.items():
        if test_flag(feature_mode, flag):
            names.append(long_name)
            compact.append(short_name)
        else:
            compact.append('0')
    print(' '.join(names))
    return ''.join(compact)
def add_leader_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS, shift_len = 0,
        dest_col = COL_LEADER_PITCNT,
        verbose = False):
    """
    add a new feature into mat(car, feature, lap): for every lap, the
    number of pit stops this lap by cars ranked ahead (rank order taken
    shift_len laps earlier).

    input:
        selmat ; laptime_data array [car, feature, lap]
        rank_col ; feature row holding the rank
        pit_col ; feature row holding the 0/1 pit indicator
        shift_len ; how many laps back to take the rank ordering from
        dest_col ; destination feature row; -1 appends a new row
    return:
        the updated (or extended) matrix
    """
    dim1, dim2, dim3 = selmat.shape

    # per-lap rank ordering (argsort of the rank column)
    idx = np.argsort(selmat[:, rank_col,:], axis=0)
    # note: the original also built an unused double-argsort "true_rank"
    # via .astype(np.float); np.float was removed in NumPy 1.24, so the
    # dead line is dropped

    # get leaderCnt by sorted pits
    pits = np.zeros((dim1,dim3))
    for lap in range(shift_len, dim3):
        col = idx[:, lap-shift_len]
        pits[:, lap] = selmat[col, pit_col, lap]

    # exclusive cumulative sum: pits by all cars ranked ahead of this one
    leaderCnt = np.nancumsum(pits, axis=0) - pits

    if verbose:
        print('pits:\n')
        print(pits[:,190:])
        print('leaderCnt raw:\n')
        print(leaderCnt[:,190:])

    #remove nans
    nanidx = np.isnan(leaderCnt)
    leaderCnt[nanidx] = 0
    if verbose:
        print('leaderCnt after remove nan:\n')
        print(leaderCnt[:,190:])

    if dest_col == -1:
        #create a new data matrix with one extra feature row
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: write into the existing row in place
        newmat = selmat

    # scatter the counts back into the per-lap rank order
    for lap in range(dim3):
        col = idx[:, lap]
        newmat[col, dest_col, lap] = leaderCnt[:, lap]

    # sync length to COL_RANK: restore the NaN tail of incomplete cars
    for rec in newmat:
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            #todo, some invalid nan, remove them
            rec[dest_col, -nan_count:] = np.nan
    return newmat
def add_allpit_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS,
        dest_col = COL_TOTAL_PITCNT,verbose = False):
    """
    add a new feature into mat(car, feature, lap): the total number of
    pit stops (across all cars) in each lap, broadcast to every car.

    input:
        selmat ; laptime_data array [car, feature, lap]
        rank_col ; feature row whose NaN tail defines each car's length
        pit_col ; feature row holding the 0/1 pit indicator
        dest_col ; destination feature row; -1 appends a new row
    return:
        the updated (or extended) matrix
    """
    dim1, dim2, dim3 = selmat.shape

    #calc totalCnt vector: total pits per lap over all cars
    totalCnt = np.nansum(selmat[:, pit_col, :], axis=0).reshape((-1))

    if verbose:
        # bugfix: the original printed an undefined name `pits` here,
        # raising NameError whenever verbose=True; print the source pit
        # column instead
        print('pits:\n')
        print(selmat[:, pit_col, 190:])
        print('totalCnt raw:\n')
        print(totalCnt[190:])

    #remove nans
    nanidx = np.isnan(totalCnt)
    totalCnt[nanidx] = 0
    if verbose:
        print('totalCnt after remove nan:\n')
        print(totalCnt[190:])

    if dest_col == -1:
        #create a new data matrix with one extra feature row
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: write into the existing row in place
        newmat = selmat

    for car in range(dim1):
        newmat[car, dest_col, :] = totalCnt

    # sync length to COL_RANK: restore the NaN tail of incomplete cars
    for rec in newmat:
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            #todo, some invalid nan, remove them
            rec[dest_col, -nan_count:] = np.nan
    return newmat
def add_shift_feature(selmat, rank_col=COL_RANK, shift_col=COL_LAPSTATUS, shift_len = 2,
        dest_col = -1,verbose = False):
    """
    add a new feature into mat(car, feature, lap): the shift_col series
    shifted left by shift_len laps (future values pulled forward).

    warning: these are oracle features, be careful not to let future rank
    positions leak into training inputs.

    input:
        selmat ; laptime_data array [car, feature, lap]
        rank_col ; feature row whose non-NaN span defines each car's
                   valid lap range
        shift_col ; feature row to shift
        shift_len ; number of laps to shift left
        dest_col ; destination feature row; -1 appends a new row
    return:
        the updated (or extended) matrix
    """
    dim1, dim2, dim3 = selmat.shape

    if dest_col == -1:
        #create a new data matrix with one extra feature row
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: write into the existing row in place
        newmat = selmat

    for car in range(dim1):
        # set empty status by default
        newmat[car, dest_col, :] = np.nan

        # valid laps: where the rank row is not NaN
        # (cleanup: the original also called nan_helper here and left the
        #  results unused)
        rec = selmat[car]
        recnnz = rec[shift_col, ~np.isnan(rec[rank_col,:])]
        reclen = len(recnnz)

        #shift copy: zero-fill the valid span, then pull future values
        #forward; the last shift_len valid laps stay 0
        newmat[car, dest_col, :reclen] = 0
        newmat[car, dest_col, :reclen-shift_len] = recnnz[shift_len:]
    return newmat
def prepare_laptimedata(laptime_data,
                   prediction_length, freq,
                   test_event = 'Indy500-2018',
                   train_ratio=0.8,
                   context_ratio = 0.,
                   shift_len = -1):
    """
    prepare the laptime data for training
    1. remove short ts
    2. rerank the tss
    3. create new features (leader/total pit counts and shifted columns)
    input:
        laptime_data  ; list of [eventid, carids, datalist] records
        prediction_length ; forecast horizon in laps
        freq ; unused here (kept for interface compatibility)
        test_event ; event whose ranks keep the historical +1 offset
        train_ratio / context_ratio ; length heuristics
        shift_len ; lap shift for the oracle features; <0 means
                    prediction_length
    output:
        data  ; new representation of laptime_data
    """
    _laptime_data = laptime_data.copy()
    test_eventid = gvar.events_id[test_event]
    train_events = gvar._train_events
    run_ts = COL_RANK
    # check shift len
    if shift_len < 0:
        shift_len = prediction_length
    print('prepare_laptimedata shift len:', shift_len)
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    new_data = []
    for _data in _laptime_data:
        #if events[_data[0]] == test_event:
        test_mode = False
        if _data[0] == test_eventid:
            test_mode = True
        #elif _data[0] in train_events:
        #    test_mode = False
        #else:
        #    #skip this event
        #    print('skip this event:', _data[0])
        #    continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            #use global train_len
            train_len = gvar._train_len if not test_mode else gvar._test_train_len
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10
        print(f'before ====event:{gvar.events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        #rerank due to short ts removed
        #if run_ts == COL_RANK and dorerank == True:
        if True:
            sel_rows = []
            # use to check the dimension of features: a narrow matrix
            # means the new feature rows must be appended, a wide one
            # means they are updated in place
            input_feature_cnt = _data[2].shape[1]
            if input_feature_cnt < COL_LASTFEATURE + 1:
                print('create new features mode, feature_cnt:', input_feature_cnt)
            else:
                print('update features mode, feature_cnt:', input_feature_cnt)
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    print(f'rerank a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                else:
                    sel_rows.append(rowid)
            #get selected matrix
            sel_idx = np.array(sel_rows)
            selmat = _data[2][sel_idx]
            # rerank the surviving cars; double argsort gives rank
            # positions, NaN laps stay NaN
            # (bugfix: np.float was removed in NumPy 1.24 -> float)
            mask = np.isnan(selmat[:,COL_RANK,:])
            idx = np.argsort(selmat[:,COL_RANK,:], axis=0)
            true_rank = np.argsort(idx, axis=0).astype(float)
            true_rank[mask] = np.nan
            if test_mode:
                #
                # for historical code mismatch, simulation does not run rerank
                #
                _data[2][sel_idx,COL_RANK,:] = true_rank + 1
            else:
                _data[2][sel_idx,COL_RANK,:] = true_rank
            # update the carno dict
            new_carids = {}
            for rowid in range(len(sel_idx)):
                carid = sel_idx[rowid]
                carno = _data[1][carid]
                new_carids[rowid] = carno
        # add new features
        # add leaderPitCnt (verbose only for the first event)
        if _data[0]==0:
            verbose = True
        else:
            verbose = False
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_LEADER_PITCNT
        data2_intermediate = add_leader_cnt(_data[2][sel_idx], shift_len = shift_len, dest_col=dest_col, verbose = verbose)
        # add totalPit
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_TOTAL_PITCNT
        data2_intermediate = add_allpit_cnt(data2_intermediate, dest_col=dest_col)
        #
        # add shift features, a fixed order, see the MACROS
        #COL_SHIFT_TRACKSTATUS = 11
        #COL_SHIFT_LAPSTATUS = 12
        #COL_SHIFT_LEADER_PITCNT = 13
        #COL_SHIFT_TOTAL_PITCNT = 14
        #
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TRACKSTATUS
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                   shift_col=COL_TRACKSTATUS, shift_len = shift_len)
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LAPSTATUS
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                   shift_col=COL_LAPSTATUS, shift_len = shift_len)
        # leader_pitcnt can not be shift, target leaking, just do not use it
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LEADER_PITCNT
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                   shift_col=COL_LEADER_PITCNT, shift_len = shift_len)
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TOTAL_PITCNT
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                   shift_col=COL_TOTAL_PITCNT, shift_len = shift_len)
        # final
        data2_newfeature = data2_intermediate
        new_data.append([_data[0], new_carids, data2_newfeature])
    return new_data
def get_real_features(feature_mode, rec, endpos):
    """
    Build the list of real-valued covariate rows selected by *feature_mode*.

    feature_mode is a bitmask combining:
        FEATURE_STATUS = 2              -> COL_TRACKSTATUS + COL_LAPSTATUS (two rows)
        FEATURE_PITAGE = 4              -> COL_LAPS_INSTINT
        FEATURE_LEADER_PITCNT = 8       -> COL_LEADER_PITCNT
        FEATURE_TOTAL_PITCNT = 16       -> COL_TOTAL_PITCNT
        FEATURE_SHIFT_TRACKSTATUS = 32  -> COL_SHIFT_TRACKSTATUS
        FEATURE_SHIFT_LAPSTATUS = 64    -> COL_SHIFT_LAPSTATUS
        FEATURE_SHIFT_LEADER_PITCNT = 128 -> COL_SHIFT_LEADER_PITCNT
        FEATURE_SHIFT_TOTAL_PITCNT = 256  -> COL_SHIFT_TOTAL_PITCNT

    rec is the [features, lapnumber] matrix for one car; each selected row is
    truncated to rec[:, :endpos].  A non-positive endpos means "full length".
    """
    # non-positive endpos selects the whole series
    if endpos <= 0:
        endpos = rec.shape[1]

    # fixed selection order; FEATURE_STATUS contributes two covariate rows
    flag_to_cols = [
        (FEATURE_STATUS, (COL_TRACKSTATUS, COL_LAPSTATUS)),
        (FEATURE_PITAGE, (COL_LAPS_INSTINT,)),
        (FEATURE_LEADER_PITCNT, (COL_LEADER_PITCNT,)),
        (FEATURE_TOTAL_PITCNT, (COL_TOTAL_PITCNT,)),
        (FEATURE_SHIFT_TRACKSTATUS, (COL_SHIFT_TRACKSTATUS,)),
        (FEATURE_SHIFT_LAPSTATUS, (COL_SHIFT_LAPSTATUS,)),
        (FEATURE_SHIFT_LEADER_PITCNT, (COL_SHIFT_LEADER_PITCNT,)),
        (FEATURE_SHIFT_TOTAL_PITCNT, (COL_SHIFT_TOTAL_PITCNT,)),
    ]

    features = []
    for flag, cols in flag_to_cols:
        if test_flag(feature_mode, flag):
            features.extend(rec[col, :endpos] for col in cols)
    return features
def make_dataset_byevent(_laptime_data,
                prediction_length, freq,
                useeid = False,
                run_ts=COL_LAPTIME,
                test_event = 'Indy500-2018',
                use_global_dict = True,
                oracle_mode = MODE_ORACLE,
                half_moving_win = True,
                train_ratio=0.8,
                log_transform = False,
                context_ratio = 0.,
                dorerank = True,
                joint_train = 0,
                test_cars = []
                ):
    """
    Split the lap time series into gluonts train/test ListDatasets by event.

    The event equal to *test_event* is used to build rolling-window test
    records (window step fixed to 1 lap); events in gvar._train_events go to
    the train set; all other events are skipped.

    oracle_mode: bitmask; MODE_NOTRACK / MODE_NOLAP zero out the track/lap
        status covariates to simulate prediction without oracle knowledge.
    joint_train: when truthy, the target is 2-dimensional
        [run_ts, COL_LAPSTATUS] instead of the single run_ts row.
    test_cars: optional whitelist of car numbers to keep.
        NOTE(review): mutable default argument — harmless here since it is
        only read, but a None default would be safer.
    half_moving_win / dorerank: currently unused in this function body.

    Returns: (train_ds, test_ds, train_set, test_set)
    """
    #global setting
    feature_mode = gvar._feature_mode

    start = pd.Timestamp("01-01-2019", freq=freq)  # can be different for each time series
    train_set = []
    test_set = []
    # counters for the summary printed at the end
    totalTSCnt = 0
    totalTSLen = 0

    test_eventid = gvar.events_id[test_event]
    train_events = gvar._train_events

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []

        # route the whole event to either the test or the train side
        if _data[0] == test_eventid:
            test_mode = True
        elif _data[0] in train_events:
            test_mode = False
        else:
            #skip this event
            print('skip this event:', _data[0])
            continue

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            #use global train_len
            train_len = gvar._train_len if not test_mode else gvar._test_train_len

        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        # floor the context window so test windows are never too short
        if context_len < 10:
            context_len = 10

        print(f'after ====event:{gvar.events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process for each ts (one per car in this event)
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            totalTSCnt += 1
            totalTSLen += totallen
            if ( totallen < train_len + prediction_length):
                print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = gvar.global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            #check carno in test_cars, testmode only
            if len(test_cars)>0 and carno not in test_cars:
                continue

            # static categorical features: carid, optionally the event id
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            #first, get target a copy
            # target can be COL_XXSTATUS
            if joint_train:
                # 2-d target: the run series plus the pit-stop indicator row
                target_cols = [run_ts, COL_LAPSTATUS]
                target_val = rec[target_cols].copy().astype(np.float32)
            else:
                target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)

            # selection of features: optionally blind the model to oracle status
            if test_flag(oracle_mode, MODE_NOTRACK):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                real_features = get_real_features(feature_mode, rec, -1)

                _train.append({'target': target_val,
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': real_features
                               })
            else:
                # reset train_len
                if context_ratio != 0.:
                    # all go to train set
                    #add [0, context_len] to train set
                    # all go to train set
                    if joint_train:
                        _train.append({'target': target_val[:,:context_len],
                                       'start': start,
                                       'feat_static_cat': static_cat,
                                       'feat_dynamic_real': get_real_features(feature_mode, rec, context_len)
                                       })
                    else:
                        _train.append({'target': target_val[:context_len],
                                       'start': start,
                                       'feat_static_cat': static_cat,
                                       'feat_dynamic_real': get_real_features(feature_mode, rec, context_len)
                                       })

                # testset
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                step = -1
                # rolling windows from the full series down to the minimum length
                for endpos in range(totallen, context_len+prediction_length,
                                    step):
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    real_features = get_real_features(feature_mode, rec, endpos)

                    if joint_train:
                        _test.append({'target': target_val[:,:endpos],
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': real_features
                                      })
                    else:
                        _test.append({'target': target_val[:endpos],
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': real_features
                                      })
                    test_rec_cnt += 1

            #check feature cnt
            # NOTE(review): real_features may be unbound if the rolling-window
            # loop above produced zero windows in test_mode — confirm inputs.
            featureCnt = len(real_features)
            #add one ts
            print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt},featureCnt:{featureCnt}')

        train_set.extend(_train)
        test_set.extend(_test)

    print(f'train len:{len(train_set)}, test len:{len(test_set)}, totsl TsCnt:{totalTSCnt}, total ts len:{totalTSLen}')
    # joint_train targets are 2-d, so the dataset must allow multi-dim targets
    train_ds = ListDataset(train_set, freq=freq,one_dim_target= False if joint_train else True)
    test_ds = ListDataset(test_set, freq=freq,one_dim_target= False if joint_train else True)

    return train_ds, test_ds, train_set, test_set
# In[ ]:
def init_estimator(model, gpuid, epochs=100, batch_size = 32,
                   target_dim = 3, distr_output = None, use_feat_static = True):
    """
    Create a gluonts estimator/predictor for the given model name.

    Args:
        model: identifier such as 'deepAR', 'deepAR-Oracle', 'Transformer',
            'TransformerW-Oracle', 'deepAR-multi', 'simpleFF', 'deepFactor',
            'deepState', 'ets', 'prophet', 'arima', 'naive', ...
        gpuid: gpu index; a negative value selects the cpu context.
        epochs, batch_size: training schedule forwarded to the Trainer.
        target_dim: output dimension for the multivariate ('-multi') models.
        distr_output: distribution output object for the trainable models.
        use_feat_static: whether to use static categorical features.

    Returns:
        A gluonts estimator (or a ready predictor for ets/prophet/arima/naive).

    Raises:
        SystemExit: unknown model names are logged and the process exits.

    Uses module-level globals: gvar (prediction_length/context_length/freq),
    cardinality, logger.
    """
    if int(gpuid) < 0:
        ctx = "cpu"
    else:
        ctx = "gpu(%s)"%gpuid

    #global vars
    prediction_length = gvar.prediction_length
    context_length = gvar.context_length
    freq = gvar.freq

    def _trainer(hybridize=None):
        # shared Trainer configuration used by every trainable model;
        # hybridize is only passed when a model explicitly needs it (simpleFF)
        kwargs = dict(ctx=ctx,
                      batch_size=batch_size,
                      epochs=epochs,
                      learning_rate=1e-3,
                      num_batches_per_epoch=100)
        if hybridize is not None:
            kwargs['hybridize'] = hybridize
        return Trainer(**kwargs)

    # Most models share an identical construction pattern and differ only in
    # the estimator class and the use_feat_dynamic_real flag ('-Oracle'
    # variants feed the status covariates as dynamic real features).
    _common = {
        'deepAR': (DeepAREstimator, False),
        'deepAR-Oracle': (DeepAREstimator, True),
        'deepARW-Oracle': (DeepARWeightEstimator, True),
        'Transformer': (TransformerEstimator, False),
        'Transformer-Oracle': (TransformerEstimator, True),
        'TransformerW-Oracle': (TransformerWeightedEstimator, True),
        'TransformerWF-Oracle': (TransformerWeightedFullLossEstimator, True),
        'TransformerWFM-Oracle': (TransformerWeightedFullLossMaskedEstimator, True),
        'TransformerF-Oracle': (TransformerFullLossEstimator, True),
    }

    if model in _common:
        estimator_cls, use_dynamic = _common[model]
        kwargs = dict(prediction_length=prediction_length,
                      context_length=context_length,
                      use_feat_static_cat=use_feat_static,
                      use_feat_dynamic_real=use_dynamic,
                      distr_output=distr_output,
                      freq=freq,
                      trainer=_trainer())
        if use_feat_static:
            # cardinality (module global) sizes the static categorical features
            kwargs['cardinality'] = cardinality
        estimator = estimator_cls(**kwargs)
    elif model in ('deepAR-multi', 'deepARW-multi'):
        estimator_cls = DeepAREstimator if model == 'deepAR-multi' else DeepARWeightEstimator
        # NOTE: cardinality is intentionally NOT passed here even when
        # use_feat_static is True, mirroring the original behaviour.
        estimator = estimator_cls(
            prediction_length=prediction_length,
            context_length=context_length,
            use_feat_static_cat=use_feat_static,
            use_feat_dynamic_real=False,
            freq=freq,
            trainer=_trainer(),
            distr_output=MultivariateGaussianOutput(dim=target_dim),
        )
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length=context_length,
            freq=freq,
            trainer=_trainer(hybridize=False),
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            freq=freq,
            trainer=_trainer(),
        )
    elif model == 'deepState':
        # deepState takes no context_length and always uses static cat features
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=_trainer(),
        )
    elif model == 'ets':
        estimator = RForecastPredictor(method_name='ets',freq= freq, prediction_length = prediction_length)
    elif model == 'prophet':
        estimator = ProphetPredictor(freq= freq, prediction_length = prediction_length)
    elif model == 'arima':
        estimator = RForecastPredictor(method_name='arima',freq= freq, prediction_length = prediction_length, trunc_length = context_length)
    elif model == 'naive':
        estimator = NaivePredictor(freq= freq, prediction_length = prediction_length)
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)

    return estimator
# In[ ]:
#
# simulation engine general
#
def init_simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
                    featuremode = stint.FEATURE_STATUS,
                    pitmodel = 0,
                    inlapmode=0,
                    train_len = 40,test_train_len=40,
                    joint_train = False,
                    pitmodel_bias= 0, prepared_laptimedata = None):
    """
    Configure the `stint` simulation module globals for a run (no simulation
    is executed here — see simulation() for the full pipeline).

    input:
        prepared_laptimedata ; laptime data with the extended features,
            installed into stint as a global via set_laptimedata
    Side effects only: mutates stint module state and gvar-backed data.
    """
    #
    # configurataion
    #
    # model path: <_dataset_id>/<_task_id>-<trainid>/
    #_dataset_id = 'indy2013-2018-nocarid'
    # _inlap_status must be set BEFORE stint.init (init may read it)
    stint._inlap_status = inlapmode
    stint.init(gvar.LAPTIME_DATASET, pitmodel, pitmodel_bias= pitmodel_bias)
    stint._dataset_id = datasetid
    stint._test_event = testevent
    #_test_event = 'Indy500-2019'
    stint._feature_mode = featuremode
    stint._context_ratio = 0.

    stint._task_id = taskid  # rank,laptime, the trained model's task
    stint._run_ts = runts   #COL_LAPTIME,COL_RANK
    stint._exp_id=expid  #rank, laptime, laptim2rank, timediff2rank...

    stint._use_mean = True

    stint._train_len = train_len
    stint._test_train_len = test_train_len
    stint._joint_train = joint_train

    # todo: add into stint code
    #here add new laptime_data with new features
    #
    stint.set_laptimedata(prepared_laptimedata)
def simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
               datamode, loopcnt, featuremode = stint.FEATURE_STATUS,
               pitmodel = 0, model = 'oracle', inlapmode=0, train_len = 40,test_train_len=40,
               forecastmode = 'shortterm', joint_train = False,
               pitmodel_bias= 0, prepared_laptimedata = None,
               epochs = 1000):
    """
    Configure `stint`, load a trained model, and run *loopcnt* simulations.

    input:
        prepared_laptimedata ; laptime data with the extended features,
            installed into stint as a global
        forecastmode ; 'shortterm' (rolling short-horizon) or 'stint'
            (pit-to-pit) simulation
    output:
        (b, ret2) where b is the stacked per-run evaluation array and ret2
        maps run index -> raw simulation result
    """
    #
    # configurataion
    #
    # model path: <_dataset_id>/<_task_id>-<trainid>/
    #_dataset_id = 'indy2013-2018-nocarid'
    # _inlap_status must be set BEFORE stint.init (init may read it)
    stint._inlap_status = inlapmode
    stint.init(gvar.LAPTIME_DATASET, pitmodel, pitmodel_bias= pitmodel_bias)
    stint._dataset_id = datasetid
    stint._test_event = testevent
    #_test_event = 'Indy500-2019'
    stint._feature_mode = featuremode
    stint._context_ratio = 0.

    stint._task_id = taskid  # rank,laptime, the trained model's task
    stint._run_ts = runts   #COL_LAPTIME,COL_RANK
    stint._exp_id=expid  #rank, laptime, laptim2rank, timediff2rank...

    stint._use_mean = True

    stint._train_len = train_len
    stint._test_train_len = test_train_len
    stint._joint_train = joint_train

    # extra knobs that only apply to stint-level (pit-to-pit) forecasting
    if forecastmode == 'stint':
        stint._trim = 0
        stint._debug_carlist=[]
        stint._force_endpit_align = False
        stint._include_endpit = True

    # todo: add into stint code
    #here add new laptime_data with new features
    #
    stint.set_laptimedata(prepared_laptimedata)
    #stint.set_laptimedata(laptime_data)
    predictor = stint.load_model(predictionlen, model,trainid='indy500',epochs = epochs, exproot='./')

    # run the simulation loopcnt times (stochastic pit model -> varying runs)
    ret2 = {}
    for i in range(loopcnt):
        #df, full_samples, full_tss
        if forecastmode == 'shortterm':
            ret2[i] = stint.run_simulation_shortterm(predictor, predictionlen, stint.freq, datamode=datamode)
        elif forecastmode == 'stint':
            ret2[i] = stint.run_simulation_pred(predictor, predictionlen, stint.freq, datamode=datamode)
        else:
            print('forecastmode not support:', forecastmode)
            break

    # evaluate every run and average the metrics
    acc = []
    for i in ret2.keys():
        if forecastmode == 'shortterm':
            df = ret2[i][0]
            _x = stint.get_evalret_shortterm(df)
        elif forecastmode == 'stint':
            df = ret2[i]
            _x = stint.get_evalret(df)
        acc.append(_x)

    b = np.array(acc)
    print(np.mean(b, axis=0))

    #save keys
    #stint._pitmodel.save_keys('pitmodel-keys.pickle')

    return b, ret2
def long_predict(predictor, sampleCnt = 100):
    """
    Stitch one long-horizon forecast out of many rolling short forecasts,
    keeping only the farthest (last) predicted step of every window.

    input:
        test_ds ; global var holding the rolling-window test dataset
        predictor
    output:
        (stitched forecast object, tss[0] dataframe)
    """
    import copy

    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=predictor,  # predictor
        num_samples=sampleCnt,  # number of sample paths we want for evaluation
    )

    all_forecasts = list(forecast_it)
    all_tss = list(ts_it)
    print(f'tss len={len(all_tss)}, forecasts len={len(all_forecasts)}')

    series_start, _first_row = next(all_tss[0].iterrows())

    def _offset_minutes(fc_idx):
        # minutes between the series start and this forecast window's start
        delta = all_forecasts[fc_idx].start_date - series_start
        return delta.days * 24 * 60 + delta.seconds // 60

    # windows are ordered latest-first, so index -1 is the earliest window
    first_start = _offset_minutes(-1)
    last_start = _offset_minutes(0)
    print(first_start, last_start)

    stitched = copy.deepcopy(all_forecasts[-1])
    nsample, npredict = stitched.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)

    merged = np.full((nsample, last_start - first_start + npredict), np.nan)
    for fc_idx in range(len(all_forecasts)):
        col = _offset_minutes(fc_idx) - first_start
        # keep only the farthest predicted step of this rolling window
        merged[:, col + npredict - 1] = all_forecasts[fc_idx].samples[:, -1]

    stitched.samples = merged
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return stitched, all_tss[0]
def get_alldf(dfx, year=2018, forecast_mode = 'shortterm'):
    """
    Concatenate the per-run result dataframes in *dfx* into one dataframe.

    dfx maps run id -> result; in 'shortterm' mode each result is a tuple
    whose first element is the dataframe, otherwise the result IS the
    dataframe.  A single run is returned as-is (no concat); an empty dfx
    raises IndexError, matching the historical behaviour.
    """
    #dfx = ret[f'{model}-RANK-{year}-inlap-nopitage']
    #dfx = ret[f'{model}-TIMEDIFF-{year}-noinlap-nopitage']
    if forecast_mode == 'shortterm':
        frames = [dfx[run_id][0] for run_id in dfx.keys()]
    else:
        frames = [dfx[run_id] for run_id in dfx.keys()]
    return pd.concat(frames) if len(frames) > 1 else frames[0]
def get_alldf_mode(dfx, year=2018,mode=0, forecast_mode = 'shortterm'):
    """
    Aggregate repeated-run predictions into a single record per
    (carno, startlap), summarizing pred_endrank across runs.

    mode:
       0; mode (scipy.stats.mode)
       1; mean
       2; median
    """
    dfall = get_alldf(dfx, year=year, forecast_mode = forecast_mode)

    rows = []
    for car in set(dfall.carno.values):
        laps_for_car = set(dfall[dfall['carno']==car].startlap.values)
        for lap in laps_for_car:
            dfrec = dfall[(dfall['carno']==car) & (dfall['startlap']==lap)]
            preds = dfrec.pred_endrank.values

            #get mode
            if mode == 0:
                summary = stats.mode(preds).mode[0]
            elif mode == 1:
                #use mean
                summary = np.mean(preds)
            elif mode == 2:
                #use median
                summary = np.median(preds)

            # take the first run's record and overwrite the prediction columns
            rec = dfrec.to_numpy()[0,:]
            rec[6] = summary                 # pred_endrank
            rec[7] = summary - rec[2]        # pred_diff vs startrank (col 2)
            if rec[7] == 0:
                rec[8] = 0
            elif rec[7] > 0:
                rec[8] = 1
            else:
                rec[8] = -1
            #endlap, pred_endlap
            rows.append(rec)

    #dfout = pd.concat(retdf)
    if forecast_mode == 'shortterm':
        columns = ['carno', 'startlap', 'startrank',
                   'endrank', 'diff', 'sign',
                   'pred_endrank', 'pred_diff', 'pred_sign',
                   #'endlap','pred_endlap'
                   ]
    else:
        columns = ['carno', 'startlap', 'startrank',
                   'endrank', 'diff', 'sign',
                   'pred_endrank', 'pred_diff', 'pred_sign',
                   'endlap','pred_endlap'
                   ]
    dfout = pd.DataFrame(rows, columns=columns)
    print('df size:', len(dfout))
    return dfout
def get_allsamples(dfx, year=2018):
    """
    Collapse multi-run forecasts into one mean trajectory per run.

    dfx maps run id (expected 0..runcnt-1) -> (df, forecast, tss), where
    forecast maps carno -> samples array of shape (samplecnt, lapcnt).
    Returns (full_samples, full_tss): full_samples maps carno -> array of
    shape (runcnt, lapcnt) holding the NaN-ignoring per-run sample means;
    full_tss is taken from the first run.
    """
    run_ids = list(dfx.keys())
    n_runs = len(run_ids)
    first_run = dfx[run_ids[0]]
    full_tss = first_run[2]
    cars = list(full_tss.keys())
    n_samples, n_laps = first_run[1][cars[0]].shape
    print('sacmplecnt:', n_samples, 'lapcnt:', n_laps, 'runcnt:', n_runs)

    #empty samples
    full_samples = {car: np.zeros((n_runs, n_laps)) for car in cars}

    for run_id in run_ids:
        #one run
        per_car_forecast = dfx[run_id][1]
        for car in cars:
            # mean over the sample dimension, ignoring NaN laps
            full_samples[car][run_id, :] = np.nanmean(per_car_forecast[car], axis=0)
            #if carno==3 and runid == 0:
            #    print('forecast:',forecast_mean)

    return full_samples, full_tss
#straight implementation of prisk
def quantile_loss(target, quantile_forecast, q):
    """
    Doubled pinball (quantile) loss summed over all steps.

    NaN entries are ignored via nansum; `(target <= quantile_forecast)`
    is a boolean array, so the weight per step is (1 - q) or -q.
    """
    weight = (target <= quantile_forecast) - q
    return 2.0 * np.nansum(np.abs((quantile_forecast - target) * weight))
def abs_target_sum(target):
    """Sum of absolute target values, ignoring NaN entries."""
    magnitudes = np.abs(target)
    return np.nansum(magnitudes)
def prisk(full_samples, full_tss, verbose = False):
    """
    Compute weighted quantile losses (0.1/0.5/0.9) via the gluonts Evaluator.

    full_samples maps carno -> (samplecnt, lapcnt) forecast samples;
    full_tss maps carno -> target series.  The first 12 laps are dropped
    before evaluation.  Returns the gluonts agg_metrics dict.
    """
    carlist = full_tss.keys()

    tss = []
    forecasts = []
    forecasts_mean = []  # built but not evaluated; kept for debugging
    freq = '1min'
    start = pd.Timestamp("01-01-2019", freq=freq)
    for car in carlist:
        testcar = car
        # NOTE(review): `start + 12` relies on Timestamp+int arithmetic that
        # is deprecated/removed in newer pandas — confirm the pinned version.
        fc = SampleForecast(samples = full_samples[testcar][:, 12:], freq=freq, start_date=start + 12)
        # single "mean path" forecast (not fed to the evaluator below)
        samples = np.mean(full_samples[testcar][:, 12:], axis =0, keepdims=True)
        fc_mean = SampleForecast(samples = samples, freq=freq, start_date=start + 12)

        # target as a minute-indexed dataframe aligned with the forecast dates
        index = pd.date_range(start='2019-01-01 00:00:00', freq = 'T', periods = len(full_tss[testcar]))
        ts = pd.DataFrame(index = index, data = full_tss[testcar])
        tss.append(ts)
        forecasts.append(fc)
        forecasts_mean.append(fc_mean)

    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(tss))
    if verbose:
        print(json.dumps(agg_metrics, indent=4))
    print(agg_metrics["wQuantileLoss[0.1]"], agg_metrics["wQuantileLoss[0.5]"],agg_metrics["wQuantileLoss[0.9]"])
    return agg_metrics
def prisk_direct_bysamples2(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    Deprecated duplicate kept for backward compatibility.

    The body of this function was byte-for-byte identical to
    prisk_direct_bysamples (only the docstring differed), so it now simply
    delegates to it.  Same inputs/outputs:

    target: endrank; forecast: pred_endrank; item_id: <carno, startlap>
    Returns (agg_metrics dict, aggrisk array).
    """
    # delegate to the canonical implementation to remove the copy-paste
    return prisk_direct_bysamples(full_samples, full_tss,
                                  quantiles=quantiles,
                                  startid=startid,
                                  verbose=verbose)
# In[ ]:
def prisk_direct_bysamples(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    calculate prisk by <samples, tss> directly (equal to gluonts implementation)

    target: endrank
    forecast: pred_endrank
    item_id: <carno, startlap>
    Laps before *startid* are excluded.  Returns (agg_metrics dict keyed
    'wQuantileLoss[q]', aggrisk array of per-quantile weighted losses).
    """
    cars = list(full_tss.keys())
    n_q = len(quantiles)
    per_car_loss = np.zeros((len(cars), n_q))
    per_car_target = np.zeros(len(cars))
    aggrisk = np.zeros(n_q)

    for row, carno in enumerate(cars):
        # for this car
        samples = full_samples[carno]
        target = full_tss[carno]

        # quantile curves: len(quantiles) x lapcnt
        q_curves = np.quantile(samples, quantiles, axis=0)
        for col, q in enumerate(quantiles):
            per_car_loss[row, col] = quantile_loss(target[startid:], q_curves[col][startid:], q)
        per_car_target[row] = abs_target_sum(target[startid:])

        if verbose==True and carno==3:
            print('target:', target[startid:])
            print('forecast:', q_curves[-1][startid:])
            print('target_sum:', per_car_target[row])
            print('quantile_forecasts:', q_curves[:,startid:])

    #agg
    #aggrisk = np.mean(prisk, axis=0)
    loss_sum = np.nansum(per_car_loss, axis=0)
    if verbose==True:
        print('prisk:', per_car_loss)
        print('prisk_sum:', loss_sum)
        print('target_sum:', per_car_target)
    total_target = np.sum(per_car_target)
    for col in range(n_q):
        aggrisk[col] = np.divide(loss_sum[col], total_target)

    agg_metrics = {f'wQuantileLoss[{q}]': aggrisk[col] for col, q in enumerate(quantiles)}
    print(agg_metrics.values())
    return agg_metrics, aggrisk
def clear_samples(full_samples, full_tss, clearidx):
    """
    Return deep copies of <samples, tss> with the laps in *clearidx* set to NaN.

    The inputs are left untouched; arrays are expected to be float-typed so
    the NaN assignment is valid.
    """
    import copy
    out_samples = copy.deepcopy(full_samples)
    out_tss = copy.deepcopy(full_tss)

    for carno in out_tss.keys():
        # blank the requested lap columns for this car
        out_samples[carno][:, clearidx] = np.nan
        out_tss[carno][clearidx] = np.nan

    return out_samples, out_tss
def do_rerank(dfout, short=True):
    """
    Re-sort predicted finishing positions globally within each startlap.

    Columns: carno, startlap, startrank, endrank, diff, sign, pred_endrank,
    pred_diff, pred_sign[, endlap, pred_endlap].  The (possibly float)
    pred_endrank values are converted into 0-based integer rank positions
    per startlap, and pred_diff / pred_sign are recomputed against endrank.
    With short=True the trailing endlap/pred_endlap columns are dropped
    from the output names (the input is expected to have 9 columns then).
    """
    cols=['carno','startlap','startrank','endrank','diff','sign','pred_endrank','pred_diff','pred_sign','endlap','pred_endlap']
    colid = {name: i for i, name in enumerate(cols)}

    #df = dfout.sort_values(by=['startlap','carno'])
    print('rerank...')

    chunks = []
    for lap in set(dfout.startlap.values):
        block = dfout[dfout['startlap']==lap].to_numpy()
        #print('in',block)
        # double-argsort turns raw predicted values into 0-based rank positions
        order = np.argsort(block[:, colid['pred_endrank']], axis=0)
        block[:, colid['pred_endrank']] = np.argsort(order, axis=0)

        #reset preds
        block[:, colid['pred_diff']] = block[:, colid['pred_endrank']] - block[:, colid['endrank']]
        for rec in block:
            delta = rec[colid['pred_diff']]
            if delta == 0:
                rec[colid['pred_sign']] = 0
            elif delta > 0:
                rec[colid['pred_sign']] = 1
            else:
                rec[colid['pred_sign']] = -1
        #print('out',block)
        chunks.append(block)

    stacked = np.vstack(chunks) if len(chunks) > 1 else chunks[0]
    out_cols = cols[:-2] if short else cols
    return pd.DataFrame(stacked.astype(int), columns=out_cols)
# In[ ]:
def long_predict_bymloutput_multirun(output, dfin, test_ds, predictor, sampleCnt=100):
    """
    Overlay per-lap ML predictions (from *dfin*) onto the time axis recovered
    from rolling gluonts forecasts; multi-run variant that fills the sample
    dimension with the per-run pred_endrank values.

    input:
        output    ; label for the (commented-out) plotting call
        dfin      ; dataframe with startlap / pred_endrank columns
        test_ds   ; rolling-window test dataset (latest window first)
        predictor ; used only to recover each window's start date
        sampleCnt ; number of sample paths requested per forecast
    output:
        (forecast object carrying the ML predictions as samples, tss[0])
    """
    def get_start(idx):
        # minute offset of forecast window idx relative to the series start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60

    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds,  # test dataset
        predictor = predictor,  # predictor
        num_samples=sampleCnt,  # number of sample paths we want for evaluation
    )

    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # windows are ordered latest-first: [-1] is the earliest, [0] the latest
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
        # get prediction from ml output
        # pos = laps
        # 1 ... 10 | 11   <- start pos in forecasts
        # 0 ... 9  | 10   <- 9 is the startlap
        #
        startlap = start_pos - 2
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values
            #pred_val = _rec.pred_endrank.values
            #make sure shape match, 100 samples
            # NOTE(review): assumes len(pred_val) == nsample (one dfin row per
            # sample path); otherwise this broadcast assignment fails — confirm.
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)

    target.samples = newsamples
    print('multirun target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bymloutput(output, dfin, test_ds, predictor):
    """
    Overlay per-lap ML predictions (from *dfin*) onto the time axis recovered
    from rolling gluonts forecasts; single-value variant: every sample path
    gets the first pred_endrank found for the window's startlap.

    input:
        output    ; label for the (commented-out) plotting call
        dfin      ; dataframe with startlap / pred_endrank columns
        test_ds   ; rolling-window test dataset (latest window first)
        predictor ; used only to recover each window's start date
    output:
        (forecast object carrying the ML predictions as samples, tss[0])
    """
    def get_start(idx):
        # minute offset of forecast window idx relative to the series start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60

    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds,  # test dataset
        predictor = predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )

    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # windows are ordered latest-first: [-1] is the earliest, [0] the latest
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
        # get prediction from ml output
        # pos = laps
        # 1 ... 10 | 11   <- start pos in forecasts
        # 0 ... 9  | 10   <- 9 is the startlap
        #
        startlap = start_pos - 2
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values[0]
            #pred_val = _rec.pred_endrank.values
            #make sure shape match, 100 samples
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            # scalar pred_val broadcasts across all sample paths at this lap
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)

    target.samples = newsamples
    print('target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bysamples(output, samples, tss, test_ds, predictor):
    """
    Build a long-horizon forecast using externally collected sample paths
    (use the farthest samples only).

    input:
        output    ; tag string (only used by the commented-out plotting call)
        samples   ; (nsample, maxlap) array of predicted ranks (0-based)
        tss       ; NOTE(review): this parameter is shadowed below by the
                    freshly generated ``tss = list(ts_it)`` and never used
        test_ds   ; gluonts test dataset
        predictor ; gluonts predictor (only used to obtain timestamps)
    return:
        (target, tss[0]) ; forecast object with the selected sample window
        (+1 restores 1-based ranks), and the observed series
    """
    def get_start(idx):
        # lap offset of forecast ``idx`` from the series start (1min = 1 lap)
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds,  # test dataset
        predictor= predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print(first_start, last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    #sample array size: last_start - first_start + npredict
    arraysize = last_start - first_start + npredict
    #error here
    #target.samples = samples[:,-len(forecasts)-1:] + 1
    #target.samples = samples[:, 10 + npredict:] + 1
    target.samples = samples[:, first_start:first_start + arraysize] + 1
    print('long_predict_bysamples==>target samples shape:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target, tss[0]
#
# different idx format to bymloutput
#
def long_predict_bydf(output, dfin, test_ds, predictor):
    """
    Same stitching scheme as ``long_predict_bymloutput`` but with a
    different startlap index convention (start_pos - 1 instead of - 2),
    for result dataframes that already use 1-based startlap indexing.

    input:
        output    ; tag string (only used by the commented-out plotting call)
        dfin      ; df with 'startlap' and 'pred_endrank' columns
        test_ds   ; gluonts test dataset
        predictor ; gluonts predictor (only used to obtain timestamps)
    return:
        (target, tss[0]) ; stitched forecast object and the observed series
    """
    def get_start(idx):
        # lap offset of forecast ``idx`` from the series start (1min = 1 lap)
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset = test_ds,  # test dataset
        predictor = predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
        # get prediction from ml output
        # pos = laps
        # 1 ... 10 | 11   <- start pos in forecasts
        # 0 ... 9  | 10   <- 9 is the startlap
        #
        # -1 here (vs -2 in long_predict_bymloutput): different index format
        startlap = start_pos - 1
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values[0]
            #pred_val = _rec.pred_endrank.values
            #make sure shape match, 100 samples
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)
    target.samples = newsamples
    print('target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def get_ranknet_multirun(retdata, testcar, test_ds, predictor, sampleCnt=100):
    """Build the RankNet multi-run long-term forecast for one car.

    Concatenates the per-run result dataframes in ``retdata`` (filtered to
    ``testcar``), shifts lap/rank columns from 1-based to 0-based so they
    match the ml-model output format, then delegates to
    ``long_predict_bymloutput_multirun``.

    Returns:
        (target, tss): forecast object and observed series for plotting.
    """
    # one result df per simulation run; keep only rows of the test car
    per_run = [retdata[run_id][0][retdata[run_id][0]['carno'] == testcar]
               for run_id in retdata.keys()]
    dfin_ranknet = pd.concat(per_run)
    print('dfin_ranknet size:', len(dfin_ranknet))
    # convert to the 0-based indexing expected by the ml-model format
    for col in ('startlap', 'startrank', 'endrank'):
        dfin_ranknet[col] = dfin_ranknet[col] - 1
    return long_predict_bymloutput_multirun('ranknet-rank', dfin_ranknet,
                                            test_ds, predictor,
                                            sampleCnt=sampleCnt)
# In[ ]:
def ploth(ts_entry, forecast_entry, pits,caution, pitstop,outputfile,
          colors = ['r','g','m'],
          plabels= ['observed','svr','arima','ranknet'],
          ylabel = 'RANK'):
    """
    Plot one subplot per forecast: observed series, CurRank baseline
    (observed series shifted 2 laps), the forecast with its 90% interval,
    pit-stop markers and a race-status ribbon; save as <outputfile>.pdf.

    input:
        ts_entry, forecast_entry ; lists of observed series / gluonts
            forecast objects, same length
        pits, caution, pitstop   ; race status from get_racestatus()
        outputfile               ; file stem (also used as figure title)
        colors, plabels          ; per-forecast color / label lists
        ylabel                   ; 'RANK' or laptime mode (switches axes
                                   limits and status-ribbon placement)

    NOTE(review): mutable defaults (colors/plabels lists) are read-only
    here, so sharing across calls is harmless.
    """
    #plot_length = int(forecast_entry[0].samples.shape[1] *1.2)
    #plot_length = forecast_entry[0].samples.shape[1] + 10
    #prediction_intervals = (50.0, 90.0)
    prediction_intervals = [90.0]
    #legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    figcnt = len(forecast_entry)
    #fig, axs = plt.subplots(figcnt,1, figsize=(8,6))
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    #colors = ['r','g','m']
    #plabels = ['observed','svr','arima','ranknet']
    for idx in range(figcnt):
        ax = plt.subplot(figcnt, 1, idx+1)
        #ax = plt.subplot(1, figcnt, idx+1)
        #ts_entry.iloc[-plot_length:,0].plot(ax=axs, linewidth=1)  # plot the time series
        #ts_entry.iloc[-plot_length:,0].plot(ax=axs[idx], linewidth=1)  # plot the time series
        #plot_length = int(forecast_entry[idx].samples.shape[1] *1.2)
        # observed series
        ts_entry[idx].iloc[:,0].plot(linewidth=1, color='b',
            marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # currank: the observed series shifted forward by 2 laps,
        # i.e. "predict the current rank" baseline
        sv = ts_entry[idx].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq='1min') + 2
        date_index = pd.date_range(start, periods = len(sv)-2, freq='1min')
        df2 = pd.DataFrame(sv[:-2], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
            marker='+', alpha=0.7, zorder=-1, label='CurRank')
        #for idx in range(len(forecast_entry)):
        #    forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='g')
        forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals,
            color=colors[idx],label=plabels[idx+1], zorder=10)
        #forecast_entry[1].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='b')
        #forecast_entry[2].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='r')
        #add mean line, compare with median
        #if forecast_entry[idx].samples.shape[0] > 1:
        if idx>3:
            mean_forecast = copy.deepcopy(forecast_entry[idx])
            mean_forecast.samples = np.mean(mean_forecast.samples, axis=0).reshape((1,-1))
            mean_forecast.copy_dim(0).plot(prediction_intervals=prediction_intervals,
                color='g',label='use-mean', zorder=10)
        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        #if idx==0:
        ax.set_ylabel(ylabel)
        if idx==0:
            plt.title(outputfile)
        # relabel x ticks from timestamps to lap numbers
        locs, labels = plt.xticks()
        #plt.xticks(locs, range(len(locs)))
        start_loc = locs[0]
        #offset = range(0, 200, 5)
        offset = range(0, gvar.maxlap, 5)
        #new_locs = range(start_loc , start_loc+200, 10)
        new_locs = [start_loc + x for x in offset]
        #new_labels = [str(x-start_loc + 1) for x in new_locs]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            # NOTE(review): ``xl`` is assigned only in this branch and
            # relied on by later iterations via loop carry-over
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[idx])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            xlim_h = len(ts_entry[idx])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        #ax.set_xlim((80,110))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)
        #add racestatus
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcar(carno):
    """Plot the first five rank forecasts of one car via ``ploth``.

    Relies on the globally prepared ``alldata`` (per-car forecast tuples)
    and ``gvar.rankdata`` (race log).
    """
    ts_list, forecast_list = alldata[carno]
    pits, _, caution, pitstop, _, _ = get_racestatus(carno, gvar.rankdata)
    print(np.where(pitstop == 1))
    ploth(ts_list[:5], forecast_list[:5], pits, caution, pitstop,
          'ranknet-rf-rank-forecast-%d' % carno,
          colors=['y', 'c', 'g', 'm', 'r'],
          plabels=['observed', 'SVR', 'RF', 'Arima', 'RrankNet-Oracle',
                   'RrankNet-MLP'])
def plotcar_laptime(carno):
    """Plot the lap-time forecasts of one car via ``ploth``.

    Relies on the globally prepared ``alldata`` and ``gvar.rankdata``.
    """
    ts_list, forecast_list = alldata[carno]
    pits, _, caution, pitstop, _, _ = get_racestatus(carno, gvar.rankdata)
    print(np.where(pitstop == 1))
    ploth(ts_list, forecast_list, pits, caution, pitstop,
          'ranknet-oracle-laptime-forecast-%d' % carno,
          colors=['m', 'r'],
          plabels=['observed', 'RrankNet-Oracle', 'RrankNet-MLP'],
          ylabel='LapTime')
def plotrank(outputfile, mode='RANK' ):
    """
    Plot the observed rank (or laptime) curve of every car in ``alldata``,
    one subplot per car, with pit stops and a race-status ribbon overlaid;
    save the figure as <outputfile>.pdf.

    input:
        outputfile ; file stem for the saved pdf
        mode       ; 'RANK' plots rank, anything else plots laptime
        alldata, rankdata; global data
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        #target_svr, target_rf,target_arima, target_oracle, target_ranknet_1run = savedata[carno]
        #target_oracle(by longpredict), tss_oracle_multirun,tss_ranknet_multirun
        tsss, targets = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, gvar.rankdata)
        print(np.where(pitstop==1))
        ax = plt.subplot(figcnt, 1, idx+1)
        if mode == 'RANK':
            ax.plot(ranks, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='Rank')
            ax.set_ylim((-5,+35))
            ax.plot(pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            #add racestatus
            add_status(ax,0, caution, pitstop)
        else:
            ax.plot(laptimes, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='LapTime')
            ax.set_ylim((30,140))
            ax.plot(pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            #add racestatus
            add_status(ax,0, caution, pitstop,y=32, height=5)
        #ax.set_xlim((0,200))
        ax.set_xlim((0,gvar.maxlap))
        ax.set_ylabel('car-%d'%carno)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcarx(carno):
    """Plot rank forecasts for one car, with forecast slot 2 replaced by
    the weighted-oracle result from ``oracledata``.

    Relies on the globals ``alldata``, ``oracledata`` and ``gvar.rankdata``;
    mutates the cached ``alldata[carno]`` lists in place.
    """
    ts_list, forecast_list = alldata[carno]
    oracle_ts, oracle_forecast = oracledata[carno]
    # splice the weighted-oracle run into position 2 (in place)
    ts_list[2] = oracle_ts[1]
    forecast_list[2] = oracle_forecast[1]
    pits, _, caution, pitstop, _, _ = get_racestatus(carno, gvar.rankdata)
    print(np.where(pitstop == 1))
    ploth(ts_list[:5], forecast_list[:5], pits, caution, pitstop,
          'ranknet-rf-rank-forecast-%d' % carno,
          colors=['y', 'c', 'g', 'm', 'r'],
          plabels=['observed', 'SVR', 'RF', 'Weighted-Oracle',
                   'RrankNet-Oracle', 'RrankNet-MLP'])
def plotoracle(alldata, carno, destdir):
    """Plot the oracle long-term forecast variants of one car into
    ``destdir`` (figure stem: ranknet-oracle-forecast-<carno>).

    Relies on the global ``gvar.rankdata`` for race status.
    """
    outputfile = destdir + 'ranknet-oracle-forecast-%d' % carno
    ts_list, forecast_list = alldata[carno]
    pits, _, caution, pitstop, _, _ = get_racestatus(carno, gvar.rankdata)
    print(np.where(pitstop == 1))
    ploth(ts_list, forecast_list, pits, caution, pitstop,
          outputfile,
          colors=['y', 'c', 'g', 'm', 'r'],
          plabels=['observed', '1run-samples', '1run-df', 'multimean',
                   'norerank-multimean', 'mrun-samples'])
def plotallcars(alldata, outputfile, drawid = 0,
                colors = ['g','c','m','r','y'],
                plabels= ['observed','1run-samples','1run-df','multimean','norerank-multimean','mrun-samples'],
                ylabel='RANK'):
    """
    plot a single fig for all cars: one subplot per car showing the
    observed series, the CurRank baseline (observed series shifted by
    prediction_length laps), the selected forecast, pit-stop markers and
    the race-status ribbon; saved as <outputfile>.pdf.

    input:
        prediction_length,freq ; global var (via gvar)
        alldata, rankdata; global data
        drawid : long prediction result index in alldata[carno] to draw
        colors, plabels : per-forecast color / label lists
        ylabel : 'RANK' or laptime mode (switches axes and ribbon placement)
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    prediction_intervals = [90.0]
    #legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    # style of the per-subplot 'car-N' annotation
    font = {'family': 'serif',
            'color': 'darkred',
            'weight': 'normal',
            'size': 12,
            }
    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        #target_svr, target_rf,target_arima, target_oracle, target_ranknet_1run = savedata[carno]
        #target_oracle(by longpredict), tss_oracle_multirun,tss_ranknet_multirun
        ts_entry, forecast_entry = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, gvar.rankdata)
        print(np.where(pitstop==1))
        ax = plt.subplot(figcnt, 1, idx+1)
        # observed
        ts_entry[drawid].iloc[:,0].plot(linewidth=1, color='b',
            marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # currank: observed series shifted forward by prediction_length laps
        sv = ts_entry[drawid].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq=gvar.freq) + gvar.prediction_length
        date_index = pd.date_range(start, periods = len(sv) - gvar.prediction_length, freq = gvar.freq)
        df2 = pd.DataFrame(sv[:- gvar.prediction_length], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
            marker='+', alpha=0.7, zorder=-1, label='CurRank')
        #forecast
        forecast_entry[drawid].copy_dim(0).plot(prediction_intervals=prediction_intervals,
            color=colors[drawid],label=plabels[drawid+1], zorder=10)
        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        ax.set_ylabel(ylabel)
        # relabel x ticks from timestamps to lap numbers
        locs, labels = plt.xticks()
        #plt.xticks(locs, range(len(locs)))
        start_loc = locs[0]
        #offset = range(0, 200, 5)
        offset = range(0, gvar.maxlap, 5)
        #new_locs = range(start_loc , start_loc+200, 10)
        new_locs = [start_loc + x for x in offset]
        #new_labels = [str(x-start_loc + 1) for x in new_locs]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            # NOTE(review): ``xl`` is assigned only in this branch and
            # relied on by later iterations via loop carry-over
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[drawid])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            xlim_h = len(ts_entry[drawid])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        #plt.title(outputfile)
        plt.text(xl + xlim_h - 15, 35, f'car-{carno}',fontdict=font)
        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        #ax.set_xlim((80,110))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)
        #add racestatus
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def get_racestatus_all(rankdata):
    """Collect race-wide pit-stop and caution laps over all cars.

    Args:
        rankdata: DataFrame with at least the columns 'completed_laps',
            'rank', 'last_laptime', 'time_behind_leader', 'lap_status'
            ('P' marks a pit-stop lap) and 'track_status' ('Y' marks a
            caution/yellow-flag lap).

    Returns:
        (pitlaps, cautionlaps): two sorted lists of unique int lap numbers
        on which any car pitted / the track was under caution.
    """
    data = rankdata[['completed_laps', 'rank', 'last_laptime',
                     'time_behind_leader']].values
    # Vectorized flag comparison instead of the previous element-wise loop
    # over rows of a 2-D object ``.values`` array (which relied on the
    # truthiness of 1-element boolean arrays).
    pitstop = (rankdata['lap_status'] == 'P').to_numpy().astype(int)
    caution = (rankdata['track_status'] == 'Y').to_numpy().astype(int)
    pits = data[pitstop == 1]
    cautions = data[caution == 1]
    # column 0 is completed_laps; dedupe and sort
    pitlaps = sorted(set(pits[:, 0].astype(int)))
    cautionlaps = sorted(set(cautions[:, 0].astype(int)))
    return pitlaps, cautionlaps
def get_racestatus(carno, rankdata):
    """Extract race-status arrays for one car.

    Args:
        carno: car number to select.
        rankdata: race log df with car_number, completed_laps, rank,
            last_laptime, time_behind_leader, lap_status, track_status.

    Returns:
        (pits, cautions, caution[1:], pitstop[1:], ranks[1:], laptimes[1:]):
        pits/cautions are rows of [completed_laps, rank, last_laptime,
        time_behind_leader] on pit / yellow-flag laps; the [1:] slices drop
        the completed_laps=0 record so the arrays line up with 0-based
        array-style lap indexing.
    """
    cardf = rankdata[rankdata['car_number'] == carno]
    data = cardf[['completed_laps', 'rank', 'last_laptime',
                  'time_behind_leader']].values
    # per-lap 0/1 flags ('P' = pit, 'Y' = yellow/caution)
    pitstop = np.array([1 if s == 'P' else 0
                        for s in cardf[['lap_status']].values])
    caution = np.array([1 if s == 'Y' else 0
                        for s in cardf[['track_status']].values])
    pits = data[np.where(pitstop == 1)]
    cautions = data[np.where(caution == 1)]
    ranks = cardf[['rank']].values
    laptimes = cardf[['last_laptime']].values
    return pits, cautions, caution[1:], pitstop[1:], ranks[1:], laptimes[1:]
# Colors of the per-lap race-status ribbon drawn by add_status():
# red = pit-stop lap, yellow = caution (yellow flag), green = normal lap.
# The commented hex values are the previously used pastel alternatives.
#red = '#ff8080'
red = 'red'
#yellow = '#8080ff'
yellow = 'yellow'
#green = '#80ff80'
green = 'green'
def add_status(axs, xl, caution, pitstop, y=-4, height=2):
    """Draw a per-lap race-status ribbon onto axes ``axs``.

    Each lap is a unit-wide rectangle at vertical position ``y``: red for
    a pit-stop lap, yellow for a caution lap, green otherwise (pit wins
    over caution when both flags are set for the same lap).

    Args:
        axs: matplotlib axes to draw on.
        xl: x-offset (left xlim) aligning the ribbon with the plotted laps.
        caution, pitstop: per-lap 0/1 status arrays.
        y, height: vertical placement and size of the ribbon.
    """
    for lap in range(min(len(caution), len(pitstop))):
        if pitstop[lap] == 1:
            color = red
        elif caution[lap] == 1:
            color = yellow
        else:
            color = green
        patch = plt.Rectangle((lap + xl - 0.5, y), 1, height,
                              fc=color, ec=color)
        axs.add_patch(patch)
# In[ ]:
#
# stint evaluation
#
def eval_bydf(testdf, bydf, forcematch=True, force2int=False):
    """Evaluate stint predictions in ``testdf`` restricted to the
    <carno, startlap> pairs that appear in ``bydf``.

    Args:
        testdf: result df <carno, startlap, startrank, endrank, ...>.
        bydf: reference df whose <carno, startlap> pairs select records.
        forcematch: skip records whose startrank/endrank disagree with bydf.
        force2int: rebuild the collected records as int columns.

    Returns:
        (dfout, accret): the selected records sorted by carno/startlap and
        the accuracy metrics from ``stint.get_evalret``.
    """
    cars = set(bydf.carno.values)
    startlaps = {car: set(bydf[bydf['carno'] == car].startlap.values)
                 for car in cars}
    selected = []
    for car in cars:
        for startlap in startlaps[car]:
            mask = (testdf['carno'] == car) & (testdf['startlap'] == startlap)
            dfrec = testdf[mask]
            if forcematch:
                a = testdf[mask].to_numpy().astype(int)
                b = bydf[(bydf['carno'] == car) &
                         (bydf['startlap'] == startlap)].to_numpy().astype(int)
                if len(a) != 0 and len(b) != 0:
                    # require identical startrank (col 2) and endrank (col 3)
                    if a[0][2] != b[0][2] or a[0][3] != b[0][3]:
                        #print('mismatch:', a, b)
                        continue
            selected.append(dfrec)
    dfout = pd.concat(selected)
    if force2int:
        dfout = pd.DataFrame(dfout.to_numpy().astype(int),
                             columns=['carno', 'startlap', 'startrank',
                                      'endrank', 'diff', 'sign',
                                      'pred_endrank', 'pred_diff', 'pred_sign',
                                      'endlap', 'pred_endlap'])
    dfout = dfout.sort_values(by=['carno', 'startlap'])
    print('df size:', len(dfout))
    #return acc
    accret = stint.get_evalret(dfout)[0]
    return dfout, accret
def eval_sync(testdf, errlist, force2int=False):
    """Evaluate ``testdf`` after removing the <carno, startlap> records
    listed in ``errlist`` (mismatches detected by ``cmp_df``), so results
    stay in sync with another model's record set.

    Args:
        testdf: result df <carno, startlap, startrank, endrank, ...>.
        errlist: list of [carno, startlap] pairs to drop.
        force2int: rebuild the collected records as int columns.

    Returns:
        (dfout, accret) as in ``eval_bydf``.
    """
    cars = set(testdf.carno.values)
    startlaps = {car: set(testdf[testdf['carno'] == car].startlap.values)
                 for car in cars}
    kept = []
    for car in cars:
        for startlap in startlaps[car]:
            if [car, startlap] in errlist:
                continue
            kept.append(testdf[(testdf['carno'] == car) &
                               (testdf['startlap'] == startlap)])
    dfout = pd.concat(kept)
    if force2int:
        dfout = pd.DataFrame(dfout.to_numpy().astype(int),
                             columns=['carno', 'startlap', 'startrank',
                                      'endrank', 'diff', 'sign',
                                      'pred_endrank', 'pred_diff', 'pred_sign',
                                      'endlap', 'pred_endlap'])
    dfout = dfout.sort_values(by=['carno', 'startlap'])
    print('df size:', len(dfout))
    #return acc
    accret = stint.get_evalret(dfout)[0]
    return dfout, accret
def cmp_df(testdf, bydf):
    """Cross-check two stint-result dataframes record by record.

    For every <carno, startlap> pair in ``bydf``, compare startrank
    (column 2) and endrank (column 3) against ``testdf``. Pairs missing
    from either side, or with differing ranks, are reported. Dataframes
    may legitimately differ slightly (RankNet drops short time series).

    Returns:
        (errcnt, err_list): mismatch count and the list of mismatching
        [carno, startlap] pairs.
    """
    cars = set(bydf.carno.values)
    startlaps = {car: set(bydf[bydf['carno'] == car].startlap.values)
                 for car in cars}
    err_list = []
    errcnt = 0
    for car in cars:
        for startlap in startlaps[car]:
            a = testdf[(testdf['carno'] == car) &
                       (testdf['startlap'] == startlap)].to_numpy().astype(int)
            b = bydf[(bydf['carno'] == car) &
                     (bydf['startlap'] == startlap)].to_numpy().astype(int)
            if len(a) == 0 or len(b) == 0:
                errcnt += 1
                print('mismatch empty:', a, b)
                err_list.append([car, startlap])
                continue
            # startrank / endrank disagreement
            if a[0][2] != b[0][2] or a[0][3] != b[0][3]:
                print('mismatch:', a, b)
                errcnt += 1
                err_list.append([car, startlap])
    print('errcnt:', errcnt)
    return errcnt, err_list
def df2samples(dfall, prediction_len=2, samplecnt=1):
    """Convert an ml-model result df into <samples, tss> arrays.

    Works for ml-model output that carries a single prediction per record;
    the prediction is replicated across ``samplecnt`` sample rows.

    Args:
        dfall: df with carno, startlap, endrank, pred_endrank columns.
        prediction_len: forecast horizon; values are stored at lap
            startlap + prediction_len.
        samplecnt: number of identical sample rows to fill.

    Returns:
        (full_samples, full_tss): dicts keyed by carno with
        (samplecnt, gvar.maxlap) forecast arrays and (gvar.maxlap,)
        observation arrays, NaN where no record exists.
    """
    carlist = set(dfall.carno.values)
    startlaps = {car: set(dfall[dfall['carno'] == car].startlap.values)
                 for car in carlist}
    full_samples = {}
    full_tss = {}
    for carno in carlist:
        tss = np.full(gvar.maxlap, np.nan)
        samples = np.full((samplecnt, gvar.maxlap), np.nan)
        for startlap in startlaps[carno]:
            dfrec = dfall[(dfall['carno'] == carno) &
                          (dfall['startlap'] == startlap)]
            lap = int(dfrec.startlap.values[0] + prediction_len)
            samples[:, lap] = dfrec.pred_endrank.values[0]
            tss[lap] = dfrec.endrank.values[0]
        full_samples[carno] = samples
        full_tss[carno] = tss
    return full_samples, full_tss
def df2samples_ex(dfall, samplecnt=100,errlist=[]):
    """
    for stint results only: convert a concatenated multi-run result df
    into per-car <samples, tss> arrays, indexed at each stint's endlap.

    input:
        dfall ; concatenated result df <carno,startlap,startrank,endrank,diff,sign,pred_endrank,pred_diff,pred_sign,endlap,pred_endlap>
        samplecnt ; number of sample rows (= pred_endrank values per record)
        errlist ; <car, startlap> list of records to skip
    return:
        samples, tss ; dicts keyed by carno, NaN where no record exists

    NOTE(review): the mutable default ``errlist=[]`` is shared across calls;
    harmless here because it is only read, never mutated.
    """
    #samplecnt = len(runret)
    full_samples = {}
    full_tss = {}
    carlist = set(dfall.carno.values)
    startlaps = {}
    for car in carlist:
        startlaps[car] = set(dfall[dfall['carno']==car].startlap.values)
    #empty samples
    for carid, carno in enumerate(carlist):
        full_tss[carno] = np.zeros((gvar.maxlap))
        full_tss[carno][:] = np.nan
        full_samples[carno] = np.zeros((samplecnt,gvar.maxlap))
        full_samples[carno][:] = np.nan
        for startlap in startlaps[carno]:
            thisrec = [carno,startlap]
            if thisrec in errlist:
                continue
            dfrec = dfall[(dfall['carno']==carno) & (dfall['startlap']==startlap)]
            # save to the endlap (unlike runs2samples, which uses startlap)
            #curlap = int(dfrec.startlap.values[0])
            curlap = int(dfrec.endlap.values[0])
            target = dfrec.endrank.values[0]
            forecast = dfrec.pred_endrank.to_numpy()
            #if carno==12:
            #    print('forecast.shape', forecast.shape)
            full_samples[carno][:,curlap] = forecast
            full_tss[carno][curlap] = target
    return full_samples, full_tss
def runs2samples(runret, errlist):
    """Stack per-run stint results into Monte-Carlo style sample arrays.

    For stint results only: each run contributes one sample row per
    <carno, startlap> record, stored at the stint's startlap.

    Args:
        runret: list of per-run result dfs <carno, startlap, startrank,
            endrank, diff, sign, pred_endrank, pred_diff, pred_sign,
            endlap, pred_endlap>.
        errlist: [carno, startlap] pairs to skip.

    Returns:
        (full_samples, full_tss): dicts keyed by carno with
        (len(runret), gvar.maxlap) forecast arrays and (gvar.maxlap,)
        observation arrays, NaN where no record exists.
    """
    samplecnt = len(runret)
    carlist = set(runret[0].carno.values)
    dfall = pd.concat(runret)
    startlaps = {car: set(dfall[dfall['carno'] == car].startlap.values)
                 for car in carlist}
    full_samples = {}
    full_tss = {}
    for carno in carlist:
        tss = np.full(gvar.maxlap, np.nan)
        samples = np.full((samplecnt, gvar.maxlap), np.nan)
        for startlap in startlaps[carno]:
            if [carno, startlap] in errlist:
                continue
            dfrec = dfall[(dfall['carno'] == carno) &
                          (dfall['startlap'] == startlap)]
            lap = int(dfrec.startlap.values[0])
            samples[:, lap] = dfrec.pred_endrank.to_numpy()
            tss[lap] = dfrec.endrank.values[0]
        full_samples[carno] = samples
        full_tss[carno] = tss
    return full_samples, full_tss
# In[ ]:
def get_config():
    """Snapshot all experiment settings into a flat list (for logging /
    pickling alongside results).

    NOTE(review): every entry is a module-level global expected to be set
    elsewhere in this script before the call; the order matters to
    whichever consumer unpacks the list.
    """
    config = [
        _savedata,
        _skip_overwrite,
        _inlap_status,
        _feature_mode,
        _featureCnt,
        freq ,
        _train_len,
        prediction_length,
        context_ratio,
        context_length,
        contextlen,
        dataset,
        epochs,
        gpuid,
        _use_weighted_model,
        trainmodel,
        _use_cate_feature,
        use_feat_static,
        distroutput,
        batch_size,
        loopcnt,
        _test_event,
        testmodel,
        pitmodel,
        year
    ]
    return config
def test_global():
    # Quick sanity check that writes through the shared gvar module are
    # visible globally: bumps gvar._hi by 200 on each call.
    gvar._hi += 200
def get_event_info(event):
    """Look up event metadata in ``gvar.events_info`` keyed by the event id,
    i.e. the part of ``event`` before the first '-'."""
    eid, _, _ = event.partition('-')
    return gvar.events_info[eid]
| 107,579 | 33.250239 | 194 | py |
rankpredictor | rankpredictor-master/src/indycar/model/save/before_onelag/quicktest_simulator.py | #!/usr/bin/env python
# coding: utf-8
# ### stint simulator
# based on: stint_simulator_shortterm_pitmodel.py
#
#
# long term predictor by continuously regressive forecasting at each pitstop
#
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# + rank prediction directly
# + rank prediction by laptime2rank,timediff2rank
# + laptime,lapstatus prediction
#
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import logging
from optparse import OptionParser
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random, math
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from indycar.model.pitmodel import PitModelSimple, PitModelMLP
from indycar.model.deeparw import DeepARWeightEstimator
import indycar.model.global_variables as gvar
import os
random.seed()
os.getcwd()
#GPUID = 1
# ### global constants
# In[3]:
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# Feature-row indices into the laptime_data matrix [car, feature, lap].
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSEDTIME= 7
COL_LAP2NEXTPIT= 8
COL_TARGET_PREDICTED = 8
# added new features
COL_LEADER_PITCNT = 9
COL_TOTAL_PITCNT = 10
COL_SHIFT_TRACKSTATUS = 11
COL_SHIFT_LAPSTATUS = 12
COL_SHIFT_LEADER_PITCNT = 13
COL_SHIFT_TOTAL_PITCNT = 14
COL_LASTFEATURE = 14
# dynamically extended space in simulation (scratch copies of the oracle
# status columns, appended after the last feature)
COL_TRACKSTATUS_SAVE = COL_LASTFEATURE+1
COL_LAPSTATUS_SAVE = COL_LASTFEATURE+2
COL_CAUTION_LAPS_INSTINT_SAVE = COL_LASTFEATURE+3
COL_LAPS_INSTINT_SAVE= COL_LASTFEATURE+4
COL_ENDPOS = COL_LASTFEATURE+5
# Feature-selection bitmask flags (combined with | and tested via
# test_flag(); decode_feature_mode() renders a mask as text).
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
FEATURE_LEADER_PITCNT = 8
FEATURE_TOTAL_PITCNT = 16
FEATURE_SHIFT_TRACKSTATUS = 32
FEATURE_SHIFT_LAPSTATUS = 64
FEATURE_SHIFT_LEADER_PITCNT = 128
FEATURE_SHIFT_TOTAL_PITCNT = 256
# flag -> (long name, single-character short code) for pretty printing
_feature2str= {
    FEATURE_STATUS : ("FEATURE_STATUS",'S'),
    FEATURE_PITAGE : ("FEATURE_PITAGE",'A'),
    FEATURE_LEADER_PITCNT : ("FEATURE_LEADER_PITCNT",'L'),
    FEATURE_TOTAL_PITCNT : ("FEATURE_TOTAL_PITCNT",'T'),
    FEATURE_SHIFT_TRACKSTATUS : ("FEATURE_SHIFT_TRACKSTATUS",'Y'),
    FEATURE_SHIFT_LAPSTATUS : ("FEATURE_SHIFT_LAPSTATUS",'P'),
    FEATURE_SHIFT_LEADER_PITCNT : ("FEATURE_SHIFT_LEADER_PITCNT",'L'),
    FEATURE_SHIFT_TOTAL_PITCNT : ("FEATURE_SHIFT_TOTAL_PITCNT",'T')
}
# oracle mode
MODE_ORACLE = 1024  # oracle = track + lap
MODE_ORACLE_TRACKONLY = 1
MODE_ORACLE_LAPONLY = 2
# oracle mode for training
MODE_NOLAP = 1
MODE_NOTRACK = 2
# predicting mode
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32
# disturbe analysis
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256
# mode flag -> printable name (MODE_NOLAP/MODE_NOTRACK reuse 1/2 and are
# covered by the ORACLE_TRACKONLY/LAPONLY entries)
_mode_map = {MODE_ORACLE:'MODE_ORACLE',MODE_ORACLE_TRACKONLY:'MODE_ORACLE_TRACKONLY',
    MODE_ORACLE_LAPONLY:'MODE_ORACLE_LAPONLY',
    MODE_TESTZERO:'MODE_TESTZERO',MODE_TESTCURTRACK:'MODE_TESTCURTRACK',
    MODE_PREDTRACK:'MODE_PREDTRACK',MODE_PREDPIT:'MODE_PREDPIT',
    MODE_DISTURB_CLEARTRACK:'MODE_DISTURB_CLEARTRACK',MODE_DISTURB_ADJUSTTRACK:'MODE_DISTURB_ADJUSTTRACK',
    MODE_DISTURB_ADJUSTPIT:'MODE_DISTURB_ADJUSTPIT'}
def nan_helper(y):
    """Locate NaNs in a 1-d array.

    Returns the boolean NaN mask together with an index function mapping
    logical indices to positional ones, e.g. for interpolation:

    >>> # linear interpolation of NaNs
    >>> nans, x = nan_helper(y)
    >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)
    return mask, lambda z: z.nonzero()[0]
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#_feature_mode = FEATURE_STATUS
def decode_feature_mode(feature_mode):
    """Render a feature bitmask as text.

    Prints the long names of all features enabled in ``feature_mode`` and
    returns a compact string with one character per known feature ('0'
    for disabled ones), in ``_feature2str`` iteration order.
    """
    names = []
    shorts = []
    for flag, (longname, shortname) in _feature2str.items():
        if test_flag(feature_mode, flag):
            names.append(longname)
            shorts.append(shortname)
        else:
            shorts.append('0')
    print(' '.join(names))
    return ''.join(shorts)
def add_leader_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS, shift_len = 0,
                   dest_col = COL_LEADER_PITCNT,
                   verbose = False):
    """
    add a new feature into mat(car, feature, lap): for each car and lap,
    the number of cars ahead of it (by rank order, optionally shifted
    ``shift_len`` laps back) that pit in that lap.

    input:
        sel_mat : laptime_data array [car, feature, lap]
        rank_col : rank column used to order cars per lap (its NaN tail
                   also defines each car's valid series length)
        pit_col : 0/1 pit-stop status column
        shift_len : use the rank order from ``shift_len`` laps earlier
        dest_col : target column; -1 appends a new feature column
        verbose : dump intermediate arrays (laps 190+) for debugging
    return:
        the updated (or newly allocated) array
    """
    dim1, dim2, dim3 = selmat.shape
    # rerank by the rank_col: idx[r, lap] = car index holding rank r
    idx = np.argsort(selmat[:, rank_col,:], axis=0)
    # NOTE(review): true_rank is computed but never used below;
    # np.float is a deprecated alias of float in recent numpy
    true_rank = np.argsort(idx, axis=0).astype(np.float)
    # get leaderCnt by sorted pits: pits[r, lap] = pit flag of the car
    # ranked r (rank order taken shift_len laps earlier)
    pits = np.zeros((dim1,dim3))
    for lap in range(shift_len, dim3):
        col = idx[:, lap-shift_len]
        pits[:, lap] = selmat[col, pit_col, lap]
    # cumulative pits of strictly better-ranked cars in the same lap
    leaderCnt = np.nancumsum(pits, axis=0) - pits
    if verbose:
        print('pits:\n')
        print(pits[:,190:])
        print('leaderCnt raw:\n')
        print(leaderCnt[:,190:])
    #remove nans
    nanidx = np.isnan(leaderCnt)
    leaderCnt[nanidx] = 0
    if verbose:
        print('leaderCnt after remove nan:\n')
        print(leaderCnt[:,190:])
    if dest_col == -1:
        #create a new data
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode
        newmat = selmat
    # scatter back from rank order to car order
    for lap in range(dim3):
        col = idx[:, lap]
        newmat[col, dest_col, lap] = leaderCnt[:, lap]
    # sync length to COL_RANK: restore the NaN tail of each car's series
    for rec in newmat:
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            #todo, some invalid nan, remove them
            #rec[dim2, np.isnan(rec[dim2,:])] = 0
            rec[dest_col, -nan_count:] = np.nan
    return newmat
def add_allpit_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS,
                   dest_col = COL_TOTAL_PITCNT, verbose = False):
    """
    add a new feature into mat(car, feature, lap): the total number of
    pit stops over all cars in each lap (the same per-lap vector is
    written into every car's row).

    input:
        sel_mat : laptime_data array [car, feature, lap]
        rank_col : column whose NaN tail defines each car's valid length
        pit_col : 0/1 pit-stop status column summed across cars
        dest_col : target column; -1 appends a new feature column
        verbose : dump intermediate arrays (laps 190+) for debugging
    return:
        the updated (or newly allocated) array
    """
    dim1, dim2, dim3 = selmat.shape
    #calc totalCnt vector: per-lap pit count summed over cars (NaNs ignored)
    totalCnt = np.nansum(selmat[:, pit_col, :], axis=0).reshape((-1))
    if verbose:
        # BUGFIX: this branch previously printed the undefined name `pits`,
        # raising NameError whenever verbose=True
        print('totalCnt raw:\n')
        print(totalCnt[190:])
    #remove nans
    nanidx = np.isnan(totalCnt)
    totalCnt[nanidx] = 0
    if verbose:
        print('totalCnt after remove nan:\n')
        print(totalCnt[190:])
    if dest_col == -1:
        #create a new data
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode
        newmat = selmat
    for car in range(dim1):
        newmat[car, dest_col, :] = totalCnt
    # sync length to COL_RANK: restore the NaN tail of each car's series
    for rec in newmat:
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            #todo, some invalid nan, remove them
            #rec[dim2, np.isnan(rec[dim2,:])] = 0
            rec[dest_col, -nan_count:] = np.nan
    return newmat
def add_shift_feature(selmat, rank_col=COL_RANK, shift_col=COL_LAPSTATUS, shift_len = 2,
        dest_col = -1,verbose = False):
    """
    Add a left-shifted copy of an existing feature into mat(car, feature, lap).

    For each car, the valid (non-NaN per rank_col) part of shift_col is copied
    shift_len laps earlier, so lap t holds the value of lap t+shift_len.

    warning: these are oracle features (they expose future information);
    be careful not to let future rank positions leak into training targets.

    input:
        selmat    : laptime_data array [car, feature, lap]
        rank_col  : column whose NaN tail defines each car's valid length
        shift_col : feature column to shift
        shift_len : number of laps to shift left
        dest_col  : column to write into; -1 appends a new feature column
        verbose   : unused here; kept for API symmetry with sibling helpers
    return:
        newmat    : selmat updated in place, or a widened copy when dest_col == -1
    """
    dim1, dim2, dim3 = selmat.shape
    if dest_col == -1:
        #create a new data (one extra feature column)
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode
        newmat = selmat
    for car in range(dim1):
        # set empty status by default (NaN everywhere, filled below)
        newmat[car, dest_col, :] = np.nan
        # get valid laps for this car (non-NaN positions of rank_col)
        rec = selmat[car]
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        recnnz = rec[shift_col, ~np.isnan(rec[rank_col,:])]
        reclen = len(recnnz)
        #shift copy: zero-fill the valid range, then move values shift_len laps earlier;
        # the last shift_len valid laps stay 0 (no future value available)
        newmat[car, dest_col, :reclen] = 0
        #newmat[car, dim2, :-shift_len] = selmat[car, shift_col, shift_len:]
        newmat[car, dest_col, :reclen-shift_len] = recnnz[shift_len:]
    # sync length to COL_RANK (not needed: the per-car NaN default above already
    # leaves the tail as NaN)
    #for rec in newmat:
    #    nans, x= nan_helper(rec[rank_col,:])
    #    nan_count = np.sum(nans)
    #    if nan_count > 0:
    #        #todo, some invalid nan, remove them
    #        #rec[dim2, np.isnan(rec[dim2,:])] = 0
    #        rec[dim2, -nan_count:] = np.nan
    return newmat
def update_laptimedata(prediction_length, freq,
                       test_event = 'Indy500-2018',
                       train_ratio=0.8,
                       context_ratio = 0.,
                       shift_len = -1,
                       #target_pred = None,
                       rank_col = COL_RANK,
                       verbose = False):
    """
    update the features in laptime data: create/update the derived features
    (leader pit count, total pit count, and their shifted "oracle" copies)
    for the test event only.

    input:
        laptime_data ; global var, list of [eventid, carids, mat(car,feature,lap)]
        prediction_length ; default for shift_len when shift_len < 0
        shift_len ; laps to shift the oracle features; -1 -> prediction_length
        rank_col ; column used by add_leader_cnt to order cars
    output:
        laptime_data ; the same global, with the test event's matrix updated in place

    NOTE(review): the `test_event` parameter is never used -- the module global
    `_test_event` is consulted instead (as in the sibling functions); confirm
    whether the parameter should override the global.  `freq`, `train_ratio`
    and `context_ratio` are also unused here.
    """
    global laptime_data
    #inplace update
    #_laptime_data = laptime_data.copy()
    _laptime_data = laptime_data
    #get test event
    test_idx = -1
    for idx, _data in enumerate(laptime_data):
        if gvar.events[_data[0]] == _test_event:
            test_idx = idx
            break
    # check shift len
    if shift_len < 0:
        shift_len = prediction_length
    if verbose:
        print('update_laptimedata shift len:', shift_len, test_idx)
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    new_data = []
    if test_idx >= 0:
        _data = laptime_data[test_idx]
        # use to check the dimension of features: if the matrix is still narrow,
        # the derived columns do not exist yet and must be appended (dest_col=-1)
        input_feature_cnt = _data[2].shape[1]
        if verbose:
            if input_feature_cnt < COL_LASTFEATURE + 1:
                print('create new features mode, feature_cnt:', input_feature_cnt)
            else:
                print('update features mode, feature_cnt:', input_feature_cnt)
        # add new features
        # add leaderPitCnt
        #if _data[0]==0:
        #    verbose = True
        #else:
        #    verbose = False
        verbose = False
        #
        # be careful on leader_cnt for the future rank leaking
        #
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_LEADER_PITCNT
        #if not target_pred:
        #    # update leader_cnt by predicted target
        #    data2_intermediate = add_leader_cnt(_data[2], shift_len = shift_len,
        #            rank_col = COL_TARGET_PREDICTED,
        #            dest_col=dest_col, verbose = verbose)
        #else:
        #    # update leader_cnt by true target
        #    data2_intermediate = add_leader_cnt(_data[2], shift_len = shift_len, dest_col=dest_col, verbose = verbose)
        data2_intermediate = add_leader_cnt(_data[2], shift_len = shift_len,
                rank_col = rank_col,
                dest_col=dest_col, verbose = verbose)
        # add totalPit
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_TOTAL_PITCNT
        data2_intermediate = add_allpit_cnt(data2_intermediate, dest_col=dest_col)
        #
        # add shift features, a fixed order, see the MACROS
        #COL_SHIFT_TRACKSTATUS = 11
        #COL_SHIFT_LAPSTATUS = 12
        #COL_SHIFT_LEADER_PITCNT = 13
        #COL_SHIFT_TOTAL_PITCNT = 14
        #
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TRACKSTATUS
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_TRACKSTATUS, shift_len = shift_len)
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LAPSTATUS
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_LAPSTATUS, shift_len = shift_len)
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LEADER_PITCNT
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_LEADER_PITCNT, shift_len = shift_len)
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TOTAL_PITCNT
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_TOTAL_PITCNT, shift_len = shift_len)
        # final
        data2_newfeature = data2_intermediate
        #new_data.append([_data[0], _data[1], data2_newfeature])
        laptime_data[test_idx][2] = data2_newfeature
    return laptime_data
def get_real_features(feature_mode, rec, endpos):
    """
    Build the list of real-valued feature rows selected by the bits set in
    feature_mode, each truncated to endpos laps.

    feature_mode is a bitmask combining the FEATURE_* flags; each flag
    contributes its feature column(s) in a fixed order (FEATURE_STATUS
    contributes both track and lap status).  endpos <= 0 means "use the
    full lap range of rec".
    """
    # check endpos
    if endpos <= 0:
        endpos = rec.shape[1]
    # (flag, columns contributed) in the canonical feature order
    flag_columns = (
        (FEATURE_STATUS, (COL_TRACKSTATUS, COL_LAPSTATUS)),
        (FEATURE_PITAGE, (COL_LAPS_INSTINT,)),
        (FEATURE_LEADER_PITCNT, (COL_LEADER_PITCNT,)),
        (FEATURE_TOTAL_PITCNT, (COL_TOTAL_PITCNT,)),
        (FEATURE_SHIFT_TRACKSTATUS, (COL_SHIFT_TRACKSTATUS,)),
        (FEATURE_SHIFT_LAPSTATUS, (COL_SHIFT_LAPSTATUS,)),
        (FEATURE_SHIFT_LEADER_PITCNT, (COL_SHIFT_LEADER_PITCNT,)),
        (FEATURE_SHIFT_TOTAL_PITCNT, (COL_SHIFT_TOTAL_PITCNT,)),
    )
    features = []
    for flag, cols in flag_columns:
        if test_flag(feature_mode, flag):
            for col in cols:
                features.append(rec[col, :endpos])
    return features
# NOTE(review): this is an exact duplicate of get_real_features defined just
# above; this second definition shadows the first -- consider deleting one.
def get_real_features(feature_mode, rec, endpos):
    """
    Construct the real-value feature vector selected by the bits of
    feature_mode, each row truncated to endpos laps (endpos <= 0 means
    the full lap range of rec).

    legacy code:
        real_features = {
            FEATURE_STATUS:[rec[COL_TRACKSTATUS,:],rec[COL_LAPSTATUS,:]],
            FEATURE_PITAGE:[rec[COL_TRACKSTATUS,:],rec[COL_LAPSTATUS,:],rec[COL_LAPS_INSTINT,:]],
            FEATURE_LEADERPITCNT:[rec[COL_TRACKSTATUS,:],rec[COL_LAPSTATUS,:],rec[COL_LEADER_PITCNT,:]],
            FEATURE_TOTALPITCNT:[rec[COL_TRACKSTATUS,:],rec[COL_LAPSTATUS,:],rec[COL_TOTAL_PITCNT,:]]
        }
        real_features[feature_mode]
    """
    features = []
    #check endpos
    if endpos <=0 :
        endpos = rec.shape[1]
    # each FEATURE_* bit contributes its column(s), in this fixed order
    if test_flag(feature_mode, FEATURE_STATUS):
        features.append(rec[COL_TRACKSTATUS,:endpos])
        features.append(rec[COL_LAPSTATUS,:endpos])
    if test_flag(feature_mode, FEATURE_PITAGE):
        features.append(rec[COL_LAPS_INSTINT,:endpos])
    if test_flag(feature_mode, FEATURE_LEADER_PITCNT):
        features.append(rec[COL_LEADER_PITCNT,:endpos])
    if test_flag(feature_mode, FEATURE_TOTAL_PITCNT):
        features.append(rec[COL_TOTAL_PITCNT,:endpos])
    if test_flag(feature_mode, FEATURE_SHIFT_TRACKSTATUS):
        features.append(rec[COL_SHIFT_TRACKSTATUS,:endpos])
    if test_flag(feature_mode, FEATURE_SHIFT_LAPSTATUS):
        features.append(rec[COL_SHIFT_LAPSTATUS,:endpos])
    if test_flag(feature_mode, FEATURE_SHIFT_LEADER_PITCNT):
        features.append(rec[COL_SHIFT_LEADER_PITCNT,:endpos])
    if test_flag(feature_mode, FEATURE_SHIFT_TOTAL_PITCNT):
        features.append(rec[COL_SHIFT_TOTAL_PITCNT,:endpos])
    return features
#
# interface with QuickTest
#
def set_laptimedata(newdata):
    """Replace the global laptime_data with newdata, reporting the test
    event's record shape (the index is located in the *current* global)."""
    global laptime_data
    # find the test event in the existing data
    test_idx = next(
        (i for i, entry in enumerate(laptime_data)
         if gvar.events[entry[0]] == _test_event),
        -1,
    )
    if test_idx < 0:
        print('Error, test event not found in laptimedata', _test_event)
    else:
        print('Set a new global laptime_data, test_event=%s, cnt=%d, shape=%s'%(_test_event, len(newdata), newdata[test_idx][2].shape))
    laptime_data = newdata
#
#
#
def load_data(event, year=0):
    """
    Load one race's lap records from ../data/final/C_<event>[-<year>].csv.

    input:
        event : event name used in the csv filename
        year  : optional year suffix (int); 0 means the filename has no year part
    return:
        alldata  : raw dataframe for all cars
        rankdata : one record per (car, lap), keeping the earliest elapsed_time
        acldata  : completed-laps dataframe built from alldata (see make_cl_data)
    """
    #inputfile = '../data/final/C_'+ event +'-' + year + '-final.csv'
    if year > 0:
        # bugfix: year is an int here, so it must be stringified before
        # concatenation (the old `+ year +` raised TypeError)
        inputfile = '../data/final/C_' + event + '-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'
    dataset = pd.read_csv(inputfile)

    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1

    # cars that finished the race (reached the final lap)
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)

    # keep a full copy; restrict `dataset` to finishers only
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]

    # one record per (car, lap), keeping the earliest elapsed_time occurrence
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # note: the old unused `cldata = make_cl_data(dataset)` was removed;
    # only the all-cars variant is returned
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """
    Build the indycar completed-laps dataframe:
    car_number, completed_laps, rank, elapsed_time, rank_diff, time_diff,
    current_status, track_status, lap_status.

    rank_diff/time_diff are per-car first differences; the first lap of
    each car is forced to 0.
    """
    # pick up data with valid rank: one record per (car, lap), keeping the
    # earliest elapsed_time occurrence (index name breaks sort ties stably)
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
                          "last_pitted_lap","start_position","laps_led"], axis=1)

    # per-car first differences; rows where the car changes start a new series
    # (uni_ds is already sorted by car_number then lap, so shift(1) compares
    # each row with the previous row of the same ordering)
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    # bugfix: use .loc instead of chained indexing (`uni_ds['rank_diff'][mask]`),
    # which triggers SettingWithCopy and silently fails under pandas copy-on-write
    uni_ds.loc[mask, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[mask, 'time_diff'] = 0

    df = uni_ds[['car_number','completed_laps','rank','elapsed_time',
                 'rank_diff','time_diff',"current_status", "track_status", "lap_status"]]
    return df
# In[5]:
def nan_helper(y):
    """Locate NaNs in a 1-D array and provide an index converter.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, boolean mask marking the NaN positions
        - index, a function mapping a logical mask to integer indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    nans = np.isnan(y)

    def to_indices(mask):
        # convert a boolean mask to the integer positions of its True entries
        return mask.nonzero()[0]

    return nans, to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
# pit model is separate for each car
def load_model(prediction_length, model_name, trainid, epochs=1000, exproot='../models/remote'):
    """
    Deserialize (or construct) the forecasting predictor used by the simulator.

    input:
        prediction_length : forecast horizon in laps (encoded in the model dir name)
        model_name        : one of the supported identifiers below
        trainid           : training run id, part of the model directory name
        epochs            : training epochs encoded in the directory name
                            (the deepAR-Oracle / deepARW-MLP names hardcode e1000)
        exproot           : root directory of the serialized models
    return:
        predictor         : a gluonts Predictor ready for inference
    raises:
        ValueError        : when model_name is not supported

    NOTE(review): `freq` and `gvar.context_length` used by the naive/zero/arima
    branches are module globals, not parameters -- confirm they are set first.
    """
    with mx.Context(mx.gpu(7)):
        #rootdir = f'../models/{exproot}/{_dataset_id}/{_task_id}-{trainid}/'
        rootdir = f'{exproot}/{_dataset_id}/{_task_id}-{trainid}/'
        # deepAR-Oracle
        if model_name == 'deepAR-Oracle' or model_name == 'deepAR-MLP':
            model=f'deepAR-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        #deeparw-oracle
        elif model_name == 'weighted-oracle' or model_name == 'deepARW-Oracle' or model_name == 'deepARW-MLP':
            model=f'deepARW-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        elif model_name == 'oracle' or (model_name.find('pitmodel') == 0):
            model=f'deepARW-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
        elif model_name == 'joint' or model_name == 'deepAR-multi':
            model=f'deepAR-multi-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
        # transformer
        elif model_name == 'transformer' or model_name == 'Transformer':
            model=f'Transformer-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
        elif model_name == 'Transformer-MLP' or model_name == 'Transformer-Oracle':
            model=f'Transformer-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
        elif model_name == 'TransformerW-MLP' or model_name == 'TransformerW-Oracle':
            model=f'TransformerW-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
        elif model_name == 'TransformerF-MLP' or model_name == 'TransformerF-Oracle':
            model=f'TransformerF-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
        elif model_name == 'TransformerWF-MLP' or model_name == 'TransformerWF-Oracle':
            model=f'TransformerWF-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
        elif model_name == 'TransformerWFM-MLP' or model_name == 'TransformerWFM-Oracle':
            model=f'TransformerWFM-Oracle-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...{model}...done!, ctx:{predictor.ctx}')
        # deepAR
        elif model_name == 'deepAR' or model_name == 'standard':
            model=f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e{epochs}-r1_oracle_t{prediction_length}'
            #model=f'deepAR-{_task_id}-all-indy-f1min-t{prediction_length}-e1000-r1_deepar_t{prediction_length}'
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor =  Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
        # naive
        elif model_name == 'naive':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
        # zero, zero keeps the rank unchange
        elif model_name == 'zero':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = ZeroPredictor(freq= freq, prediction_length = prediction_length)
        # arima
        elif model_name == 'arima':
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                    prediction_length = prediction_length,trunc_length=gvar.context_length)
        else:
            # bugfix: previously fell through to `return predictor`, crashing
            # with an unrelated NameError; fail explicitly instead
            print(f'error: model {model_name} not support yet!')
            raise ValueError(f'error: model {model_name} not support yet!')
        return predictor
#
# simulator
#
def get_pitlaps(verbose = True, prediction_length=2):
    """
    collect pitlaps info from COL_LAPSTATUS

    input:
        laptime_data ;  global var
        _test_event ;   global var, event to scan
        _train_len ;    minimum laps for a ts (otherwise, discard)
        global_carids ; carno -> carid mapping
        _inlap_status ; global; 1/2 filters out inlap records assuming pit
                        entries come in (inlap, pit) or (pit, inlap) pairs
                        -- NOTE(review): pairing assumption not verified here
        _include_endpit ; global; when set, the last lap counts as a pit
    return:
        ret_pitlaps ; sorted unique laps which are pitstops for some car
        all_pitlaps ; carno -> list of that car's pit laps
        max_lap     ; longest ts length in the event
    """
    run_ts = _run_ts
    #all_pitlaps = [] # carno -> pitstops
    all_pitlaps = {} # carno -> pitstops
    max_lap = 0
    for _data in laptime_data:
        if gvar.events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < _train_len + prediction_length):
                if verbose:
                    print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            carno = _data[1][rowid]
            carid = global_carids[_data[1][rowid]]
            static_cat = [carid]
            #print(f'carno:{carno}, totallen={totallen}')
            #first, get target a copy
            # target can be COL_XXSTATUS
            lap_status = rec[COL_LAPSTATUS, :]
            pitstops = np.where(lap_status == 1)[0]
            # filter out inlaps (when _inlap_status > 0)
            if _inlap_status !=0:
                if _inlap_status == 1:
                    #remove inlaps in previous of pit stops (keep odd positions)
                    pitstops_tmp = [pitstops[x] for x in range(1, len(pitstops), 2)]
                    pitstops = pitstops_tmp
                elif _inlap_status == 2:
                    #remove inlaps in next lap of pit stops (keep even positions)
                    pitstops_tmp = [pitstops[x] for x in range(0, len(pitstops), 2)]
                    pitstops = pitstops_tmp
            #all_pitlaps.append(list(pitstops))
            all_pitlaps[carno] = list(pitstops)
            # append the end lap
            if _include_endpit:
                all_pitlaps[carno].append(totallen-1)
    #return: flatten all cars' pit laps into a sorted unique list
    allset = []
    for l in all_pitlaps.keys():
        allset.extend(all_pitlaps[l])
    ret_pitlaps = sorted(list(set(allset)))
    return ret_pitlaps, all_pitlaps, max_lap
def get_nextpit(pitlaps, startlap):
    """
    input:
        pitlaps ; {carno: [pit laps]} for all the cars
        startlap ;
    return:
        nextpit_map ; carno -> first pit lap after startlap
                      (cars with no later pit are omitted)
        maxpit      ; furthest next pit among the cars that pit exactly
                      at startlap, or -1 when no such car pits again
    """
    # cars that pit exactly on startlap
    hit_cars = [carno for carno, laps in pitlaps.items() if startlap in laps]

    # first pit lap strictly after startlap, per car (first in list order)
    nextpit_map = {}
    for carno, laps in pitlaps.items():
        upcoming = next((lap for lap in laps if lap > startlap), None)
        if upcoming is not None:
            nextpit_map[carno] = upcoming

    # furthest next pit among the startlap hitters
    maxpit = max((nextpit_map[c] for c in hit_cars if c in nextpit_map),
                 default=-1)
    return nextpit_map, maxpit
def sim_init():
    """
    Prepare the global laptime_data for simulation:
    extend the test event's feature space to COL_ENDPOS columns and snapshot
    the pit-model-related features (track/lap status, stint counters) into
    their *_SAVE columns so they can be restored between simulation runs.
    """
    global laptime_data
    #get test event
    test_idx = -1
    for idx, _data in enumerate(laptime_data):
        if gvar.events[_data[0]] == _test_event:
            test_idx = idx
            break
    # NOTE(review): when the event is missing test_idx stays -1 and this print
    # silently reads the *last* entry via negative indexing -- confirm intended
    print('sim_init: input laptime_data, shape=', len(laptime_data), laptime_data[test_idx][2].shape, test_idx)
    #update this laptime record
    if test_idx >= 0:
        _data = laptime_data[test_idx][2]
        dim1, dim2, dim3 = _data.shape
        if dim2 < COL_ENDPOS:
            #create a new data (widened to COL_ENDPOS feature columns)
            newmat = np.zeros((dim1, COL_ENDPOS, dim3))
            newmat[:,:dim2,:] = _data.copy()
        else:
            newmat = _data
        #save pit model related features into the *_SAVE snapshot columns
        newmat[:,COL_TRACKSTATUS_SAVE,:] = newmat[:,COL_TRACKSTATUS, :]
        newmat[:,COL_LAPSTATUS_SAVE,:] = newmat[:,COL_LAPSTATUS, :]
        newmat[:,COL_CAUTION_LAPS_INSTINT_SAVE,:] = newmat[:,COL_CAUTION_LAPS_INSTINT, :]
        newmat[:,COL_LAPS_INSTINT_SAVE, :] = newmat[:,COL_LAPS_INSTINT, :]
        # reset: install the widened matrix only when one was created
        if dim2 < COL_ENDPOS:
            laptime_data[test_idx][2] = newmat
        print('sim_init: after laptime_data, shape=', len(laptime_data), laptime_data[test_idx][2].shape)
def update_lapstatus(startlap, pitmodel_trainevent = 'Indy500'):
    """
    Re-simulate the lap status of every car in the test event from startlap
    onward, by calling update_onets() per car.

    The pit model was trained on pitmodel_trainevent; when the test event's
    track length differs, stint lengths are rescaled by the ratio of the two
    events' track parameters (gvar.events_info[...][1]).
    """
    #check the test_event, the same as the training event?
    eid = _test_event.split('-')[0]
    pitscale = gvar.events_info[pitmodel_trainevent][1] *1.0 / gvar.events_info[eid][1]
    run_ts = _run_ts
    for _data in laptime_data:
        if gvar.events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        # process for each ts (one car per row)
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
            carno = _data[1][rowid]
            update_onets(rec, startlap, carno, pitscale = pitscale)
# Global pit-decision model; must be set by the caller before simulation
# (update_onets() and debug_pitmodel() call _pitmodel.predict(...)).
_pitmodel = None
def update_onets(rec, startlap, carno, pitscale = 1.):
    """
    update lapstatus after startlap based on rec by the pit prediction model

    Restores the saved (ground-truth) status columns up to startlap, then
    repeatedly samples the pit model to place simulated pit stops until the
    end of the car's valid laps, maintaining the stint counters.

    input:
        rec      ; a ts with multiple features COL_XXX (modified in place)
        startlap ; simulation start lap; laps <= startlap keep ground truth
        carno    ; car number, used for debug tracing only
        pitscale ; track-length scale between pit-model training event and
                   this event; stint lengths are divided/multiplied by it
    return:
        None ; rec is updated in place for COL_LAPSTATUS,
               COL_CAUTION_LAPS_INSTINT, COL_LAPS_INSTINT
    """
    # determine the car's valid length (non-NaN part of the target row)
    nans, x= nan_helper(rec[_run_ts,:])
    nan_count = np.sum(nans)
    recx = rec[:, ~np.isnan(rec[_run_ts,:])]
    # remove short ts
    totallen = recx.shape[1]
    if startlap >= totallen:
        return

    #reset status, then restore ground truth for laps :startlap + 1
    endpos = startlap + 1
    rec[COL_TRACKSTATUS,:] = 0
    rec[COL_LAPSTATUS,:] = 0
    rec[COL_TRACKSTATUS,:endpos] = rec[COL_TRACKSTATUS_SAVE, :endpos]
    rec[COL_LAPSTATUS,:endpos] = rec[COL_LAPSTATUS_SAVE, :endpos]
    rec[COL_CAUTION_LAPS_INSTINT,:endpos] = rec[COL_CAUTION_LAPS_INSTINT_SAVE, :endpos]
    rec[COL_LAPS_INSTINT, :endpos] = rec[COL_LAPS_INSTINT_SAVE, :endpos]

    debug_report('start update_onets', rec[COL_LAPSTATUS], startlap, carno)

    #loop: predict the next pit position until the end of the ts
    curpos = startlap
    while True:
        caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, curpos])
        laps_instint = int(rec[COL_LAPS_INSTINT, curpos])

        #scale stint counters into the pit model's training-track units
        if pitscale != 1.0:
            caution_laps_instint = int(caution_laps_instint / pitscale)
            laps_instint = int(laps_instint / pitscale)

        pred_pit_laps = _pitmodel.predict(caution_laps_instint, laps_instint) + _pitmodel_bias
        #update by pitscale: convert the predicted stint back to this track
        pred_pit_laps = int(pred_pit_laps * pitscale)

        nextpos = curpos + pred_pit_laps - laps_instint
        #debug
        #if carno == 12:
        #    print('pitmodel: startlap={}, laps_instint={}, cuation_laps={}, \
        #            nextpos={}'.format(curpos, laps_instint, caution_laps_instint, nextpos))

        if nextpos >= totallen:
            # predicted pit falls beyond the race: fill counters to the end, stop
            nextpos = totallen - 1
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos+1] = caution_laps_instint
            for _pos in range(curpos+1, nextpos+1):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            break
        else:
            #if (pred_pit_laps > laps_instint) and (nextpos < totallen):
            # a valid pit
            rec[COL_LAPSTATUS, nextpos] = 1
            if _inlap_status != 0:
                #inlap is 'P': mark the neighboring lap as part of the stop
                if _inlap_status == 1 :
                    #rec[COL_LAPSTATUS, nextpos-1] = _inlap_status
                    rec[COL_LAPSTATUS, nextpos-1] = 1
                else:
                    #todo: no boudary check (nextpos+1 may be past totallen-1)
                    #rec[COL_LAPSTATUS, nextpos+1] = _inlap_status
                    rec[COL_LAPSTATUS, nextpos+1] = 1
            # counters run up to the pit, then reset at the pit lap
            rec[COL_CAUTION_LAPS_INSTINT, curpos+1: nextpos] = caution_laps_instint
            rec[COL_CAUTION_LAPS_INSTINT, nextpos] = 0
            for _pos in range(curpos+1, nextpos):
                rec[COL_LAPS_INSTINT, _pos] = rec[COL_LAPS_INSTINT, _pos - 1] + 1
            rec[COL_LAPS_INSTINT, nextpos] = 0
            #go forward
            curpos = nextpos

    debug_report('after update_onets', rec[COL_LAPSTATUS], startlap, carno)
    return
def debug_pitmodel(startlap, carno, laps_instint, caution_laps_instint, samplecnt=1000):
    """
    Sample the pit model repeatedly for one stint state.
    ret:
        list of predicted next-pit lap positions, one per sample
    """
    # nextpos = startlap + predicted_stint - laps_instint
    base = startlap - laps_instint
    return [base + _pitmodel.predict(caution_laps_instint, laps_instint)
            for _ in range(samplecnt)]
#debug tracking status
#status matrix : laps x ( endCol x 5 features)
#features: target, lapstatus, lap_instint, caution_instint, trackstatus
# Collected by the debug helpers below; keyed by simulation step id.
_status_mat = {} # stepid -> status matrix
def debug_report_mat(startlap, maxnext):
    """
    output the status of the simulation

    NOTE(review): this function appears unfinished -- fixedWidth/endCol and
    the parameters are never used, and the per-car loop computes `rec`
    without emitting anything.
    """
    fixedWidth = 5
    endCol = 4
    run_ts = _run_ts
    for _data in laptime_data:
        if gvar.events[_data[0]] != _test_event:
            continue
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_lap = int(np.max(ts_len))
        #header carno | lap#...
        #fixed width
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid]
# Car numbers to trace in the debug_* helpers; empty list disables all output.
_debug_carlist = []
#_debug_carlist = [12]
def debug_report_ts(msg, rec, startlap, carno, col= COL_LAPSTATUS):
    """Print one feature row of rec split at startlap, for tracked cars only."""
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[col, : startlap + 1])
        print('='*10)
        print(rec[col, startlap + 1:])
def debug_report(msg, rec, startlap, carno):
    """Print a 1-D status array split at startlap, for tracked cars only."""
    if carno in _debug_carlist:
        print(f'--------- {msg}: {startlap} ----------')
        print(rec[: startlap + 1])
        print('='*10)
        print(rec[startlap + 1:])
def debug_print(msg):
    """Print msg only when debug tracing is enabled (some car is tracked)."""
    if _debug_carlist:
        print(msg)
# works on predicted lap status
def sim_onestep_pred(predictor, prediction_length, freq,
startlap, endlap,
oracle_mode = MODE_ORACLE,
sample_cnt = 100,
verbose = False
):
"""
input:
parameters ; same as longterm_predict, make_dataset_byevent
startlap
endlap
return:
forecast ; {}, carno -> 5 x totallen matrix
0,: -> lapstatus
1,: -> true target
2,: -> pred target
3, -> placeholder
4, -> placeholder
forecast_samples; save the samples, the farest samples
{}, carno -> samplecnt of the target
"""
global laptime_data
run_ts= _run_ts
test_event = _test_event
feature_mode = _feature_mode
context_ratio = _context_ratio
train_len = _train_len
start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
test_set = []
forecasts_et = {}
forecasts_samples = {}
#_laptime_data = laptime_data.copy()
_laptime_data = laptime_data
endpos = startlap + prediction_length + 1
#while(endpos <= endlap + prediction_length + 1):
while(endpos <= endlap + prediction_length):
#make the testset
#_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
carno2rowid = {}
_test = []
for _data in _laptime_data:
if gvar.events[_data[0]] != test_event:
#jump out
continue
#statistics on the ts length
ts_len = [ _entry.shape[1] for _entry in _data[2]]
max_len = int(np.max(ts_len))
#ipdb.set_trace()
if verbose:
print(f'after ====event:{gvar.events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}, cars={_data[2].shape[0]}')
# process for each ts
for rowid in range(_data[2].shape[0]):
# rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
rec = _data[2][rowid].copy()
rec_raw = _data[2][rowid].copy()
#remove nan(only tails)
nans, x= nan_helper(rec[run_ts,:])
nan_count = np.sum(nans)
rec = rec[:, ~np.isnan(rec[run_ts,:])]
# remove short ts
totallen = rec.shape[1]
if ( totallen < train_len + prediction_length):
if verbose:
print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
continue
if endpos > totallen:
continue
carno = _data[1][rowid]
carid = global_carids[_data[1][rowid]]
static_cat = [carid]
#save to carno2rowid map
if carno not in carno2rowid:
carno2rowid[carno] = rowid
#first, get target a copy
# target can be COL_XXSTATUS
#target_val = rec[run_ts,:].copy().astype(np.float32)
lap_status = rec[COL_LAPSTATUS, :].copy()
track_status = rec[COL_TRACKSTATUS, :].copy()
pitage_status = rec[COL_LAPS_INSTINT,:].copy()
# <3, totallen>
if carno not in forecasts_et:
forecasts_et[carno] = np.zeros((5, totallen))
forecasts_et[carno][:,:] = np.nan
forecasts_et[carno][0,:] = rec[COL_LAPSTATUS_SAVE, :].copy()
forecasts_et[carno][1,:] = rec[run_ts,:].copy().astype(np.float32)
forecasts_et[carno][2,:] = rec[run_ts,:].copy().astype(np.float32)
# for p-risk
forecasts_samples[carno] = np.zeros((sample_cnt))
# prepare TARGET_PREDICTED in laptime
_data[2][rowid][COL_TARGET_PREDICTED, :] = np.nan
_data[2][rowid][COL_TARGET_PREDICTED, :totallen] = rec[run_ts,:].copy().astype(np.float32)
# forecasts_et will be updated by forecasts
target_val = forecasts_et[carno][2,:]
# selection of features
if test_flag(oracle_mode, MODE_NOTRACK) or test_flag(oracle_mode, MODE_ORACLE_LAPONLY):
rec[COL_TRACKSTATUS, :] = 0
if test_flag(oracle_mode, MODE_NOLAP) or test_flag(oracle_mode, MODE_ORACLE_TRACKONLY):
rec[COL_LAPSTATUS, :] = 0
test_rec_cnt = 0
# RUN Prediction for single record
track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
# test mode
if test_flag(oracle_mode, MODE_TESTCURTRACK):
# since nan does not work, use cur-val instead
track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
#track_rec[-prediction_length:] = random.randint(0,1)
#lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
lap_rec[-prediction_length:] = 0
#for pitage, just assume there is no pit
start_pitage = pitage_rec[-prediction_length - 1]
pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
elif test_flag(oracle_mode, MODE_TESTZERO):
#set prediction part as nan
#track_rec[-prediction_length:] = np.nan
#lap_rec[-prediction_length:] = np.nan
track_rec[-prediction_length:] = 0
lap_rec[-prediction_length:] = 0
#for pitage, just assume there is no pit
start_pitage = pitage_rec[-prediction_length - 1]
pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
if test_flag(oracle_mode, MODE_PREDPIT):
#todo
#lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
# laps_instint,prediction_length)
#for pitage, use the predicted lap info to update pitage
start_pitage = pitage_rec[-prediction_length - 1]
for pos in range(prediction_length):
if lap_rec[-prediction_length + pos]==0:
pitage_rec[-prediction_length + pos] = start_pitage+1
else:
#new pit
start_pitage = 0
pitage_rec[-prediction_length + pos] = start_pitage
# add to test set
#train real features
real_features = get_real_features(feature_mode, rec, endpos)
if _joint_train:
# ground truth in forecasts_et, (RANK only)
#target_cols = [run_ts, COL_LAPSTATUS]
target_cols = [2, 0]
#target_val = rec[target_cols].copy().astype(np.float32)
target_val = forecasts_et[carno][target_cols,:endpos].astype(np.float)
_test.append({'target': target_val,
'start': start,
'feat_static_cat': static_cat,
'feat_dynamic_real': real_features
}
)
else:
_test.append({'target': target_val[:endpos].astype(np.float32),
'start': start,
'feat_static_cat': static_cat,
'feat_dynamic_real': real_features
}
)
test_rec_cnt += 1
#debug
#debug_report('simu_onestep', rec, startlap, carno, col= _run_ts)
#debug_report(f'simu_onestep: {startlap}-{endlap}, endpos={endpos}', target_val[:endpos], startlap, carno)
#jump out
# keep _data as current
break
# end of for each ts
# RUN Prediction here
test_ds = ListDataset(_test, freq=freq,one_dim_target= False if _joint_train else True)
forecast_it, ts_it = make_evaluation_predictions(
dataset=test_ds, # test dataset
predictor=predictor, # predictor
num_samples=sample_cnt, # number of sample paths we want for evaluation
)
forecasts = list(forecast_it)
tss = list(ts_it)
#save the forecast results
ds_iter = iter(test_ds)
for idx in range(len(test_ds)):
test_rec = next(ds_iter)
#global carid
carno = decode_carids[test_rec['feat_static_cat'][0]]
if _joint_train:
#
# joint train , multi dimensional target
# samples – Array of size (num_samples, prediction_length) (1D case) or (num_samples, prediction_length, target_dim)
#
if _use_mean:
forecast_laptime_mean = np.mean(forecasts[idx].samples[:,:,0], axis=0).reshape((prediction_length))
else:
forecast_laptime_mean = np.median(forecasts[idx].samples[:,:,0], axis=0).reshape((prediction_length))
forecasts_furtherest_samples = forecasts[idx].samples[:,-1,0].reshape(-1)
else:
# 1 dimensional target
if _use_mean:
forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_length))
else:
forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_length))
forecasts_furtherest_samples = forecasts[idx].samples[:,-1].reshape(-1)
#update the forecasts , ready to use in the next prediction(regresive forecasting)
forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])] = forecast_laptime_mean.copy()
# update laptime_data
rowid = carno2rowid[carno]
_data[2][rowid][COL_TARGET_PREDICTED,len(tss[idx]) - prediction_length:len(tss[idx])] = forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])]
#debug
if False:
#if carno==13:
#print('samples shape:', forecasts[idx].samples.shape)
print('tss shape:', tss[idx].shape, 'endpos:', endpos)
print('forecast mean:', forecast_laptime_mean, len(tss[idx]) - prediction_length)
print('target true:', forecasts_et[carno][1, len(tss[idx]) - prediction_length:len(tss[idx])])
print('target pred:', forecasts_et[carno][2, len(tss[idx]) - prediction_length:len(tss[idx])])
#save the samples, the farest samples
#forecasts_samples[carno][:] = forecasts[idx].samples[:,-1].reshape(-1)
forecasts_samples[carno][:] = forecasts_furtherest_samples
#update laptimedata by new predictions
#save predictions into laptime data
# update featues inlaptime data
laptime_data = update_laptimedata(prediction_length, freq,
test_event = _test_event,
train_ratio=0, context_ratio = 0.,shift_len = prediction_length,
rank_col = COL_TARGET_PREDICTED
)
#go forward
endpos += prediction_length
return forecasts_et, forecasts_samples
# pred pit differs to true pit
def get_acc_onestint_pred(forecasts, startlap, nextpit, nextpit_pred, trim=2, currank = False):
    """
    Evaluate per-car rank change over one stint when the *predicted* next pit
    lap may differ from the true one: the true pit lap fixes the ground-truth
    end rank, the predicted pit lap picks the predicted end rank.

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; eval for the stint start from startlap only
        nextpit ; dict carno -> true next pitstop lap
        nextpit_pred ; dict carno -> predicted next pitstop lap
        currank ; True -> naive baseline that predicts "rank stays the same"
    output:
        rows of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign, pitpos, pitpos_pred]
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]
        #lap status condition
        # module-level _inlap_status selects the stint-boundary test:
        # 0 -> pit flag on startlap only; 1 -> also on the previous lap
        # (inlap); 2 -> also on the next lap (outlap).
        # NOTE(review): lapstatus_cont stays unbound for any other
        # _inlap_status value -- assumed to always be in {0, 1, 2}.
        if _inlap_status == 0:
            lapstatus_cont = (forecasts[carno][0, startlap] == 1)
        elif _inlap_status == 1:
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap-1] == 1))
        elif _inlap_status == 2:
            lapstatus_cont = ((forecasts[carno][0, startlap] == 1) and (forecasts[carno][0, startlap+1] == 1))
        if carno in _debug_carlist:
            _debug_msg = 'startlap=%d, total=%d, pitstop status = %s, nextpit=%s, nextpit_pred=%s'%(startlap, lapnum, lapstatus_cont,
                    'none' if (carno not in nextpit) else nextpit[carno],
                    'none' if (carno not in nextpit_pred) else nextpit_pred[carno],
                    )
            debug_print(_debug_msg)
        # check the lap status
        #if ((startlap < lapnum) and (forecasts[carno][0, startlap] == 1)):
        if ((startlap < lapnum) and (lapstatus_cont == True)):
            startrank = true_rank[startlap-trim]
            if not carno in nextpit:
                continue
            pitpos = nextpit[carno]
            if np.isnan(pitpos):
                continue
            #todo, use the true prediction that longer than maxlap
            if _force_endpit_align:
                # fall back to the true pit lap when no usable prediction exists
                if not carno in nextpit_pred:
                    #continue
                    pitpos_pred = pitpos
                else:
                    pitpos_pred = nextpit_pred[carno]
                    if np.isnan(pitpos_pred):
                        pitpos_pred = pitpos
            else:
                # strict mode: skip cars without a valid predicted pit lap
                if not carno in nextpit_pred:
                    continue
                pitpos_pred = nextpit_pred[carno]
                if np.isnan(pitpos_pred):
                    #set prediction to the end
                    continue
            endrank = true_rank[pitpos-trim]
            #endrank_pred = true_rank[pitpos_pred-trim]
            diff = endrank - startrank
            sign = get_sign(diff)
            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                # predicted end rank is read at the *predicted* pit lap
                pred_endrank = pred_rank[pitpos_pred-trim]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            rankret.append([carno, startlap, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign,
                            pitpos, pitpos_pred
                           ])
    return rankret
# pred pit differs to true pit
def get_acc_onestep_shortterm(forecasts, startlap, endlap, trim=0, currank = False):
    """
    Evaluate per-car rank change over a fixed short-term window
    [startlap, endlap], without any pit-stop condition.

    input:
        trim ; laps trimmed off the window boundaries
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; first lap of the evaluation window
        endlap ; last lap of the evaluation window (nan -> car skipped)
        currank ; True -> naive baseline that predicts "rank stays the same"
    output:
        rows of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]
        # check the lap status
        #if ((startlap < lapnum) and (forecasts[carno][0, startlap] == 1)):
        if startlap < lapnum:
            startrank = true_rank[startlap-trim]
            if np.isnan(endlap):
                continue
            endrank = true_rank[endlap-trim]
            diff = endrank - startrank
            sign = get_sign(diff)
            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                pred_endrank = pred_rank[endlap-trim]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            rankret.append([carno, startlap, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                           ])
    return rankret
# works when pred pitstop == true pitstop
def get_acc_onestint(forecasts, startlap, nextpit, trim=2, currank = False):
    """
    Evaluate per-car rank change over one stint; works only when the
    predicted pitstop equals the true pitstop (both read at nextpit[carno]).

    input:
        trim ; steady lap of the rank (before pit_inlap, pit_outlap)
        forecasts; carno -> [5,totallen]
            0; lap_status
            3; true_rank
            4; pred_rank
        startlap ; eval for the stint start from startlap only
        nextpit ; dict carno -> next pitstop lap
        currank ; True -> naive baseline that predicts "rank stays the same"
    output:
        rows of [carno, startlap, startrank, endrank, diff, sign,
                 pred_endrank, pred_diff, pred_sign]
    """
    rankret = []
    for carno in forecasts.keys():
        lapnum = len(forecasts[carno][1,:])
        true_rank = forecasts[carno][3,:]
        pred_rank = forecasts[carno][4,:]
        # check the lap status: only cars that actually pit at startlap count
        if ((startlap < lapnum) and (forecasts[carno][0, startlap] == 1)):
            startrank = true_rank[startlap-trim]
            if not carno in nextpit:
                continue
            pitpos = nextpit[carno]
            if np.isnan(pitpos):
                continue
            endrank = true_rank[pitpos-trim]
            diff = endrank - startrank
            sign = get_sign(diff)
            if currank:
                #force into currank model, zero doesn't work here
                pred_endrank = startrank
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            else:
                pred_endrank = pred_rank[pitpos-trim]
                pred_diff = pred_endrank - startrank
                pred_sign = get_sign(pred_diff)
            rankret.append([carno, startlap, startrank,
                            endrank, diff, sign,
                            pred_endrank, pred_diff, pred_sign
                           ])
    return rankret
#
# simulation
#
def run_simulation_stint(predictor, prediction_length, freq,
        carno, stintid, loopcnt,
        datamode = MODE_ORACLE):
    """
    simulation for one car at specific stint, repeated loopcnt times
    (Monte-Carlo over the stochastic pit model).

    input:
        predictor ; trained gluonts predictor
        prediction_length ; forecast horizon in laps
        freq ; pandas frequency string of the series
        carno ; car number to simulate
        stintid ; index into this car's pit-lap list (pitmat[carno])
        loopcnt ; number of simulation repetitions
        datamode ; oracle-mode bit flags forwarded to sim_onestep_pred
    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation
            2. eval stint performance
    return:
        (df, full_samples, full_tss, maxnext_pred); full_samples/full_tss are
        currently left empty (the eval_full_samples call is commented out).
        NOTE(review): the unsupported-_exp_id branch does a bare `return`
        (returns None), unlike the normal 4-tuple return.
    """
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    #init samples array
    full_samples = {}
    full_tss = {}
    #here, test only one stint for carno and stintid
    pitlap = pitmat[carno][stintid]
    for runid in range(loopcnt):
    #for pitlap in allpits:
        #1. update lap status
        debug_print(f'start pitlap: {pitlap}')
        # only re-predict lap status when a learned pit model is configured;
        # the 'oracle' string means ground-truth pit laps are used as-is
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        debug_print(f'update lapstatus done.')
        #2. get maxnext
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        #only for one car
        maxnext = nextpit[carno]
        maxnext_pred = nextpit_pred[carno]
        #debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                #print('nextpit:', nextpit[12], nextpit_pred[12], 'maxnext:', maxnext, maxnext_pred)
                #debugstr = f'nextpit: {nextpit[]}, {nextpit_pred[12]}, maxnext: {maxnext}, {maxnext_pred}'
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)
        #run one step sim from pitlap to maxnext
        #to get the forecast_sample, set max = mexnext_pred only,
        #rather than max(maxnext,maxnext_pred)
        #
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                   pitlap, maxnext_pred,
                   oracle_mode = datamode,
                   sample_cnt = 100
                   )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            return
        ## evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        #add endlap
        #_ = [x.append(maxnext_pred) for x in ret]
        rankret.extend(ret)
        ## add to full_samples
        #eval_full_samples(maxnext_pred,
        #        forecast_samples, forecast,
        #        full_samples, full_tss)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                'endrank', 'diff', 'sign',
                               'pred_endrank', 'pred_diff', 'pred_sign',
                               'endlap','pred_endlap'
                               ])
    return df, full_samples, full_tss, maxnext_pred
def run_simulation_pred(predictor, prediction_length, freq,
        datamode = MODE_ORACLE, verbose = False):
    """
    Full-race simulation driven by the configured pit model: for every true
    pit lap, re-predict lap status, refresh the features, run a one-step
    simulation and accumulate per-stint rank accuracy records.

    input:
        predictor ; trained gluonts predictor
        prediction_length ; forecast horizon in laps
        freq ; pandas frequency string of the series
        datamode ; oracle-mode bit flags forwarded to sim_onestep_pred
        verbose ; forwarded to sim_onestep_pred
    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation
            2. eval stint performance
    return:
        DataFrame [carno, startlap, startrank, endrank, diff, sign,
                   pred_endrank, pred_diff, pred_sign, endlap, pred_endlap]
    """
    # Fix: declare laptime_data global so the update_laptimedata() result is
    # actually published to the module state (as run_simulation_shortterm
    # does); without this the assignment below only created a dead local.
    global laptime_data
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    for pitlap in allpits:
        #1. update lap status
        debug_print(f'start pitlap: {pitlap}')
        # 'oracle' pit model keeps the ground-truth pit laps untouched
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        #update the featuers
        laptime_data = update_laptimedata(prediction_length, freq,
                        test_event = _test_event,
                        train_ratio=0, context_ratio = 0.,shift_len = prediction_length)
        debug_print(f'update lapstatus done.')
        #2. get maxnext
        allpits_pred, pitmat_pred, maxlap = get_pitlaps()
        nextpit, maxnext = get_nextpit(pitmat, pitlap)
        nextpit_pred, maxnext_pred = get_nextpit(pitmat_pred, pitlap)
        #debug
        if len(_debug_carlist) > 0:
            _testcar = _debug_carlist[0]
            if _testcar in nextpit and _testcar in nextpit_pred:
                debugstr = 'nextpit: %d, %d, maxnext: %d, %d'%(nextpit[_testcar], nextpit_pred[_testcar]
                        , maxnext, maxnext_pred)
                debug_print(debugstr)
        #run one step sim from pitlap up to the farther of the true and
        #predicted next pit laps
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                   pitlap, max(maxnext, maxnext_pred),
                   oracle_mode = datamode,
                   sample_cnt = 100,
                   verbose = verbose
                   )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, 2)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, 2, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        # evaluate for this stint
        ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred, trim=_trim)
        rankret.extend(ret)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                'endrank', 'diff', 'sign',
                               'pred_endrank', 'pred_diff', 'pred_sign',
                               'endlap','pred_endlap'
                               ])
    return df
#prediction of shorterm + pred pit model
def run_simulation_shortterm(predictor, prediction_length, freq,
        datamode = MODE_ORACLE,
        sample_cnt = 100,
        verbose = False
        ):
    """
    Short-term prediction over the whole race using the configured pit model:
    slide a prediction_length window from lap 10 to the end of the race,
    re-predicting lap status and features at every step.

    input:
        predictor ; trained gluonts predictor
        prediction_length ; forecast horizon in laps
        freq ; pandas frequency string of the series
        datamode ; oracle-mode bit flags forwarded to sim_onestep_pred
        sample_cnt ; number of sample paths per forecast
        verbose ; forwarded to sim_onestep_pred
    step:
        1. init the lap status model
        2. loop on each pit lap
            1. onestep simulation
            2. eval stint performance
    return:
        (df, full_samples, full_tss) where df holds per-window accuracy rows
        and full_samples/full_tss accumulate per-lap sample distributions.
    """
    global laptime_data
    rankret = []
    # the ground truth
    allpits, pitmat, maxlap = get_pitlaps()
    sim_init()
    #init samples array
    full_samples = {}
    full_tss = {}
    # evaluation starts at lap 10 and stops where a full window still fits
    for pitlap in range(10, maxlap-prediction_length):
        #1. update lap status
        debug_print(f'start pitlap: {pitlap}')
        # 'oracle' pit model keeps the ground-truth pit laps untouched
        if not (isinstance(_pitmodel, str) and _pitmodel == 'oracle'):
            update_lapstatus(pitlap)
        #update the featuers
        laptime_data = update_laptimedata(prediction_length, freq,
                        test_event = _test_event,
                        train_ratio=0, context_ratio = 0.,shift_len = prediction_length)
        debug_print(f'update lapstatus done.')
        #run one step sim from pitlap to maxnext
        forecast, forecast_samples = sim_onestep_pred(predictor, prediction_length, freq,
                   pitlap, pitlap + prediction_length,
                   oracle_mode = datamode,
                   sample_cnt = sample_cnt,
                   verbose = verbose
                   )
        debug_print(f'simulation done: {len(forecast)}')
        # calc rank from this result
        if _exp_id=='rank' or _exp_id=='timediff2rank':
            forecasts_et = eval_stint_direct(forecast, prediction_length)
        elif _exp_id=='laptime2rank':
            forecasts_et = eval_stint_bylaptime(forecast, prediction_length, global_start_offset[_test_event])
        else:
            print(f'Error, {_exp_id} evaluation not support yet')
            break
        #debug joint
        #if True:
        #    xmat = forecasts_et[13][:, pitlap:pitlap+prediction_length]
        #    print('debug forecasts_et at ', pitlap)
        #    print(xmat)
        # evaluate for this stint
        #ret = get_acc_onestint_pred(forecasts_et, pitlap, nextpit, nextpit_pred)
        ret = get_acc_onestep_shortterm(forecasts_et, pitlap, pitlap+prediction_length)
        rankret.extend(ret)
        # add to full_samples
        # laptime2rank compares raw targets; all other experiments compare ranks
        evalbyrank = False if _exp_id == 'laptime2rank' else True
        eval_full_samples(pitlap + prediction_length,
                forecast_samples, forecast,
                full_samples, full_tss, evalbyrank=evalbyrank)
        print('evalbyrank:', evalbyrank)
    #add to df
    df = pd.DataFrame(rankret, columns =['carno', 'startlap', 'startrank',
                                'endrank', 'diff', 'sign',
                               'pred_endrank', 'pred_diff', 'pred_sign',
                               ])
    return df, full_samples, full_tss
def eval_stint_bylaptime(forecasts_et, prediction_length, start_offset):
    """
    evaluate stint rank by laptime forecasting: cumulative-sum each car's
    laptimes into elapsed time, then rank cars per lap by elapsed time.

    input:
        forecast ; {}, carno -> 5 x totallen matrix
            0,: -> lapstatus
            1,: -> true target
            2,: -> pred target
            3, -> placeholder (filled with true_rank here)
            4, -> placeholder (filled with pred_rank here)
        start_offset[]; elapsed time for lap0, for one specific event
        prediction_length ; unused in this function
    return:
        forecasts_et, mutated in place (rows 3 and 4 overwritten)
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    maxlap = gvar.maxlap
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan
    for carno in forecasts_et.keys():
        #start_offset is global var
        # NOTE(review): `offset` is only (re)assigned when start_offset is a
        # DataFrame; otherwise the first iteration raises NameError --
        # presumably start_offset is always a DataFrame here. TODO confirm.
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        lapnum = len(forecasts_et[carno][1,:])
        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed
        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed
        #maxlap = max(maxlap, len(forecasts_et[carno][1,:]))
    #calculate rank, support nan
    # double argsort turns elapsed time into 0-based rank; NaNs sort last
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]
    return forecasts_et
#
#
def eval_full_samples(lap, forecast_samples, forecast, full_samples, full_tss, evalbyrank = True):
    """
    Accumulate the per-lap sample distribution for one forecast step into the
    running full_samples/full_tss containers (mutated in place).

    input:
        lap ; lap number this forecast step ends at
        forecast_samples; {} carno -> samples of pred target (one value per
            sample path, taken at the forecast horizon)
        forecast ; {}, carno -> 5 x totallen matrix
            1,: -> true target
            2,: -> pred target
        evalbyrank ; True -> convert targets to ranks before storing,
            False -> store the raw target values
    return:
        None; results land in full_samples (carno -> (samplecnt, maxlap))
        and full_tss (carno -> (maxlap,))
    """
    #get car list for this lap
    carlist = list(forecast.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    samplecnt = len(forecast_samples[carlist[0]])
    #diff_time = np.zeros((len(carlist), 1))
    diff_time = np.zeros((len(carlist), gvar.maxlap))
    diff_time_hat = np.zeros((len(carlist), samplecnt))
    diff_time[:,:] = np.nan
    diff_time_hat[:,:] = np.nan
    for carno in carlist:
        #diff_time[caridmap[carno],0] = forecast[carno][1, lap]
        maxlen = len(forecast[carno][1, :])
        diff_time[caridmap[carno],:maxlen] = forecast[carno][1, :]
        diff_time_hat[caridmap[carno],:] = forecast_samples[carno]
    if evalbyrank == True:
        #calculate rank, support nan
        # double argsort turns targets into 0-based rank; NaNs sort last
        idx = np.argsort(diff_time, axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(diff_time_hat, axis=0)
        pred_rank = np.argsort(idx, axis=0)
    else:
        true_rank = diff_time
        pred_rank = diff_time_hat
    # save the rank back
    for carno in carlist:
        if carno not in full_tss:
            #init
            full_tss[carno] = np.zeros((gvar.maxlap))
            full_samples[carno] = np.zeros((samplecnt, gvar.maxlap))
            full_tss[carno][:] = np.nan
            full_samples[carno][:,:] = np.nan
        # refresh the full true history 0..lap-1, then the current lap
        full_tss[carno][:lap] = true_rank[caridmap[carno]][:lap]
        full_tss[carno][lap] = true_rank[caridmap[carno]][lap]
        full_samples[carno][:, lap] = pred_rank[caridmap[carno],:]
    return
def eval_stint_direct(forecasts_et, prediction_length):
    """
    Rank cars directly from the (time-diff style) forecast targets.

    For every lap, cars are ordered by the target value in row 1 (true) and
    row 2 (predicted) of each car's matrix; the resulting 0-based ranks are
    written back into rows 3 and 4.

    input:
        forecasts_et ; {carno -> 5 x totallen matrix}
            1,: true target; 2,: pred target; 3/4: rank placeholders
        prediction_length ; unused, kept for interface compatibility
    return:
        forecasts_et, mutated in place
    """
    cars = list(forecasts_et.keys())
    car2row = {carno: row for row, carno in enumerate(cars)}
    # target matrix: axis0 = {true, pred}, axis1 = car, axis2 = lap
    target = np.full((2, len(cars), gvar.maxlap), np.nan)
    for carno in cars:
        row = car2row[carno]
        ncols = forecasts_et[carno].shape[1]
        target[0, row, :ncols] = forecasts_et[carno][1, :]
        target[1, row, :ncols] = forecasts_et[carno][2, :]
    # double argsort converts values into 0-based ranks per lap; NaNs sort last
    rank_true = np.argsort(np.argsort(target[0], axis=0), axis=0)
    rank_pred = np.argsort(np.argsort(target[1], axis=0), axis=0)
    # write the ranks back into the placeholder rows
    for carno in cars:
        row = car2row[carno]
        ncols = forecasts_et[carno].shape[1]
        forecasts_et[carno][3, :] = rank_true[row, :ncols]
        forecasts_et[carno][4, :] = rank_pred[row, :ncols]
    return forecasts_et
#calc rank
def eval_stint_rank(forecasts_et, prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting: identical logic to
    eval_stint_bylaptime (cumsum laptimes into elapsed time, rank per lap).

    input:
        forecasts_et ; {carno -> 5 x totallen matrix}; rows 1/2 hold the
            true/pred laptimes, rows 3/4 are overwritten with ranks
        start_offset[]; elapsed time for lap0, for one specific event
        prediction_length ; unused in this function
    return:
        forecasts_et, mutated in place (rows 3 and 4 overwritten)
    """
    #get car list for this lap
    carlist = list(forecasts_et.keys())
    #print('carlist:', carlist)
    caridmap={key:idx for idx, key in enumerate(carlist)}
    #convert to elapsedtime
    #todo, Indy500 - > 200 max laps
    maxlap = gvar.maxlap
    elapsed_time = np.zeros((2, len(carlist), maxlap))
    elapsed_time[:,:] = np.nan
    for carno in forecasts_et.keys():
        #start_offset is global var
        # NOTE(review): `offset` is only assigned when start_offset is a
        # DataFrame -- presumably it always is here. TODO confirm.
        if isinstance(start_offset, pd.core.frame.DataFrame):
            offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        lapnum = len(forecasts_et[carno][1,:])
        laptime_array = forecasts_et[carno][1,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[0, caridmap[carno],:lapnum] = elapsed
        laptime_array = forecasts_et[carno][2,:]
        elapsed = np.cumsum(laptime_array) + offset
        elapsed_time[1, caridmap[carno],:lapnum] = elapsed
        #maxlap = max(maxlap, len(forecasts_et[carno][1,:]))
    #calculate rank, support nan
    # double argsort turns elapsed time into 0-based rank; NaNs sort last
    idx = np.argsort(elapsed_time[0], axis=0)
    true_rank = np.argsort(idx, axis=0)
    idx = np.argsort(elapsed_time[1], axis=0)
    pred_rank = np.argsort(idx, axis=0)
    # save the rank back
    for carno in forecasts_et.keys():
        lapnum = len(forecasts_et[carno][1,:])
        forecasts_et[carno][3,:] = true_rank[caridmap[carno],:lapnum]
        forecasts_et[carno][4,:] = pred_rank[caridmap[carno],:lapnum]
    return forecasts_et
# In[13]:
def get_sign(diff):
    """Return the sign of *diff*: 1 if positive, -1 if negative, 0 if zero."""
    if diff == 0:
        return 0
    return 1 if diff > 0 else -1
def init(laptimefile, pitmodel = '', pitmodel_bias = 0):
    """
    Initialize module-level state: load per-event rank data (for lap-0 elapsed
    time offsets), unpickle the laptime dataset, and construct the pit model.

    input:
        laptimefile ; pickle file holding (global_carids, laptime_data)
        pitmodel ; '' / 'oracle' / model file path for PitModelMLP, or a
            non-str value to select PitModelSimple (0 -> top8 variant)
        pitmodel_bias ; bias passed along with an MLP pit model
    """
    global global_carids, laptime_data, global_start_offset, decode_carids,_pitmodel
    global _inlap_status
    #dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
    stagedata = {}
    for event in gvar.events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata = stagedata[event]
        #offset
        # elapsed time at lap 0 per car, used later to anchor cumsum laptimes
        global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]
    # start from here
    import pickle
    #with open('laptime_rank_timediff_fulltest-oracle-%s.pickle'%year, 'rb') as f:
    #laptimefile = f'laptime_rank_timediff_pit-oracle-{gvar.dbid}.pickle'
    with open(laptimefile, 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    decode_carids={carid:carno for carno, carid in global_carids.items()}
    print(f'init: load dataset {laptimefile} with {len(laptime_data)} races, {len(global_carids)} cars')
    if not isinstance(pitmodel, str):
        _pitmodel = PitModelSimple(top8=(True if pitmodel==0 else False))
        print(f'init pitmodel as PitModelSimple')
    elif pitmodel=='oracle':
        _pitmodel = pitmodel
    else:
        # NOTE(review): _pitmodel_bias is not in the global declarations
        # above, so this assignment only creates a dead local; the
        # module-level _pitmodel_bias is left unchanged. TODO confirm intent.
        _pitmodel_bias = pitmodel_bias
        _pitmodel = PitModelMLP(modelfile = pitmodel)
        print(f'init pitmodel as PitModelMLP(pitmodel)')
def get_evalret(df):
    """
    Compute accuracy metrics of the rank-change prediction against the naive
    "rank stays the same" baseline.

    input:
        df ; DataFrame with columns startrank, endrank, diff, sign,
             pred_diff, pred_sign
    return:
        2x4 array [[acc, mae, rmse, r2],               # model
                   [acc_naive, mae_naive, rmse_naive, r2_naive]]  # baseline
    """
    # sign accuracy: fraction of rows where the predicted direction matches
    correct = df[df['sign']==df['pred_sign']]
    acc = len(correct)/len(df)
    mae1 = np.sum(np.abs(df['pred_diff'].values - df['diff'].values))/len(df)
    rmse = math.sqrt(mean_squared_error(df['pred_diff'].values , df['diff'].values))
    mae = mean_absolute_error(df['pred_diff'].values , df['diff'].values)
    r2 = r2_score(df['pred_diff'].values , df['diff'].values)
    #naive result
    n_correct = df[df['startrank']==df['endrank']]
    acc_naive = len(n_correct)/len(df)
    mae_naive = np.mean(np.abs(df['diff'].values))
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)
    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}, {mae_naive1}')
    #print(f'pred: acc={acc}, mae={mae}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    # note: '{%.2f}' prints literal braces around each number (cosmetic only)
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, {%d}\n \
           naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}'%(
            acc, mae, rmse, r2, len(df),
            acc_naive, mae_naive, rmse_naive, r2_naive
            )
         )
    return np.array([[acc, mae, rmse, r2],[acc_naive, mae_naive, rmse_naive, r2_naive]])
    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    #return acc, mae, rmse, r2
def get_evalret_shortterm(df):
    """
    Compute short-term accuracy metrics. Accuracy here is leader (rank 0)
    prediction accuracy: among laps where the model predicts a leader, the
    fraction where that car is the true leader; the naive baseline assumes
    the current leader stays leader.

    input:
        df ; DataFrame with columns startlap, startrank, endrank, diff,
             sign, pred_endrank, pred_sign
    return:
        2x5 array [[acc, mae, rmse, r2, signacc],               # model
                   [acc_naive, mae_naive, rmse_naive, r2_naive, naive_signacc]]
    """
    maxlap = np.max(df['startlap'].values)
    minlap = np.min(df['startlap'].values)
    top1 = df[df['endrank']==0]
    top1_pred = df[df['pred_endrank']==0]
    correct = top1_pred[top1_pred['pred_endrank']==top1_pred['endrank']]
    #acc = len(correct)/len(top1_pred)
    # epsilon guards against division by zero when nothing is predicted top1
    acc = len(correct)/(len(top1_pred) + 1e-10)
    rmse = math.sqrt(mean_squared_error(df['pred_endrank'].values , df['endrank'].values))
    mae = mean_absolute_error(df['pred_endrank'].values , df['endrank'].values)
    r2 = r2_score(df['pred_endrank'].values , df['endrank'].values)
    # mae1: total absolute error normalized per lap rather than per row
    mae1 = np.sum(np.abs(df['pred_endrank'].values - df['endrank'].values))
    mae1 = mae1/ (maxlap -minlap +1)
    #naive result
    top1_naive = df[df['startrank']==0]
    n_correct = top1_naive[top1_naive['startrank']==top1_naive['endrank']]
    acc_naive = len(n_correct)/len(top1_naive)
    mae_naive = np.mean(np.abs(df['diff'].values))
    mae_naive1 = np.sum(np.abs(df['diff'].values))
    mae_naive1 = mae_naive1 / (maxlap - minlap + 1)
    rmse_naive = math.sqrt(mean_squared_error(df['startrank'].values , df['endrank'].values))
    r2_naive = r2_score(df['startrank'].values , df['endrank'].values)
    #print(f'pred: acc={acc}, mae={mae},{mae1}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}, {mae_naive1}')
    #print(f'pred: acc={acc}, mae={mae}, rmse={rmse},r2={r2}, acc_naive={acc_naive}, mae_naive={mae_naive}')
    # sign accuracy over all rows; naive predicts "no change" (sign == 0)
    correct = df[df['sign']==df['pred_sign']]
    signacc = len(correct)/len(df)
    naive_signcorrect = df[df['sign'] == 0]
    naive_signacc = len(naive_signcorrect) / len(df)
    print('testset size:', len(df), 'minlap:', minlap, 'maxlap:', maxlap)
    print('model: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, top1_pred: {%d}, top1_naive: {%d}\n \
           naive: acc={%.2f}, mae={%.2f}, rmse={%.2f},r2={%.2f}, top1: {%d}'%(
            acc, mae, rmse, r2, len(top1_pred), len(top1_naive),
            acc_naive, mae_naive, rmse_naive, r2_naive, len(top1)
            )
         )
    return np.array([[acc, mae, rmse, r2, signacc],[acc_naive, mae_naive, rmse_naive, r2_naive, naive_signacc]])
#
# configuration
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
#_dataset_id = 'indy2013-2018-nocarid'
_dataset_id = 'indy2013-2018'
_test_event = 'Indy500-2018'
#_test_event = 'Indy500-2019'
_train_len = 40
_test_train_len = 40
_feature_mode = FEATURE_STATUS
_context_ratio = 0.
# task/target selection: pick exactly one (_task_id, _run_ts, _exp_id) triple
#_task_id = 'timediff' # rank,laptime, the trained model's task
#_run_ts = COL_TIMEDIFF #COL_LAPTIME,COL_RANK
#_exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
#
#_task_id = 'lapstatus' # rank,laptime, the trained model's task
#_run_ts = COL_LAPSTATUS #COL_LAPTIME,COL_RANK
#_exp_id='lapstatus' #rank, laptime, laptim2rank, timediff2rank...
_task_id = 'laptime' # rank,laptime, the trained model's task
_run_ts = COL_LAPTIME #COL_LAPTIME,COL_RANK
_exp_id='laptime2rank' #rank, laptime, laptim2rank, timediff2rank...
# 0: no pit flag on inlap; 1: flag the inlap; 2: flag the outlap
_inlap_status = 1
_force_endpit_align = False
_include_endpit = False
#_use_mean = False # mean or median to get prediction from samples
_use_mean = True # mean or median to get prediction from samples
# joint train the target of (rank, lapstatus)
_joint_train = False
_pitmodel_bias = 0
# In[16]:
# module-level state filled in by init()
global_start_offset = {}
global_carids = {}
laptime_data = None
freq = "1min"
decode_carids = {}
_trim = 0
# turn to use gvar
#years = ['2013','2014','2015','2016','2017','2018','2019']
#events = [f'Indy500-{x}' for x in years]
#events_id={key:idx for idx, key in enumerate(events)}
#dbid = f'Indy500_{years[0]}_{years[-1]}_v9_p{_inlap_status}'
| 84,929 | 33.950617 | 201 | py |
rankpredictor | rankpredictor-master/src/indycar/model/save/before_onelag/RankNet-QuickTest-Slim.py | #!/usr/bin/env python
# coding: utf-8
# ## QuickTest Slim
#
# based on : RankNet-QuickTest-Joint
#
# makedb laptime
# makedb gluonts
# train model
# evaluate model
#
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import random
import mxnet as mx
from mxnet import gluon
import pickle
import json
import copy
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from pathlib import Path
import configparser
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWeightEstimator
#import indycar.model.stint_simulator_shortterm_pitmodel as stint
import indycar.model.quicktest_simulator as stint
# import all functions
#from indycar.model.global_variables import _hi
import indycar.model.global_variables as gvar
from indycar.model.quicktest_modules import *
# ## run
# In[2]:
### run
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
# logging configure
import logging.config
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
# cmd argument parser
usage = 'RankNet-QuickTest.py <configfile> [options]'
parser = OptionParser(usage)
parser.add_option("--forecast_mode", dest="forecast_mode", default="")
parser.add_option("--trainmodel", default='', dest="trainmodel")
parser.add_option("--testmodel", default='', dest="testmodel")
parser.add_option("--joint_train", action="store_true", default=False, dest="joint_train")
parser.add_option("--loopcnt", default=-1,type='int', dest="loopcnt")
parser.add_option("--gpuid", default=-1,type='int', dest="gpuid")
parser.add_option("--pitmodel_bias", default=-1, type='int', dest="pitmodel_bias")
parser.add_option("--year", default='', dest="year")
parser.add_option("--test_event", default='', dest="test_event")
parser.add_option("--suffix", default='', dest="suffix")
parser.add_option("--dataroot", default='test/', dest="dataroot")
opt, args = parser.parse_args()
print(len(args), opt.joint_train)
# --- Configuration loading -------------------------------------------------
# Expects exactly one positional argument: the path to an .ini config file.
#check validation
if len(args) != 1:
    logger.error(globals()['__doc__'] % locals())
    sys.exit(-1)
configfile = args[0]
base=os.path.basename(configfile)
# experiment name = config file name without extension; used in output paths
configname = os.path.splitext(base)[0]
WorkRootDir = 'QuickTestOutput'
#configname = 'weighted-noinlap-nopitage-nocate-c60-drank'
#configfile = f'{configname}.ini'
if not os.path.exists(configfile):
    print('config file not exists error:', configfile)
    sys.exit(-1)
if configfile != '':
    config = configparser.RawConfigParser()
    #config.read(WorkRootDir + '/' + configfile)
    config.read(configfile)
    #set them back
    # all experiment parameters live in one section of the .ini file
    section = "RankNet-QuickTest"
    _savedata = config.getboolean(section, "_savedata")
    _skip_overwrite = config.getboolean(section, "_skip_overwrite")
    _inlap_status = config.getint(section, "_inlap_status") #0
    _feature_mode = config.getint(section, "_feature_mode") #FEATURE_STATUS
    _featureCnt = config.getint(section, "_featureCnt") #9
    freq = config.get(section, "freq") #"1min"
    _train_len = config.getint(section, "_train_len") #40
    prediction_length = config.getint(section, "prediction_length") #2
    context_ratio = config.getfloat(section, "context_ratio") #0.
    context_length = config.getint(section, "context_length") #40
    dataset= config.get(section, "dataset") #'rank'
    epochs = config.getint(section, "epochs") #1000
    gpuid = config.getint(section, "gpuid") #5
    _use_weighted_model = config.getboolean(section, "_use_weighted_model")
    trainmodel = config.get(section, "trainmodel") #'deepARW-Oracle' if _use_weighted_model else 'deepAR-Oracle'
    _use_cate_feature = config.getboolean(section, "_use_cate_feature")
    distroutput = config.get(section, "distroutput") #'student'
    batch_size = config.getint(section, "batch_size") #32
    loopcnt = config.getint(section, "loopcnt") #2
    _test_event = config.get(section, "_test_event") #'Indy500-2018'
    testmodel = config.get(section, "testmodel") #'oracle'
    pitmodel = config.get(section, "pitmodel") #'oracle'
    year = config.get(section, "year") #'2018'
    # aliases used later in the script
    contextlen = context_length
    use_feat_static = _use_cate_feature
    #config1 = get_config()
else:
    print('Warning, please use config file')
    sys.exit(0)
# In[3]:
# --- Command-line overrides ------------------------------------------------
# new added parameters (defaults; may be overridden by `opt` below)
_draw_figs = False
_test_train_len = 40
_joint_train = False
_pitmodel_bias = 0
#shortterm, stint
#_forecast_mode = 'stint'
_forecast_mode = 'shortterm'
#load argument overwrites
# `opt` is the parsed command line (defined earlier in the file);
# non-empty / non-default values take precedence over the config file
if opt.forecast_mode != '':
    _forecast_mode = opt.forecast_mode
if opt.trainmodel != '':
    trainmodel = opt.trainmodel
if opt.testmodel != '':
    testmodel = opt.testmodel
if opt.joint_train != False:
    _joint_train = True
if opt.gpuid >= 0:
    gpuid = opt.gpuid
if opt.loopcnt > 0:
    loopcnt = opt.loopcnt
if opt.pitmodel_bias >= 0:
    _pitmodel_bias = opt.pitmodel_bias
if opt.year != '':
    year = opt.year
if opt.test_event != '':
    _test_event = opt.test_event
if opt.suffix:
    _debugstr = f'-{opt.suffix}'
else:
    _debugstr = ''
dataroot = opt.dataroot
#discard year
year = _test_event
# encode a non-zero bias into the pitmodel name, e.g. 'pitmodel2'
if testmodel == 'pitmodel':
    testmodel = 'pitmodel%s'%(_pitmodel_bias if _pitmodel_bias!=0 else '')
#featurestr = {FEATURE_STATUS:'nopitage',FEATURE_PITAGE:'pitage',FEATURE_LEADERPITCNT:'leaderpitcnt'}
#cur_featurestr = featurestr[_feature_mode]
print('current configfile:', configfile)
cur_featurestr = decode_feature_mode(_feature_mode)
print('feature_mode:', _feature_mode, cur_featurestr)
print('testmodel:', testmodel)
print('pitmodel:', pitmodel)
#print('year:', year)
print('test_event:', _test_event)
# In[4]:
#
# string map
#
inlapstr = {0:'noinlap',1:'inlap',2:'outlap'}
weightstr = {True:'weighted',False:'noweighted'}
catestr = {True:'cate',False:'nocate'}
#
# input data parameters
#
#events = ['Phoenix','Indy500','Texas','Iowa','Pocono','Gateway']
#events_totalmiles=[256,500,372,268,500,310]
#events_laplen = [1.022,2.5,1.5,0.894,2.5,1.25]
# per-track tuple: (total miles, lap length in miles, total laps)
events_info = {
    'Phoenix':(256, 1.022, 250),'Indy500':(500,2.5,200),'Texas':(372,1.5,248),
    'Iowa':(268,0.894,300),'Pocono':(500,2.5,200),'Gateway':(310,1.25,248)
}
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events.extend(['Phoenix-2018','Texas-2018','Texas-2019','Pocono-2018','Pocono-2019','Iowa-2018','Iowa-2019',
               'Gateway-2018','Gateway-2019'])
events_id={key:idx for idx, key in enumerate(events)}
#dbid = f'Indy500_{years[0]}_{years[-1]}_v{_featureCnt}_p{_inlap_status}'
dbid = f'IndyCar_d{len(events)}_v{_featureCnt}_p{_inlap_status}'
_dataset_id = '%s-%s'%(inlapstr[_inlap_status], cur_featurestr)
# training is restricted to Indy500 2013-2017
_train_events = [events_id[x] for x in [f'Indy500-{x}' for x in ['2013','2014','2015','2016','2017']]]
#
# internal parameters
#
distr_outputs ={'student':StudentTOutput(),
                'negbin':NegativeBinomialOutput()
                }
distr_output = distr_outputs[distroutput]
#
#
#
experimentid = f'{weightstr[_use_weighted_model]}-{inlapstr[_inlap_status]}-{cur_featurestr}-{catestr[_use_cate_feature]}-c{context_length}{_debugstr}'
#
#
#
outputRoot = f"{WorkRootDir}/{experimentid}/"
version = f'IndyCar-d{len(events)}-endlap'
# standard output file names
LAPTIME_DATASET = f'{outputRoot}/laptime_rank_timediff_pit-oracle-{dbid}.pickle'
STAGE_DATASET = f'{outputRoot}/stagedata-{dbid}.pickle'
# year related
SIMULATION_OUTFILE = f'{outputRoot}/{_test_event}/{_forecast_mode}-dfout-{trainmodel}-indy500-{dataset}-{inlapstr[_inlap_status]}-{cur_featurestr}-{testmodel}-l{loopcnt}-alldata.pickle'
EVALUATION_RESULT_DF = f'{outputRoot}/{_test_event}/{_forecast_mode}-evaluation_result_d{dataset}_m{testmodel}.csv'
LONG_FORECASTING_DFS = f'{outputRoot}/{_test_event}/{_forecast_mode}-long_forecasting_dfs_d{dataset}_m{testmodel}.pickle'
FORECAST_FIGS_DIR = f'{outputRoot}/{_test_event}/{_forecast_mode}-forecast-figs-d{dataset}_m{testmodel}/'
# In[5]:
# set global vars
# Copy every experiment parameter into the shared `gvar` module so that the
# helper functions (dataset builders, simulation, plotting) can read them
# without long argument lists.
gvar._savedata = _savedata
gvar._skip_overwrite = _skip_overwrite
gvar._inlap_status = _inlap_status
gvar._feature_mode = _feature_mode
gvar._featureCnt = _featureCnt
gvar.freq = freq
gvar._train_len = _train_len
gvar.prediction_length = prediction_length
gvar.context_ratio = context_ratio
gvar.context_length = context_length
gvar.contextlen = contextlen
gvar.dataset = dataset
gvar.epochs = epochs
gvar.gpuid = gpuid
gvar._use_weighted_model = _use_weighted_model
gvar.trainmodel = trainmodel
gvar._use_cate_feature = _use_cate_feature
gvar.use_feat_static = use_feat_static
gvar.distroutput = distroutput
gvar.batch_size = batch_size
gvar.loopcnt = loopcnt
gvar._test_event = _test_event
gvar.testmodel = testmodel
gvar.pitmodel = pitmodel
gvar.year = year
gvar._forecast_mode = _forecast_mode
gvar._test_train_len = _test_train_len
gvar._joint_train = _joint_train
gvar._pitmodel_bias = _pitmodel_bias
gvar.events = events
gvar.events_id = events_id
gvar.events_info = events_info
gvar._train_events = _train_events
# index 2 of the event info tuple is the total lap count
gvar.maxlap = get_event_info(_test_event)[2]
gvar.dbid = dbid
gvar.LAPTIME_DATASET = LAPTIME_DATASET
# ### 1. make laptime dataset
# In[6]:
stagedata = {}
global_carids = {}
os.makedirs(outputRoot, exist_ok=True)
os.makedirs(f'{outputRoot}/{_test_event}', exist_ok=True)
#check the dest files first
# reuse cached pickles when present; otherwise rebuild from the raw data
if _skip_overwrite and os.path.exists(LAPTIME_DATASET) and os.path.exists(STAGE_DATASET):
    #
    # load data
    #
    print('Load laptime and stage dataset:',LAPTIME_DATASET, STAGE_DATASET)
    with open(LAPTIME_DATASET, 'rb') as f:
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    with open(STAGE_DATASET, 'rb') as f:
        stagedata = pickle.load(f, encoding='latin1')
else:
    cur_carid = 0
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        print('%s: carno=%d, lapnum=%d'%(event, len(carlist), len(laplist)))
        #build the carid map
        # assign a globally unique id to every car number seen in any event
        for car in carlist:
            if car not in global_carids:
                global_carids[car] = cur_carid
                cur_carid += 1
    laptime_data = get_laptime_dataset(stagedata, inlap_status = _inlap_status)
    if _savedata:
        import pickle
        #stintdf.to_csv('laptime-%s.csv'%year)
        #savefile = outputRoot + f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
        savefile = LAPTIME_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            #pack [global_carids, laptime_data]
            savedata = [global_carids, laptime_data]
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        #savefile = outputRoot + f'stagedata-{dbid}.pickle'
        savefile = STAGE_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            #pack [global_carids, laptime_data]
            savedata = stagedata
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
#update global var
gvar.global_carids = global_carids
# ### 2. make gluonts db
# In[7]:
outdir = outputRoot + _dataset_id
os.makedirs(outdir, exist_ok=True)
# pick the target time-series column according to the dataset type
if dataset == 'laptime':
    subdir = 'laptime-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_LAPTIME
elif dataset == 'timediff':
    subdir = 'timediff-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_TIMEDIFF
elif dataset == 'rank':
    subdir = 'rank-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_RANK
else:
    print('error, dataset not support: ', dataset)
_task_dir = f'{outdir}/{subdir}/'
#
#dbname, train_ds, test_ds = makedbs()
#
useeid = False
interpolate = False
#ipstr = '-ip' if interpolate else '-noip'
ipstr = '%s-%s'%('ip' if interpolate else 'noip', 'eid' if useeid else 'noeid')
jointstr = '-joint' if _joint_train else ''
dbname = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}{jointstr}.pickle'
laptimedb = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}-newlaptimedata.pickle'
#check the dest files first
if _skip_overwrite and os.path.exists(dbname) and os.path.exists(laptimedb):
    print('Load Gluonts Dataset:',dbname)
    with open(dbname, 'rb') as f:
        freq, prediction_length, cardinality, train_ds, test_ds = pickle.load(f, encoding='latin1')
        print('.......loaded data, freq=', freq, 'prediction_length=', prediction_length)
    print('Load New Laptime Dataset:',laptimedb)
    with open(laptimedb, 'rb') as f:
        prepared_laptimedata = pickle.load(f, encoding='latin1')
else:
    # cardinality of the static categorical features (car id, optionally event id)
    if useeid:
        cardinality = [len(global_carids), len(laptime_data)]
    else:
        cardinality = [len(global_carids)]
    prepared_laptimedata = prepare_laptimedata(laptime_data,
            prediction_length, freq, test_event = _test_event,
            train_ratio=0, context_ratio = 0.,shift_len = prediction_length)
    train_ds, test_ds,_,_ = make_dataset_byevent(prepared_laptimedata,
            prediction_length,freq,
            useeid=useeid, run_ts=_run_ts,
            test_event=_test_event, log_transform =False,
            context_ratio=0, train_ratio = 0, joint_train = _joint_train)
    if _savedata:
        print('Save Gluonts Dataset:',dbname)
        with open(dbname, 'wb') as f:
            savedata = [freq, prediction_length, cardinality, train_ds, test_ds]
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        print('Save preprocessed laptime Dataset:',laptimedb)
        with open(laptimedb, 'wb') as f:
            pickle.dump(prepared_laptimedata, f, pickle.HIGHEST_PROTOCOL)
# ### 3. train the model
# In[8]:
id='oracle'
run=1
runid=f'{trainmodel}-{dataset}-all-indy-f1min-t{prediction_length}-e{epochs}-r{run}_{id}_t{prediction_length}'
modelfile = _task_dir + runid
# skip training when a serialized checkpoint directory already exists
if _skip_overwrite and os.path.exists(modelfile):
    print('Model checkpoint found at:',modelfile)
else:
    #get target dim
    # multivariate targets have shape (dim, length); univariate -> dim 1
    entry = next(iter(train_ds))
    target_dim = entry['target'].shape
    target_dim = target_dim[0] if len(target_dim) > 1 else 1
    print('target_dim:%s', target_dim)
    estimator = init_estimator(trainmodel, gpuid,
            epochs, batch_size,target_dim, distr_output = distr_output,use_feat_static = use_feat_static)
    predictor = estimator.train(train_ds)
    if _savedata:
        os.makedirs(modelfile, exist_ok=True)
        print('Start to save the model to %s', modelfile)
        predictor.serialize(Path(modelfile))
        print('End of saving the model.')
#
# ### 4. evaluate the model
# In[9]:
lapmode = _inlap_status
fmode = _feature_mode
runts = dataset
# result-dictionary key identifying this model/dataset/event combination
mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], cur_featurestr)
datasetid = outputRoot + _dataset_id
if _skip_overwrite and os.path.exists(SIMULATION_OUTFILE):
    print('Load Simulation Results:',SIMULATION_OUTFILE)
    with open(SIMULATION_OUTFILE, 'rb') as f:
        dfs,acc,ret,pret = pickle.load(f, encoding='latin1')
        print('.......loaded data, ret keys=', ret.keys())
    # init the stint module
    #
    # in test mode, set all train_len = 40 to unify the evaluation results
    #
    init_simulation(datasetid, _test_event, 'rank',stint.COL_RANK,'rank',prediction_length,
            pitmodel=pitmodel, inlapmode=lapmode,featuremode =fmode,
            train_len = _test_train_len, pitmodel_bias= _pitmodel_bias, prepared_laptimedata = prepared_laptimedata)
else:
    #run simulation
    acc, ret, pret = {}, {}, {}
    #lapmode = _inlap_status
    #fmode = _feature_mode
    #runts = dataset
    #mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], featurestr[fmode])
    # 'rank' targets are simulated directly; other targets are converted via
    # 'timediff2rank'
    if runts == 'rank':
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                'rank',stint.COL_RANK,'rank',
                prediction_length, stint.MODE_ORACLE,loopcnt,
                pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
                train_len = _test_train_len, forecastmode = _forecast_mode, joint_train = _joint_train,
                pitmodel_bias= _pitmodel_bias, prepared_laptimedata = prepared_laptimedata,
                epochs = epochs)
    else:
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                'timediff',stint.COL_TIMEDIFF,'timediff2rank',
                prediction_length, stint.MODE_ORACLE,loopcnt,
                pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
                train_len = _test_train_len, forecastmode = _forecast_mode, joint_train = _joint_train,
                pitmodel_bias= _pitmodel_bias, prepared_laptimedata = prepared_laptimedata,
                epochs = epochs)
    if _forecast_mode == 'shortterm':
        allsamples, alltss = get_allsamples(ret[mid], year=year)
        _, pret[mid]= prisk_direct_bysamples(allsamples, alltss)
        print(pret[mid])
    dfs={}
    # mode 1 -> aggregate multi-run forecasts by their mean
    mode=1
    df = get_alldf_mode(ret[mid], year=year,mode=mode, forecast_mode = _forecast_mode)
    name = '%s_%s'%(testmodel, 'mean' if mode==1 else ('mode' if mode==0 else 'median'))
    if year not in dfs:
        dfs[year] = {}
    dfs[year][name] = df
    _trim = 0
    _include_final = True
    _include_stintlen = True
    include_str = '1' if _include_final else '0'
    stint_str = '1' if _include_stintlen else ''
    #simulation_outfile=outputRoot + f'shortterm-dfout-oracle-indy500-{dataset}-{inlapstr[_inlap_status]}-{featurestr[_feature_mode]}-2018-oracle-l{loopcnt}-alldata-weighted.pickle'
    with open(SIMULATION_OUTFILE, 'wb') as f:
        savedata = [dfs,acc,ret,pret]
        pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
#alias
ranknetdf = dfs
ranknet_ret = ret
# In[10]:
# ### 5. final evaluation
# In[11]:
if _skip_overwrite and os.path.exists(EVALUATION_RESULT_DF):
    print('Load Evaluation Results:',EVALUATION_RESULT_DF)
    oracle_eval_result = pd.read_csv(EVALUATION_RESULT_DF)
else:
    # get pit laps, pit-covered-laps
    # pitdata[event] = [pitlaps, pitcoveredlaps]
    with open('pitcoveredlaps-alldata-g1.pickle', 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        pitdata = pickle.load(f, encoding='latin1')
    with open(STAGE_DATASET, 'rb') as f:
        stagedata = pickle.load(f, encoding='latin1')
    _alldata, rankdata, _acldata, _flagdata = stagedata[_test_event]
    ##-------------------------------------------------------------------------------
    if _forecast_mode == 'shortterm':
        #
        # Model,SignAcc,MAE,50-Risk,90-Risk
        #
        cols = ['Year','Model','ExpID','laptype','Top1Acc','SignAcc', 'MAE','50-Risk','90-Risk']
        plen = prediction_length
        usemeanstr='mean'
        #load data
        # dfs,acc,ret,pret
        retdata = []
        #oracle
        # overall evaluation on all laps
        dfx = ret[mid]
        allsamples, alltss = get_allsamples(dfx, year=year)
        #_, pret[mid]= prisk_direct_bysamples(ret[mid][0][1], ret[mid][0][2])
        _, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
        dfout = do_rerank(ranknetdf[year][f'{testmodel}_mean'])
        accret = stint.get_evalret_shortterm(dfout)[0]
        #fsamples, ftss = runs2samples_ex(ranknet_ret[f'oracle-RANK-{year}-inlap-nopitage'],[])
        #_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
        retdata.append([year,f'{testmodel}',configname,'all', accret[0], accret[4], accret[1], prisk_vals[1], prisk_vals[2]])
        # split evaluation: laps that are pit-covered vs. the rest
        for laptype in ['normal','pit']:
            # select the set
            pitcoveredlaps = pitdata[_test_event][1]
            gvar.maxlap = get_event_info(_test_event)[2]
            normallaps = set([x for x in range(1, gvar.maxlap + 1)]) - pitcoveredlaps
            if laptype == 'normal':
                sellaps = normallaps
                clearlaps = pitcoveredlaps
            else:
                sellaps = pitcoveredlaps
                clearlaps = normallaps
            # pitcoveredlaps start idx = 1
            startlaps = [x-plen-1 for x in sellaps]
            #sellapidx = np.array([x-1 for x in sellaps])
            clearidx = np.array([x-1 for x in clearlaps])
            print('sellaps:', len(sellaps), 'clearlaps:',len(clearlaps))
            #oracle
            #outfile=f'shortterm-dfout-ranknet-indy500-rank-inlap-nopitage-20182019-oracle-l10-alldata-weighted.pickle'
            #_all = load_dfout_all(outfile)[0]
            #ranknetdf, acc, ret, pret = _all[0],_all[1],_all[2],_all[3]
            dfout = do_rerank(ranknetdf[year][f'{testmodel}_mean'])
            allsamples, alltss = get_allsamples(dfx, year=year)
            allsamples, alltss = clear_samples(allsamples, alltss,clearidx)
            _, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
            dfout = dfout[dfout['startlap'].isin(startlaps)]
            accret = stint.get_evalret_shortterm(dfout)[0]
            print(year, laptype,f'RankNet-{testmodel}',accret[0], accret[4], accret[1], prisk_vals[1], prisk_vals[2])
            retdata.append([year, f'{testmodel}',configname,laptype, accret[0], accret[4], accret[1], prisk_vals[1], prisk_vals[2]])
    ##-------------------------------------------------------------------------------
    elif _forecast_mode == 'stint':
        # compare against the pre-computed ML baseline predictions
        if testmodel == 'oracle':
            #datafile=f'stint-dfout-mlmodels-indy500-tr2013_2017-te2018_2019-end1-oracle-t0-tuned.pickle'
            datafile=f'{dataroot}/stint-dfout-mlmodels-{version}-end1-oracle-t0-tuned.pickle'
        else:
            #datafile=f'stint-dfout-mlmodels-indy500-tr2013_2017-te2018_2019-end1-normal-t0-tuned.pickle'
            datafile=f'{dataroot}/stint-dfout-mlmodels-{version}-end1-normal-t0-tuned.pickle'
        #preddf = load_dfout(outfile)
        with open(datafile, 'rb') as f:
            preddf = pickle.load(f, encoding='latin1')[0]
        #preddf_oracle = load_dfout(outfile)
        ranknet_ret = ret
        #discard old year
        #year <- _test_event
        # errlist records rows where the two prediction sets disagree in coverage
        errlist = {}
        errcnt, errlist[year] = cmp_df(ranknetdf[year][f'{testmodel}_mean'], preddf[_test_event]['lasso'])
        pitlaps, cautionlaps = get_racestatus_all(rankdata)
        retdata = []
        #
        # Model,SignAcc,MAE,50-Risk,90-Risk
        #
        cols = ['Year','Model','ExpID','laptype','SignAcc','MAE','50-Risk','90-Risk']
        models = {'currank':'CurRank','rf':'RandomForest','svr_lin':'SVM','xgb':'XGBoost'}
        # baselines on all laps
        for clf in ['currank','rf','svr_lin','xgb']:
            print('year:',year,'clf:',clf)
            dfout, accret = eval_sync(preddf[_test_event][clf],errlist[year])
            fsamples, ftss = df2samples_ex(dfout)
            _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
            retdata.append([_test_event,models[clf],configname,'all', accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
        # ranknet model on all laps
        dfout, accret = eval_sync(ranknetdf[year][f'{testmodel}_mean'], errlist[year],force2int=True)
        #fsamples, ftss = df2samples(dfout)
        fsamples, ftss = runs2samples(ranknet_ret[mid],errlist[f'{year}'])
        _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
        retdata.append([_test_event,f'{testmodel}',configname,'all',accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
        # split evaluation
        if True:
            for laptype in ['normalpit','cautionpit']:
                # select the set
                gvar.maxlap = get_event_info(_test_event)[2]
                normallaps = set([x for x in range(1, gvar.maxlap + 1)]) - set(cautionlaps)
                if laptype == 'normalpit':
                    sellaps = normallaps
                    clearlaps = cautionlaps
                else:
                    sellaps = cautionlaps
                    clearlaps = normallaps
                # pitcoveredlaps start idx = 1
                startlaps = [x-1 for x in sellaps]
                clearidx = np.array([x-1 for x in clearlaps])
                print('sellaps:', len(sellaps), 'clearlaps:',len(clearlaps))
                # evaluation start
                for clf in ['currank','rf','svr_lin','xgb']:
                    dfout, accret = eval_sync(preddf[_test_event][clf],errlist[year])
                    #debug
                    if clf == 'currank':
                        print('currank min startlap:', np.min(dfout.startlap.values))
                        print('currank startlaps:', dfout.startlap.values)
                        print('currank endlaps:', dfout.endlap.values)
                    #dfout = dfout[dfout['endlap'].isin(startlaps)]
                    dfout = dfout[dfout['startlap'].isin(startlaps)]
                    accret = stint.get_evalret(dfout)[0]
                    fsamples, ftss = df2samples_ex(dfout)
                    #fsamples, ftss = clear_samples(fsamples, ftss, clearidx)
                    _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
                    #dfout = dfout[dfout['endlap'].isin(startlaps)]
                    #accret = stint.get_evalret(dfout)[0]
                    retdata.append([_test_event,models[clf],configname,laptype, accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
                dfout, accret = eval_sync(ranknetdf[year][f'{testmodel}_mean'], errlist[year],force2int=True)
                print('ranknet min startlap:', np.min(dfout.startlap.values))
                print('ranknet startlaps:', dfout.startlap.values)
                print('ranknet endlaps:', sorted(set(list((dfout.endlap.values)))))
                print('sel laps::', startlaps)
                print('clear laps::', clearidx)
                print('cautionlaps:', cautionlaps)
                dfoutx = dfout[dfout['startlap'].isin(clearidx)]
                #dfoutx = dfout[dfout['endlap'].isin(clearidx)]
                print('matched cleared endlaps::', sorted(set(list((dfoutx.endlap.values)))))
                dfout = dfout[dfout['startlap'].isin(startlaps)]
                #dfout = dfout[dfout['endlap'].isin(startlaps)]
                print('matched endlaps::', sorted(set(list((dfout.endlap.values)))))
                accret = stint.get_evalret(dfout)[0]
                #fsamples, ftss = df2samples(dfout)
                fsamples, ftss = runs2samples(ranknet_ret[mid],errlist[f'{year}'])
                fsamples, ftss = clear_samples(fsamples, ftss, clearidx)
                _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
                #dfout = dfout[dfout['endlap'].isin(startlaps)]
                #accret = stint.get_evalret(dfout)[0]
                retdata.append([_test_event,f'{testmodel}',configname,laptype,accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
    # end of evaluation
    oracle_eval_result = pd.DataFrame(data=retdata, columns=cols)
    if _savedata:
        oracle_eval_result.to_csv(EVALUATION_RESULT_DF)
# ### 6. Draw forecasting results
# In[12]:
# long-horizon forecasting per car; only meaningful for short-term,
# non-joint-training runs
if _forecast_mode == 'shortterm' and _joint_train == False:
    if _skip_overwrite and os.path.exists(LONG_FORECASTING_DFS):
        fname = LONG_FORECASTING_DFS
        print('Load Long Forecasting Data:',fname)
        with open(fname, 'rb') as f:
            alldata = pickle.load(f, encoding='latin1')
            print('.......loaded data, alldata keys=', alldata.keys())
    else:
        oracle_ret = ret
        mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], cur_featurestr)
        print('eval mid:', mid, f'{testmodel}_ret keys:', ret.keys())
        ## init predictor
        _predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
        oracle_dfout = do_rerank(dfs[year][f'{testmodel}_mean'])
        carlist = set(list(oracle_dfout.carno.values))
        carlist = [int(x) for x in carlist]
        print('carlist:', carlist,'len:',len(carlist))
        #carlist = [13, 7, 3, 12]
        #carlist = [13]
        retdata = {}
        for carno in carlist:
            print("*"*40)
            print('Run models for carno=', carno)
            # create the test_ds first
            test_cars = [carno]
            #train_ds, test_ds, trainset, testset = stint.make_dataset_byevent(events_id[_test_event],
            #                prediction_length,freq,
            #                oracle_mode=stint.MODE_ORACLE,
            #                run_ts = _run_ts,
            #                test_event = _test_event,
            #                test_cars=test_cars,
            #                half_moving_win = 0,
            #                train_ratio = 0.01)
            train_ds, test_ds, trainset, testset = make_dataset_byevent(prepared_laptimedata, prediction_length,freq,
                    useeid=useeid, run_ts=_run_ts,
                    test_event=_test_event, log_transform =False,
                    context_ratio=0, train_ratio = 0,
                    joint_train = _joint_train,
                    test_cars = test_cars)
            if (len(testset) <= 10 + prediction_length):
                print('ts too short, skip ', len(testset))
                continue
            #by first run samples
            samples = oracle_ret[mid][0][1][test_cars[0]]
            tss = oracle_ret[mid][0][2][test_cars[0]]
            target_oracle1, tss_oracle1 = long_predict_bysamples('1run-samples', samples, tss, test_ds, _predictor)
            #by first run output df(_use_mean = true, already reranked)
            df = oracle_ret[mid][0][0]
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle2, tss_oracle2 = long_predict_bydf(f'{testmodel}-1run-dfout', dfin_oracle, test_ds, _predictor)
            #by multi-run mean at oracle_dfout
            df = oracle_dfout
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle3, tss_oracle3 = long_predict_bydf(f'{testmodel}-multimean', dfin_oracle, test_ds, _predictor)
            #no rerank
            df = ranknetdf[year][f'{testmodel}_mean']
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle4, tss_oracle4 = long_predict_bydf(f'{testmodel}-norerank-multimean', dfin_oracle, test_ds, _predictor)
            #by multiple runs
            target_oracle_multirun, tss_oracle_multirun = get_ranknet_multirun(
                    oracle_ret[mid],
                    test_cars[0], test_ds, _predictor,sampleCnt=loopcnt)
            # per-car record: [list of ts variants, list of target variants]
            retdata[carno] = [[tss_oracle1,tss_oracle2,tss_oracle3,tss_oracle4,tss_oracle_multirun],
                    [target_oracle1,target_oracle2,target_oracle3,target_oracle4,target_oracle_multirun]]
        alldata = retdata
        if _savedata:
            with open(LONG_FORECASTING_DFS, 'wb') as f:
                pickle.dump(alldata, f, pickle.HIGHEST_PROTOCOL)
# In[13]:
if _draw_figs:
    if _forecast_mode == 'shortterm' and _joint_train == False:
        destdir = FORECAST_FIGS_DIR
        if _skip_overwrite and os.path.exists(destdir):
            print('Long Forecasting Figures at:',destdir)
        else:
            with open(STAGE_DATASET, 'rb') as f:
                stagedata = pickle.load(f, encoding='latin1')
            _alldata, rankdata, _acldata, _flagdata = stagedata[_test_event]
            #set global variable
            gvar.rankdata = rankdata
            #destdir = outputRoot + 'oracle-forecast-figs/'
            os.makedirs(destdir, exist_ok=True)
            for carno in alldata:
                plotoracle(alldata, carno, destdir)
            #draw summary result
            outputfile = destdir + f'{configname}'
            plotallcars(alldata, outputfile, drawid = 0)
# final output
pd.set_option("display.max_rows", None, "display.max_columns", None)
print(oracle_eval_result)
rankpredictor | rankpredictor-master/src/indycar/model/transformer-weighted-fullloss/trans_encoder.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
from gluonts.model.transformer.layers import (
TransformerProcessBlock,
TransformerFeedForward,
MultiHeadSelfAttention,
InputLayer,
)
class TransformerEncoder(HybridBlock):
    """
    Single Transformer encoder layer: input projection followed by
    multi-head self-attention and a position-wise feed-forward network,
    each wrapped in configurable pre/post process blocks
    (what each block does is driven by ``config["pre_seq"]``/``config["post_seq"]``).
    """

    @validated()
    def __init__(self, encoder_length: int, config: Dict, **kwargs) -> None:
        super().__init__(**kwargs)
        # length of the conditioning range; stored but not used in this block
        self.encoder_length = encoder_length

        with self.name_scope():
            # projects the raw input features to the model dimension
            self.enc_input_layer = InputLayer(model_size=config["model_dim"])
            self.enc_pre_self_att = TransformerProcessBlock(
                sequence=config["pre_seq"],
                dropout=config["dropout_rate"],
                prefix="pretransformerprocessblock_",
            )
            self.enc_self_att = MultiHeadSelfAttention(
                att_dim_in=config["model_dim"],
                heads=config["num_heads"],
                att_dim_out=config["model_dim"],
                dropout=config["dropout_rate"],
                prefix="multiheadselfattention_",
            )
            self.enc_post_self_att = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postselfatttransformerprocessblock_",
            )
            self.enc_ff = TransformerFeedForward(
                inner_dim=config["model_dim"] * config["inner_ff_dim_scale"],
                out_dim=config["model_dim"],
                act_type=config["act_type"],
                dropout=config["dropout_rate"],
                prefix="transformerfeedforward_",
            )
            self.enc_post_ff = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postfftransformerprocessblock_",
            )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, data: Tensor) -> Tensor:
        """
        A transformer encoder block consists of a self-attention and a feed-forward layer with pre/post process blocks
        in between.
        """

        # input layer
        inputs = self.enc_input_layer(data)

        # self-attention (no mask -> full attention over the encoder range;
        # the second return value, the cache, is discarded)
        data_self_att, _ = self.enc_self_att(
            self.enc_pre_self_att(inputs, None)
        )
        data = self.enc_post_self_att(data_self_att, inputs)

        # feed-forward
        data_ff = self.enc_ff(data)
        data = self.enc_post_ff(data_ff, data)

        return data
| 3,242 | 33.5 | 118 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-weighted-fullloss/layers.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict, Optional, Tuple
# Third-party imports
import mxnet as mx
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.model.common import Tensor
def split_heads(F, x: Tensor, dim_per_head: int, heads: int) -> Tensor:
    r"""
    Fold the attention heads of *x* into the batch dimension.

    Parameters
    ----------
    x
        Tensor of shape (batch_size, time_length, dim).
    dim_per_head
        Dimension per head
    heads
        Number of heads

    Returns
    -------
    Tensor of shape (batch_size * heads, time_length, dim_per_head).
    """
    # expose the head axis: (batch_size, time_length, heads, dim_per_head)
    by_head = F.reshape(data=x, shape=(0, -1, heads, dim_per_head))
    # move heads next to batch: (batch_size, heads, time_length, dim_per_head)
    head_major = F.transpose(data=by_head, axes=(0, 2, 1, 3))
    # merge batch and head axes: (batch_size * heads, time_length, dim_per_head)
    return F.reshape(data=head_major, shape=(-3, -1, dim_per_head))
def dot_attention(
    F,
    queries: Tensor,
    keys: Tensor,
    values: Tensor,
    mask: Optional[Tensor] = None,
    dropout: float = 0.0,
) -> Tensor:
    r"""
    Dot-product attention: softmax-normalized query/key scores used to
    build a weighted sum over the values.

    Parameters
    ----------
    queries
        Attention queries of shape (n, lq, d)
    keys
        Attention keys of shape (n, lk, d)
    values
        Attention values of shape (n, lk, dv)
    mask
        Optional additive mask applied to the raw scores
    dropout
        Dropout rate applied to the attention probabilities

    Returns
    -------
    'Context' vectors for each query of shape (n, lq, dv)
    """
    # raw scores: (n, lq, lk)
    scores = F.batch_dot(lhs=queries, rhs=keys, transpose_b=True)
    if mask is not None:
        scores = F.broadcast_add(scores, mask)

    # normalize over the key axis
    probs = F.softmax(scores, axis=-1)
    if dropout > 0.0:
        probs = F.Dropout(probs, p=dropout)

    # (n, lq, lk) x (n, lk, dv) -> (n, lq, dv)
    return F.batch_dot(lhs=probs, rhs=values)
def combine_heads(F, x: Tensor, dim_per_head: int, heads: int) -> Tensor:
    r"""
    Inverse of :func:`split_heads`: pull the head axis back out of the batch
    dimension and concatenate the heads along the feature axis.

    Parameters
    ----------
    x
        Tensor of shape (batch_size * heads, time_length, dim_per_head)
    dim_per_head
        Dimension per head
    heads
        Number of heads

    Returns
    -------
    Tensor of shape (batch_size, time_length, dim)
    """
    # un-merge batch and heads: (batch_size, heads, time_length, dim_per_head)
    unfolded = F.reshape(data=x, shape=(-4, -1, heads, 0, dim_per_head))
    # time-major per batch: (batch_size, time_length, heads, dim_per_head)
    time_major = F.transpose(unfolded, axes=(0, 2, 1, 3))
    # concatenate heads: (batch_size, time_length, dim)
    return F.reshape(time_major, shape=(-1, 0, dim_per_head * heads))
class LayerNormalization(HybridBlock):
    """
    Implements layer normalization as proposed in [BKH16]_.

    Thin wrapper around ``mx.gluon.nn.LayerNorm`` applied over the last axis.
    """

    def __init__(
        self,
        scale_init: str = "ones",
        shift_init: str = "zeros",
        eps: float = 1e-06,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # initializers for the learned scale (gamma) and shift (beta)
        self.scale_init = scale_init
        self.shift_init = shift_init

        with self.name_scope():
            self.lnorm = mx.gluon.nn.LayerNorm(
                axis=-1,
                gamma_initializer=self.scale_init,
                beta_initializer=self.shift_init,
                epsilon=eps,
            )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, data: Tensor) -> Tensor:
        r"""
        Normalizes hidden units of data as follows:

        data = scale * (data - mean) / sqrt(var + eps) + shift

        Normalization is performed over the last dimension of the input data.

        Parameters
        ----------
        data
            Data to normalize of shape (d0, ..., dn, num_hidden)

        Returns
        -------
        Normalized inputs of shape: (d0, ..., dn, num_hidden)
        """
        return self.lnorm(data)
class InputLayer(HybridBlock):
    r"""
    Transforms the input vector to model_size with a one-layer MLP, i.e.,

    (batch_size, time_length, input_dim) -> (batch_size, time_length, model_size)
    """

    def __init__(self, model_size: int = 64, **kwargs) -> None:
        super().__init__(**kwargs)
        self.model_size = model_size
        with self.name_scope():
            # flatten=False keeps the time axis; the dense layer acts on the
            # last dimension only
            self.net = mx.gluon.nn.Dense(units=self.model_size, flatten=False)

    def hybrid_forward(self, F, data: Tensor, *args):
        return self.net(data)
class MultiHeadAttentionBase(HybridBlock):
    """
    Base class for Multi-head attention.

    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """

    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        assert (
            att_dim_in % heads == 0
        ), "Number of heads {} must divide attention att_dim_in {}".format(
            heads, att_dim_in
        )

        self.att_dim_in = att_dim_in
        self.heads = heads
        self.att_dim_out = att_dim_out
        self.dropout = dropout
        self.dim_per_head = self.att_dim_in // self.heads

        with self.name_scope():
            # final linear projection applied after the heads are recombined
            self.dense_att = mx.gluon.nn.Dense(
                units=self.att_dim_out, flatten=False
            )

    def _attend(
        self,
        F,
        queries: Tensor,
        keys: Tensor,
        values: Tensor,
        mask: Optional[Tensor] = None,
    ) -> Tensor:
        r"""
        Returns context vectors of multi-head dot attention.

        Parameters
        ----------
        queries
            Queries tensor of shape (batch_size, query_max_length, dim)
        keys
            Keys tensor of shape (batch_size, memory_max_length, dim)
        values
            Values tensor of shape (batch_size, memory_max_length, dim)
        mask
            Optional additive mask on the attention scores

        Returns
        -------
        Context vectors of shape (batch_size, query_max_length, att_dim_out)
        """
        # scale by 1/sqrt(dim_per_head)
        queries = queries * (self.dim_per_head ** -0.5)

        # fold heads into the batch axis: (batch_size * heads, length, dim/heads)
        queries = split_heads(F, queries, self.dim_per_head, self.heads)
        keys = split_heads(F, keys, self.dim_per_head, self.heads)
        values = split_heads(F, values, self.dim_per_head, self.heads)

        # (batch_size * heads, query_max_length, dim_per_head)
        contexts = dot_attention(
            F, queries, keys, values, mask=mask, dropout=self.dropout
        )

        # (batch_size, query_max_length, input_dim)
        contexts = combine_heads(F, contexts, self.dim_per_head, self.heads)

        # contexts: (batch_size, query_max_length, output_dim)
        contexts = self.dense_att(contexts)

        return contexts

    def hybrid_forward(self, F, *args, **kwargs):
        # subclasses (self-attention / cross-attention) implement the forward pass
        raise NotImplementedError
class MultiHeadSelfAttention(MultiHeadAttentionBase):
    r"""
    Multi-head self-attention. Independent linear projections of inputs serve as
    queries, keys, and values for the attention.
    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """
    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(att_dim_in, heads, att_dim_out, dropout, **kwargs)
        with self.name_scope():
            # Single projection producing queries, keys and values at once;
            # the result is split into three parts in hybrid_forward.
            self.dense_pre_satt = mx.gluon.nn.Dense(
                units=self.att_dim_in * 3, flatten=False
            )
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        inputs: Tensor,
        mask: Optional[Tensor] = None,
        cache: Optional[Dict[str, Optional[Tensor]]] = None,
    ) -> Tuple[Tensor, Optional[Dict]]:
        r"""
        Computes multi-head attention on a set of inputs, serving as queries,
        keys, and values. If sequence lengths are provided, they will be used
        to mask the attention scores. May also use a cache of previously
        computed inputs.
        Parameters
        ----------
        inputs
            Input data of shape (batch_size, max_length, att_dim_in)
        mask
            Optional tensor to mask attention scores
        cache
            Optional dictionary of previously computed keys and values
        Returns
        -------
        Tuple[Tensor, Optional[Dict]]
            The attention output of shape (batch_size, max_length, att_dim_out)
            together with the (possibly updated) cache.
        """
        # Q = K = V -> Q * W_q, K * W_k, V * W_v
        # combined: (batch_size, max_length, att_dim_in * 3)
        combined = self.dense_pre_satt(inputs)
        # split into queries, keys and values
        # (batch_size, max_length, att_dim_in)
        queries, keys, values = F.split(data=combined, num_outputs=3, axis=2)
        if cache is not None:
            # During step-by-step decoding the keys/values of earlier steps are
            # kept in the cache so attention spans the whole generated prefix.
            # append new keys and values to cache, update the cache
            keys = cache["k"] = (
                keys
                if "k" not in cache.keys()
                else F.concat(cache["k"], keys, dim=1)
            )
            values = cache["v"] = (
                values
                if "v" not in cache.keys()
                else F.concat(cache["v"], values, dim=1)
            )
        return self._attend(F, queries, keys, values, mask), cache
class MultiHeadAttention(MultiHeadAttentionBase):
    r"""
    Multi-head attention layer for queries independent from keys/values.
    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """
    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(att_dim_in, heads, att_dim_out, dropout, **kwargs)
        with self.name_scope():
            # Separate projections: one for the queries, two for the memory
            # (keys and values), unlike self-attention's fused projection.
            self.dense_pre_att_q = mx.gluon.nn.Dense(
                units=self.att_dim_in, flatten=False
            )
            self.dense_pre_att_k = mx.gluon.nn.Dense(
                units=self.att_dim_in, flatten=False
            )
            self.dense_pre_att_v = mx.gluon.nn.Dense(
                units=self.att_dim_in, flatten=False
            )
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self, F, queries: Tensor, memory: Tensor, mask: Optional[Tensor] = None
    ) -> Tensor:
        r"""
        Computes multi-head attention for queries given a memory tensor.
        If sequence lengths are provided, they will be used to mask the attention scores.
        A mask tensor may also be used to mask the attention scores.
        Returns a tensor of shape (batch_size, max_length, att_dim_out).
        Parameters
        ----------
        queries
            Queries tensor of shape (batch_size, query_max_length, att_dim_in)
        memory
            Memory tensor to attend to of shape (batch_size, memory_max_length, att_dim_in)
        mask
            Optional tensor to mask attention scores
        Returns
        -------
        Tensor of shape (batch_size, query_seq_len, att_dim_out)
        """
        # Q -> Q * W_q
        # K = V -> K * W_k, V * W_v
        # (batch, query_max_length, att_dim_in)
        queries = self.dense_pre_att_q(queries)
        # (batch, memory_max_length, att_dim_in)
        keys = self.dense_pre_att_k(memory)
        # (batch, memory_max_length, att_dim_in)
        values = self.dense_pre_att_v(memory)
        return self._attend(F, queries, keys, values, mask=mask)
class TransformerFeedForward(HybridBlock):
    r"""
    Position-wise feed-forward network with activation.
    .. math::
        activation(XW_1 + b_1)W_2 + b_2
    :math:`W_1`: (batch_size, d, inner_dim)
    :math:`W_2`: (batch_size, inner_dim, out_dim)
    """
    def __init__(
        self,
        inner_dim: int = 32,  # W1: (batch_size, d, inner_dim)
        out_dim: int = 32,  # W2: (batch_size, inner_dim, out_dim)
        act_type: str = "softrelu",
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.inner_dim = inner_dim
        self.out_dim = out_dim
        self.dropout = dropout
        self.act_type = act_type
        with self.name_scope():
            # Two dense layers applied position-wise (flatten=False keeps the
            # time axis), with optional dropout between them.
            self.mlp = mx.gluon.nn.HybridSequential()
            self.mlp.add(
                mx.gluon.nn.Dense(
                    units=self.inner_dim,
                    use_bias=True,
                    activation=self.act_type,
                    flatten=False,
                )
            )
            if self.dropout > 0.0:
                self.mlp.add(mx.gluon.nn.Dropout(self.dropout))
            self.mlp.add(
                mx.gluon.nn.Dense(units=out_dim, use_bias=True, flatten=False)
            )  # no activation
    def hybrid_forward(self, F, x: Tensor, *args) -> Tensor:
        r"""
        Position-wise feed-forward network with activation.
        Parameters
        ----------
        x
            Tensor of shape (batch_size, d, in_dim)
        Returns
        -------
        Tensor of shape (batch_size, d, out_dim)
        """
        return self.mlp(x)
class TransformerProcessBlock(HybridBlock):
    r"""
    Block to perform pre/post processing on layer inputs.
    The processing steps are determined by the sequence argument, which can contain one of the three operations:
    n: layer normalization
    r: residual connection
    d: dropout
    """
    def __init__(self, sequence: str, dropout: float, **kwargs) -> None:
        super().__init__(**kwargs)
        self.sequence = sequence
        self.dropout = dropout
        self.layer_norm = None
        # LayerNormalization is only instantiated when the sequence asks for it.
        if "n" in sequence:
            self.layer_norm = LayerNormalization()
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self, F, data: Tensor, prev: Optional[Tensor] = None
    ) -> Tensor:
        r"""
        Apply processing sequence to data with optional previous input.
        Parameters
        ----------
        data
            Input data of shape: (batch_size, length, num_hidden)
        prev
            Previous data of shape (batch_size, length, num_hidden)
        Returns
        -------
        Processed data of shape (batch_size, length, num_hidden).
        """
        if not self.sequence:
            return data
        if prev is None:
            # A residual connection needs a previous tensor to add to.
            assert (
                "r" not in self.sequence
            ), "Residual connection not allowed if no previous value given."
        # Operations are applied in the order given by the sequence string.
        for step in self.sequence:
            if step == "r":
                data = F.broadcast_add(data, prev)
            elif step == "n":
                data = self.layer_norm(data)
            elif step == "d":
                if self.dropout > 0.0:
                    data = F.Dropout(data, p=self.dropout)
            else:
                raise ValueError("Unknown step in sequence: %s" % step)
        return data
| 15,991 | 27.506239 | 112 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-weighted-fullloss/trans_decoder.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
from gluonts.model.transformer.layers import (
TransformerProcessBlock,
TransformerFeedForward,
MultiHeadSelfAttention,
MultiHeadAttention,
InputLayer,
)
class TransformerDecoder(HybridBlock):
    """
    Transformer decoder stack: input embedding, (masked) self-attention,
    encoder-decoder attention and a position-wise feed-forward network, each
    wrapped in pre/post processing blocks.

    Parameters
    ----------
    decoder_length
        Number of time steps the decoder operates on.
    config
        Dictionary with keys "model_dim", "num_heads", "dropout_rate",
        "pre_seq", "post_seq", "inner_ff_dim_scale" and "act_type".
    """
    @validated()
    def __init__(self, decoder_length: int, config: Dict, **kwargs) -> None:
        super().__init__(**kwargs)
        self.decoder_length = decoder_length
        # Python-side cache of self-attention keys/values, used for
        # one-step-at-a-time decoding at inference time.
        self.cache = {}
        with self.name_scope():
            self.enc_input_layer = InputLayer(model_size=config["model_dim"])
            self.dec_pre_self_att = TransformerProcessBlock(
                sequence=config["pre_seq"],
                dropout=config["dropout_rate"],
                prefix="pretransformerprocessblock_",
            )
            self.dec_self_att = MultiHeadSelfAttention(
                att_dim_in=config["model_dim"],
                heads=config["num_heads"],
                att_dim_out=config["model_dim"],
                dropout=config["dropout_rate"],
                prefix="multiheadselfattention_",
            )
            self.dec_post_self_att = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postselfatttransformerprocessblock_",
            )
            self.dec_enc_att = MultiHeadAttention(
                att_dim_in=config["model_dim"],
                heads=config["num_heads"],
                att_dim_out=config["model_dim"],
                dropout=config["dropout_rate"],
                prefix="multiheadattention_",
            )
            self.dec_post_att = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postatttransformerprocessblock_",
            )
            self.dec_ff = TransformerFeedForward(
                inner_dim=config["model_dim"] * config["inner_ff_dim_scale"],
                out_dim=config["model_dim"],
                act_type=config["act_type"],
                dropout=config["dropout_rate"],
                prefix="transformerfeedforward_",
            )
            self.dec_post_ff = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postffransformerprocessblock_",
            )
    def cache_reset(self):
        # Drop cached keys/values, e.g. before decoding a new sequence.
        self.cache = {}
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        data: Tensor,
        enc_out: Tensor,
        mask: Optional[Tensor] = None,
        is_train: bool = True,
    ) -> Tensor:
        """
        A transformer encoder block consists of a self-attention and a feed-forward layer with pre/post process blocks
        in between.

        When ``is_train`` is False, a copy of the self-attention key/value
        cache is passed in and the returned cache is stored back on the block,
        enabling autoregressive one-step decoding.
        """
        # embedding
        inputs = self.enc_input_layer(data)
        # self-attention
        data_att, cache = self.dec_self_att(
            self.dec_pre_self_att(inputs, None),
            mask,
            self.cache.copy() if not is_train else None,
        )
        data = self.dec_post_self_att(data_att, inputs)
        # encoder attention
        data_att = self.dec_enc_att(data, enc_out)
        data = self.dec_post_att(data_att, data)
        # feed-forward
        data_ff = self.dec_ff(data)
        data = self.dec_post_ff(data_ff, data)
        # NOTE(review): mutating self.cache keeps state on the Python side;
        # presumably this inference path must not be hybridized — confirm.
        if not is_train:
            self.cache = cache.copy()
        return data
| 4,259 | 32.543307 | 118 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-weighted-fullloss/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Tuple, List, Optional
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.scaler import NOPScaler, MeanScaler
from gluonts.block.feature import FeatureEmbedder
from gluonts.core.component import validated
from gluonts.distribution import DistributionOutput
from gluonts.model.common import Tensor
from gluonts.model.transformer.trans_encoder import TransformerEncoder
from gluonts.model.transformer.trans_decoder import TransformerDecoder
from gluonts.support.util import weighted_average
LARGE_NEGATIVE_VALUE = -99999999
def prod(xs):
    """Return the product of all elements of *xs*.

    An empty iterable yields 1, the conventional empty product (this is what
    callers rely on when ``target_shape`` is the empty tuple).
    """
    result = 1
    for factor in xs:
        result *= factor
    return result
class TransformerWeightedFullLossNetwork(mx.gluon.HybridBlock):
    """
    Common base for the training and prediction networks: holds the
    encoder/decoder, feature embedder, scaler and distribution projection,
    and assembles the lagged/scaled network inputs.
    """
    @validated()
    def __init__(
        self,
        encoder: TransformerEncoder,
        decoder: TransformerDecoder,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        cardinality: List[int],
        embedding_dimension: int,
        lags_seq: List[int],
        scaling: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.scaling = scaling
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.distr_output = distr_output
        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        # NOTE: sorts the caller's list in place.
        lags_seq.sort()
        self.lags_seq = lags_seq
        self.target_shape = distr_output.event_shape
        with self.name_scope():
            # Projects network outputs onto the distribution parameters.
            self.proj_dist_args = distr_output.get_args_proj()
            self.encoder = encoder
            self.decoder = decoder
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=[embedding_dimension for _ in cardinality],
            )
            # Mean scaling normalizes each series by its mean over the
            # context window; NOPScaler leaves values untouched.
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)
    @staticmethod
    def get_lagged_subsequences(
        F,
        sequence: Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> Tensor:
        """
        Returns lagged subsequences of a given sequence.
        Parameters
        ----------
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted. Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.
        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and I = len(indices), containing lagged
            subsequences. Specifically, lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        # we must have: sequence_length - lag_index - subsequences_length >= 0
        # for all lag_index, hence the following assert
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, found lag {max(indices)} "
            f"while history length is only {sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)
        lagged_values = []
        for lag_index in indices:
            # Slice from the end so the subsequence is aligned with the last
            # `subsequences_length` steps, shifted back by `lag_index`.
            begin_index = -lag_index - subsequences_length
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(
                F.slice_axis(
                    sequence, axis=1, begin=begin_index, end=end_index
                )
            )
        return F.stack(*lagged_values, axis=-1)
    def create_network_input(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, num_features, history_length)
        past_target: Tensor,  # (batch_size, history_length, 1)
        past_observed_values: Tensor,  # (batch_size, history_length)
        future_time_feat: Optional[
            Tensor
        ],  # (batch_size, num_features, prediction_length)
        future_target: Optional[Tensor],  # (batch_size, prediction_length)
    ) -> Tuple[Tensor, Tensor, Tensor]:
        """
        Creates inputs for the transformer network.
        All tensor arguments should have NTC layout.

        Returns
        -------
        inputs
            Concatenation of scaled lagged targets, time features and
            repeated static features: (batch_size, sub_seq_len, input_dim).
        scale
            Per-series scale of shape (batch_size, 1, *target_shape).
        static_feat
            Embedded static features plus log-scale:
            (batch_size, num_features + prod(target_shape)).
        """
        # Inference (no future data): use only the context window.
        # Training: use context + prediction window.
        if future_time_feat is None or future_target is None:
            time_feat = past_time_feat.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            )
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        else:
            time_feat = F.concat(
                past_time_feat.slice_axis(
                    axis=1,
                    begin=self.history_length - self.context_length,
                    end=None,
                ),
                future_time_feat,
                dim=1,
            )
            sequence = F.concat(past_target, future_target, dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags = self.get_lagged_subsequences(
            F=F,
            sequence=sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length,
        )
        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
            past_observed_values.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
        )
        embedded_cat = self.embedder(feat_static_cat)
        # in addition to embedding features, use the log scale as it can help prediction too
        # (batch_size, num_features + prod(target_shape))
        static_feat = F.concat(
            embedded_cat,
            F.log(scale)
            if len(self.target_shape) == 0
            else F.log(scale.squeeze(axis=1)),
            dim=1,
        )
        repeated_static_feat = static_feat.expand_dims(axis=1).repeat(
            axis=1, repeats=subsequences_length
        )
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))
        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = F.reshape(
            data=lags_scaled,
            shape=(
                -1,
                subsequences_length,
                len(self.lags_seq) * prod(self.target_shape),
            ),
        )
        # (batch_size, sub_seq_len, input_dim)
        inputs = F.concat(input_lags, time_feat, repeated_static_feat, dim=-1)
        return inputs, scale, static_feat
    @staticmethod
    def upper_triangular_mask(F, d):
        # Strictly upper-triangular (d, d) matrix times LARGE_NEGATIVE_VALUE:
        # added to attention scores it prevents position i from attending to
        # any future position j > i (causal mask).
        mask = F.zeros_like(F.eye(d))
        for k in range(d - 1):
            mask = mask + F.eye(d, d, k + 1)
        return mask * LARGE_NEGATIVE_VALUE
    def hybrid_forward(self, F, x, *args, **kwargs):
        # Implemented by the training / prediction subclasses.
        raise NotImplementedError
class TransformerWeightedFullLossTrainingNetwork(TransformerWeightedFullLossNetwork):
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
    ) -> Tensor:
        """
        Computes the loss for training Transformer, all inputs tensors representing time series have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        future_target : (batch_size, prediction_length, *target_shape)
        Returns
        -------
        Scalar loss: the batch mean of the per-series weighted loss, where the
        likelihood is evaluated over the full context + prediction window
        ("full loss") and re-weighted towards time steps where the target
        changed (see below).
        """
        # create the inputs for the encoder
        inputs, scale, _ = self.create_network_input(
            F=F,
            feat_static_cat=feat_static_cat,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
        )
        enc_input = F.slice_axis(
            inputs, axis=1, begin=0, end=self.context_length
        )
        dec_input = F.slice_axis(
            inputs, axis=1, begin=self.context_length, end=None
        )
        # pass through encoder
        enc_out = self.encoder(enc_input)
        # input to decoder
        dec_output = self.decoder(
            dec_input,
            enc_out,
            self.upper_triangular_mask(F, self.prediction_length),
        )
        #concat all targets
        # "full loss": distribution parameters are projected from both the
        # encoder (context) outputs and the decoder (prediction) outputs.
        all_output = F.concat(
            enc_out,
            dec_output,
            dim=1
        )
        # compute loss
        #distr_args = self.proj_dist_args(dec_output)
        distr_args = self.proj_dist_args(all_output)
        distr = self.distr_output.distribution(distr_args, scale=scale)
        # original loss
        #loss = distr.loss(future_target)
        # put together target sequence
        # (batch_size, seq_len, *target_shape)
        target = F.concat(
            past_target.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            ),
            future_target,
            dim=1,
        )
        # (batch_size, seq_len)
        loss = distr.loss(target)
        #loss = distr.loss(future_target)
        ## (batch_size, seq_len, *target_shape)
        #observed_values = F.concat(
        #    past_observed_values.slice_axis(
        #        axis=1,
        #        begin=self.history_length - self.context_length,
        #        end=self.history_length,
        #    ),
        #    future_observed_values,
        #    dim=1,
        #)
        ## mask the loss at one time step iff one or more observations is missing in the target dimensions
        ## (batch_size, seq_len)
        #loss_weights1 = (
        #    observed_values
        #    if (len(self.target_shape) == 0)
        #    else observed_values.min(axis=-1, keepdims=False)
        #)
        # deal with imbalance problem
        # set higher weight for loss at time step when target changes
        #loss_weights = (observed_values>0)*1./35 + (observed_values==0)*1.
        #print('observed shape:', observed_values.shape)
        #import pdb; pdb.set_trace()
        #if _hybridized_:
        # NOTE(review): dead branch — `if False:` always falls through to the
        # else; apparently left over from a hybridization experiment.
        if False:
            r = F.slice_axis(target, axis=1, begin=-2, end=None)
            l = F.slice_axis(target, axis=1, begin=-4, end=-2)
            w1 = F.ones_like(r)
            w9 = F.ones_like(r)*9
            w = F.where(r==l, w1, w9)
            loss_weights2 = w
        else:
            # Weight 9 where the target differs from its value two steps
            # earlier, weight 1 otherwise — target changes (e.g. rank changes)
            # are rare, so this counters the imbalance. The lag of 2
            # presumably matches prediction_length=2 — TODO confirm.
            r = F.slice_axis(target, axis=1, begin=2, end=None)
            l = F.slice_axis(target, axis=1, begin=0, end=-2)
            w1 = F.ones_like(r)
            w9 = F.ones_like(r)*9
            w = F.where(r==l, w1, w9)
            # First two positions have no lag-2 predecessor: weight 1.
            s = F.slice_axis(target, axis=1, begin=0, end=2)
            z = F.ones_like(s)
            loss_weights2 = F.concat(z, w)
        #loss_weights = F.where(loss_weights1==0, loss_weights1, loss_weights2)
        #
        weighted_loss = weighted_average(
            F=F, x=loss, weights=loss_weights2, axis=1
        )
        # need to mask possible nans and -inf
        #loss = F.where(condition=loss_weights, x=loss, y=F.zeros_like(loss))
        #return weighted loss of future
        #loss = F.slice_axis(weighted_loss, axis=1, begin=-2, end=None)
        loss = weighted_loss
        return loss.mean()
class TransformerWeightedFullLossPredictionNetwork(TransformerWeightedFullLossNetwork):
    @validated()
    def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
        super().__init__(**kwargs)
        self.num_parallel_samples = num_parallel_samples
        # for decoding the lags are shifted by one,
        # at the first time-step of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]
    def sampling_decoder(
        self,
        F,
        static_feat: Tensor,
        past_target: Tensor,
        time_feat: Tensor,
        scale: Tensor,
        enc_out: Tensor,
    ) -> Tensor:
        """
        Computes sample paths by unrolling the decoder one step at a time,
        feeding each sampled value back in as the next step's lagged input.
        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length, 1).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, ).
        enc_out: Tensor
            output of the encoder. Shape: (batch_size, num_cells)
        Returns
        --------
        sample_paths : Tensor
            a tensor containing sampled paths. Shape: (batch_size, num_sample_paths, prediction_length).
        """
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_time_feat = time_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_static_feat = static_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_enc_out = enc_out.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_scale = scale.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        future_samples = []
        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            lags = self.get_lagged_subsequences(
                F=F,
                sequence=repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags_scaled = F.broadcast_div(
                lags, repeated_scale.expand_dims(axis=-1)
            )
            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = F.reshape(
                data=lags_scaled,
                shape=(-1, 1, prod(self.target_shape) * len(self.lags_seq)),
            )
            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            dec_input = F.concat(
                input_lags,
                repeated_time_feat.slice_axis(axis=1, begin=k, end=k + 1),
                repeated_static_feat,
                dim=-1,
            )
            # one-step decode; is_train=False so the decoder's internal
            # key/value cache accumulates across the k iterations.
            dec_output = self.decoder(dec_input, repeated_enc_out, None, False)
            distr_args = self.proj_dist_args(dec_output)
            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(
                distr_args, scale=repeated_scale
            )
            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample()
            # (batch_size * num_samples, seq_len, *target_shape)
            # feed the sample back so it is available as a lag next step
            repeated_past_target = F.concat(
                repeated_past_target, new_samples, dim=1
            )
            future_samples.append(new_samples)
        # reset cache of the decoder
        self.decoder.cache_reset()
        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = F.concat(*future_samples, dim=1)
        # (batch_size, num_samples, *target_shape, prediction_length)
        return samples.reshape(
            shape=(
                (-1, self.num_parallel_samples)
                + self.target_shape
                + (self.prediction_length,)
            )
        )
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
    ) -> Tensor:
        """
        Predicts samples, all tensors should have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        Returns predicted samples
        -------
        """
        # create the inputs for the encoder
        inputs, scale, static_feat = self.create_network_input(
            F=F,
            feat_static_cat=feat_static_cat,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=None,
            future_target=None,
        )
        # pass through encoder
        enc_out = self.encoder(inputs)
        return self.sampling_decoder(
            F=F,
            past_target=past_target,
            time_feat=future_time_feat,
            static_feat=static_feat,
            scale=scale,
            enc_out=enc_out,
        )
| 19,509 | 33.168126 | 116 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-weighted-fullloss/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import StudentTOutput, DistributionOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import (
TimeFeature,
get_lags_for_frequency,
time_features_from_frequency_str,
)
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SetField,
Transformation,
VstackFeatures,
)
# Relative imports
from ._network import (
TransformerWeightedFullLossPredictionNetwork,
TransformerWeightedFullLossTrainingNetwork,
)
from gluonts.model.transformer.trans_encoder import TransformerEncoder
from gluonts.model.transformer.trans_decoder import TransformerDecoder
class TransformerWeightedFullLossEstimator(GluonEstimator):
"""
Construct a Transformer estimator.
This implements a Transformer model, close to the one described in
[Vaswani2017]_.
.. [Vaswani2017] Vaswani, Ashish, et al. "Attention is all you need."
Advances in neural information processing systems. 2017.
Parameters
----------
freq
Frequency of the data to train on and predict
prediction_length
Length of the prediction horizon
context_length
Number of steps to unroll the RNN for before computing predictions
(default: None, in which case context_length = prediction_length)
trainer
Trainer object to be used (default: Trainer())
dropout_rate
Dropout regularization parameter (default: 0.1)
cardinality
Number of values of the each categorical feature (default: [1])
embedding_dimension
Dimension of the embeddings for categorical features (the same
dimension is used for all embeddings, default: 5)
distr_output
Distribution to use to evaluate observations and sample predictions
(default: StudentTOutput())
model_dim
Dimension of the transformer network, i.e., embedding dimension of the input
(default: 32)
inner_ff_dim_scale
Dimension scale of the inner hidden layer of the transformer's
feedforward network (default: 4)
pre_seq
Sequence that defined operations of the processing block before the main transformer
network. Available operations: 'd' for dropout, 'r' for residual connections
and 'n' for normalization (default: 'dn')
post_seq
seq
Sequence that defined operations of the processing block in and after the main
transformer network. Available operations: 'd' for dropout, 'r' for residual connections
and 'n' for normalization (default: 'drn').
act_type
Activation type of the transformer network (default: 'softrelu')
num_heads
Number of heads in the multi-head attention (default: 8)
scaling
Whether to automatically scale the target values (default: true)
lags_seq
Indices of the lagged target values to use as inputs of the RNN
(default: None, in which case these are automatically determined
based on freq)
time_features
Time features to use as inputs of the RNN (default: None, in which
case these are automatically determined based on freq)
num_parallel_samples
Number of evaluation samples per time series to increase parallelism during inference.
This is a model optimization that does not affect the accuracy (default: 100)
"""
    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        context_length: Optional[int] = None,
        trainer: Trainer = Trainer(),
        dropout_rate: float = 0.1,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: int = 20,
        distr_output: DistributionOutput = StudentTOutput(),
        model_dim: int = 32,
        inner_ff_dim_scale: int = 4,
        pre_seq: str = "dn",
        post_seq: str = "drn",
        act_type: str = "softrelu",
        num_heads: int = 8,
        scaling: bool = True,
        lags_seq: Optional[List[int]] = None,
        time_features: Optional[List[TimeFeature]] = None,
        use_feat_dynamic_real: bool = False,
        use_feat_static_cat: bool = False,
        num_parallel_samples: int = 100,
    ) -> None:
        super().__init__(trainer=trainer)
        # Validate hyper-parameters up front (see class docstring for their
        # meaning).
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
        assert (
            cardinality is not None or not use_feat_static_cat
        ), "You must set `cardinality` if `use_feat_static_cat=True`"
        assert cardinality is None or all(
            [c > 0 for c in cardinality]
        ), "Elements of `cardinality` should be > 0"
        assert (
            embedding_dimension > 0
        ), "The value of `embedding_dimension` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"
        self.freq = freq
        self.prediction_length = prediction_length
        # Default the context window to the prediction horizon.
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.distr_output = distr_output
        self.dropout_rate = dropout_rate
        self.use_feat_dynamic_real = use_feat_dynamic_real
        self.use_feat_static_cat = use_feat_static_cat
        # With no static categorical features a single dummy category is used.
        self.cardinality = cardinality if use_feat_static_cat else [1]
        self.embedding_dimension = embedding_dimension
        self.num_parallel_samples = num_parallel_samples
        # Lags and time features default to frequency-derived values.
        self.lags_seq = (
            lags_seq
            if lags_seq is not None
            else get_lags_for_frequency(freq_str=freq)
        )
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )
        # Enough history to cover the context window plus the largest lag.
        self.history_length = self.context_length + max(self.lags_seq)
        self.scaling = scaling
        self.config = {
            "model_dim": model_dim,
            "pre_seq": pre_seq,
            "post_seq": post_seq,
            "dropout_rate": dropout_rate,
            "inner_ff_dim_scale": inner_ff_dim_scale,
            "act_type": act_type,
            "num_heads": num_heads,
        }
        self.encoder = TransformerEncoder(
            self.context_length, self.config, prefix="enc_"
        )
        self.decoder = TransformerDecoder(
            self.prediction_length, self.config, prefix="dec_"
        )
def create_transformation(self) -> Transformation:
remove_field_names = [
FieldName.FEAT_DYNAMIC_CAT,
FieldName.FEAT_STATIC_REAL,
]
if not self.use_feat_dynamic_real:
remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
return Chain(
[RemoveFields(field_names=remove_field_names)]
+ (
[SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
if not self.use_feat_static_cat
else []
)
+ [
AsNumpyArray(field=FieldName.FEAT_STATIC_CAT, expected_ndim=1),
AsNumpyArray(
field=FieldName.TARGET,
# in the following line, we add 1 for the time dimension
expected_ndim=1 + len(self.distr_output.event_shape),
),
AddObservedValuesIndicator(
target_field=FieldName.TARGET,
output_field=FieldName.OBSERVED_VALUES,
),
AddTimeFeatures(
start_field=FieldName.START,
target_field=FieldName.TARGET,
output_field=FieldName.FEAT_TIME,
time_features=self.time_features,
pred_length=self.prediction_length,
),
AddAgeFeature(
target_field=FieldName.TARGET,
output_field=FieldName.FEAT_AGE,
pred_length=self.prediction_length,
log_scale=True,
),
VstackFeatures(
output_field=FieldName.FEAT_TIME,
input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
+ (
[FieldName.FEAT_DYNAMIC_REAL]
if self.use_feat_dynamic_real
else []
),
),
InstanceSplitter(
target_field=FieldName.TARGET,
is_pad_field=FieldName.IS_PAD,
start_field=FieldName.START,
forecast_start_field=FieldName.FORECAST_START,
train_sampler=ExpectedNumInstanceSampler(num_instances=1),
past_length=self.history_length,
future_length=self.prediction_length,
time_series_fields=[
FieldName.FEAT_TIME,
FieldName.OBSERVED_VALUES,
],
),
]
)
def create_training_network(self) -> TransformerWeightedFullLossTrainingNetwork:
training_network = TransformerWeightedFullLossTrainingNetwork(
encoder=self.encoder,
decoder=self.decoder,
history_length=self.history_length,
context_length=self.context_length,
prediction_length=self.prediction_length,
distr_output=self.distr_output,
cardinality=self.cardinality,
embedding_dimension=self.embedding_dimension,
lags_seq=self.lags_seq,
scaling=True,
)
return training_network
def create_predictor(
self, transformation: Transformation, trained_network: HybridBlock
) -> Predictor:
prediction_network = TransformerWeightedFullLossPredictionNetwork(
encoder=self.encoder,
decoder=self.decoder,
history_length=self.history_length,
context_length=self.context_length,
prediction_length=self.prediction_length,
distr_output=self.distr_output,
cardinality=self.cardinality,
embedding_dimension=self.embedding_dimension,
lags_seq=self.lags_seq,
scaling=True,
num_parallel_samples=self.num_parallel_samples,
)
copy_parameters(trained_network, prediction_network)
return RepresentableBlockPredictor(
input_transform=transformation,
prediction_net=prediction_network,
batch_size=self.trainer.batch_size,
freq=self.freq,
prediction_length=self.prediction_length,
ctx=self.trainer.ctx,
)
| 12,301 | 37.807571 | 100 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deep_factor_oldrnnmodel/RNNModel.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Third-party imports
from mxnet.gluon import HybridBlock, nn
# First-party imports
from gluonts.block.rnn import RNN
from gluonts.core.component import validated
class RNNModel(HybridBlock):
    """Recurrent network with a per-step dense projection.

    Runs an RNN of the requested `mode` over the inputs and projects each
    time step's hidden state to `num_output` units.
    """

    @validated()
    def __init__(
        self,
        mode,
        num_hidden,
        num_layers,
        num_output,
        bidirectional=False,
        **kwargs,
    ):
        super(RNNModel, self).__init__(**kwargs)
        self.num_output = num_output
        with self.name_scope():
            # Recurrent backbone; `mode` selects the cell type.
            self.rnn = RNN(
                mode=mode,
                num_hidden=num_hidden,
                num_layers=num_layers,
                bidirectional=bidirectional,
            )
            # Per-time-step projection (flatten=False keeps the time axis).
            self.decoder = nn.Dense(
                num_output, in_units=num_hidden, flatten=False
            )

    def hybrid_forward(self, F, inputs):
        """Run the RNN over `inputs` and project every hidden state."""
        hidden = self.rnn(inputs)
        return self.decoder(hidden)
| 1,462 | 28.26 | 75 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deep_factor_oldrnnmodel/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import math
from mxnet.gluon import HybridBlock
from mxnet.gluon import nn
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.core.component import validated
from gluonts.model.common import Tensor
import indycar.model.global_variables as gvar
class DeepFactorXNetworkBase(HybridBlock):
    """Common machinery of the DeepFactorX training/prediction networks.

    Combines a shared `global_model` (latent factors computed from the time
    features), a `local_model` (per-series noise level) and an `embedder`
    for the static categorical features, whose embedding also produces the
    factor loadings.
    """

    def __init__(
        self,
        global_model: HybridBlock,
        local_model: HybridBlock,
        embedder: FeatureEmbedder,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.global_model = global_model
        self.local_model = local_model
        self.embedder = embedder
        with self.name_scope():
            # Linear map from the categorical embedding to factor loadings.
            self.loading = nn.Dense(
                units=global_model.num_output, use_bias=False
            )
        self._debug_print = True

    def assemble_features(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, 1)
        time_feat: Tensor,  # (batch_size, history_length, num_features)
    ) -> Tensor:  # (batch_size, history_length, num_features)
        """Embed the static features and tile them along the time axis."""
        # (batch_size, num_features * embedding_size)
        embedded_cat = self.embedder(feat_static_cat)
        # Tile the embedding over time without knowing the sequence length
        # up front: multiply a (batch, T, 1) tensor of ones by it.
        ones = F.ones_like(
            F.slice_axis(time_feat, axis=2, begin=-1, end=None)
        )
        # (batch_size, history_length, num_features * embedding_size)
        tiled_cat = F.batch_dot(ones, F.expand_dims(embedded_cat, axis=1))
        # Static embedding concatenated with the dynamic time features.
        input_feat = F.concat(tiled_cat, time_feat, dim=2)
        # Debug shape dump; .shape is only available outside hybridization.
        if gvar.hybridize == False:
            print('embedded_cat size:', embedded_cat.shape, 'time_feat size:', time_feat.shape, 'input_feat size:', input_feat.shape)
            self._debug_print = False
        return embedded_cat, input_feat

    def compute_global_local(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, 1)
        time_feat: Tensor,  # (batch_size, history_length, num_features)
    ) -> (Tensor, Tensor):  # both of size (batch_size, history_length, 1)
        """Return the (fixed_effect, random_effect) pair of the model."""
        embedded_cat, local_input = self.assemble_features(
            F, feat_static_cat, time_feat
        )
        # (batch_size, num_factors)
        loadings = self.loading(embedded_cat)
        # (batch_size, history_length, num_factors)
        factors = self.global_model(time_feat)
        # Loading-weighted factor sum: (batch_size, history_length, 1)
        fixed_effect = F.batch_dot(factors, loadings.expand_dims(axis=2))
        # Softplus keeps the noise level strictly positive:
        # (batch_size, history_length, 1)
        random_effect = F.log(F.exp(self.local_model(local_input)) + 1.0)
        return F.exp(fixed_effect), random_effect

    def hybrid_forward(self, F, x, *args, **kwargs):
        # Subclasses implement the actual forward pass.
        raise NotImplementedError

    def negative_normal_likelihood(self, F, y, mu, sigma):
        """Negative log likelihood of `y` under Normal(mu, sigma)."""
        z = (y - mu) / sigma
        return F.log(sigma) + 0.5 * math.log(2 * math.pi) + 0.5 * F.square(z)
class DeepFactorXTrainingNetwork(DeepFactorXNetworkBase):
    """Training-mode network: evaluates the Gaussian NLL on past targets."""

    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, 1)
        past_time_feat: Tensor,
        # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length)
    ) -> Tensor:
        """Compute the per-step negative log likelihood.

        Parameters
        ----------
        F
            Function space
        feat_static_cat
            Shape: (batch_size, 1)
        past_time_feat
            Shape: (batch_size, history_length, num_features)
        past_target
            Shape: (batch_size, history_length)

        Returns
        -------
        Tensor
            A batch of negative log likelihoods.
        """
        mu, sigma = self.compute_global_local(
            F, feat_static_cat, past_time_feat
        )
        # Add a trailing axis so the target matches (batch, T, 1).
        target = past_target.expand_dims(axis=2)
        return self.negative_normal_likelihood(F, target, mu, sigma)
class DeepFactorXPredictionNetwork(DeepFactorXNetworkBase):
    """Prediction-mode network: draws sample paths over the forecast horizon."""

    @validated()
    def __init__(
        self, prediction_len: int, num_parallel_samples: int, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.prediction_len = prediction_len
        self.num_parallel_samples = num_parallel_samples

    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_time_feat: Tensor,
        future_time_feat: Tensor,
        past_target: Tensor,
    ) -> Tensor:
        """Sample forecast paths.

        Parameters
        ----------
        F
            Function space
        feat_static_cat
            Shape: (batch_size, 1)
        past_time_feat
            Shape: (batch_size, history_length, num_features)
        future_time_feat
            Shape: (batch_size, prediction_length, num_features)
        past_target
            Shape: (batch_size, history_length)

        Returns
        -------
        Tensor
            Samples of shape (batch_size, num_samples, prediction_length).
        """
        # Run the model over history + horizon in a single pass.
        full_time_feat = F.concat(past_time_feat, future_time_feat, dim=1)
        mu, sigma = self.compute_global_local(
            F, feat_static_cat, full_time_feat
        )
        # Independent Gaussian sample paths stacked along axis 2:
        # (batch_size, history + prediction_len, num_samples)
        draws = [
            F.sample_normal(mu, sigma)
            for _ in range(self.num_parallel_samples)
        ]
        all_samples = F.concat(*draws, dim=2)
        # Keep only the forecast horizon, then move samples to axis 1.
        horizon = F.slice_axis(
            all_samples, axis=1, begin=-self.prediction_len, end=None
        )  # (batch_size, prediction_len, num_samples)
        return horizon.swapaxes(1, 2)
| 6,537 | 30.892683 | 133 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deep_factor_dropout/RNNModel.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Third-party imports
from mxnet.gluon import HybridBlock, nn
import mxnet as mx
import numpy as np
# First-party imports
from gluonts.block.rnn import RNN
from gluonts.core.component import validated
class RNNModel(HybridBlock):
    """Stacked LSTM with residual connections and zoneout, followed by a
    per-time-step dense projection to ``num_output`` units.

    NOTE(review): ``mode`` and ``bidirectional`` are accepted for interface
    compatibility with the plain RNN variant but are currently ignored --
    the cell type is hard-coded to LSTM below and the unroll is
    unidirectional. Confirm whether other modes are needed.
    """
    @validated()
    def __init__(
        self,
        mode,
        num_hidden,
        num_layers,
        num_output,
        bidirectional=False,
        dropout_rate = 0.1,
        context_length = 60,
        **kwargs,
    ):
        super(RNNModel, self).__init__(**kwargs)
        self.num_output = num_output
        # Fixed unroll length used by hybrid_forward; inputs must carry
        # exactly this many time steps.
        self.context_length = context_length
        # Cell type fixed to LSTM regardless of `mode` (see class note).
        RnnCell = mx.gluon.rnn.LSTMCell
        with self.name_scope():
            #self.rnn = RNN(
            #    mode=mode,
            #    num_hidden=num_hidden,
            #    num_layers=num_layers,
            #    bidirectional=bidirectional,
            #)
            self.rnn = mx.gluon.rnn.HybridSequentialRNNCell()
            for k in range(num_layers):
                cell = RnnCell(hidden_size=num_hidden)
                # Residual (skip) connection on every layer after the first.
                cell = mx.gluon.rnn.ResidualCell(cell) if k > 0 else cell
                # Zoneout regularization on the recurrent states.
                cell = (
                    mx.gluon.rnn.ZoneoutCell(cell, zoneout_states=dropout_rate)
                    if dropout_rate > 0.0
                    else cell
                )
                self.rnn.add(cell)
            self.rnn.cast(dtype=np.float32)
            # Per-time-step projection (flatten=False keeps the time axis).
            self.decoder = nn.Dense(
                num_output, in_units=num_hidden, flatten=False
            )
    def hybrid_forward(self, F, inputs):
        """Unroll the RNN over `inputs` (NTC layout) and project each step.

        NOTE(review): `inputs.shape` is only readable when `inputs` is an
        NDArray; under symbolic/hybridized execution the batch size falls
        back to 0 so that it is inferred -- confirm this works with the
        deployed hybridization setting.
        """
        outputs, state = self.rnn.unroll(
            inputs=inputs,
            #length=subsequences_length,
            length= self.context_length,
            layout="NTC",
            merge_outputs=True,
            begin_state=self.rnn.begin_state(
                func=F.zeros,
                #dtype=self.dtype,
                batch_size=inputs.shape[0]
                if isinstance(inputs, mx.nd.NDArray)
                else 0,
            ),
        )
        return self.decoder(outputs)
        #return self.decoder(self.rnn(inputs))
| 2,668 | 30.034884 | 79 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deep_factor_dropout/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import math
from mxnet.gluon import HybridBlock
from mxnet.gluon import nn
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.core.component import validated
from gluonts.model.common import Tensor
import indycar.model.global_variables as gvar
class DeepFactorXNetworkBase(HybridBlock):
    """Common machinery of the DeepFactorX training/prediction networks.

    Combines a shared `global_model` (latent factors computed from the time
    features), a `local_model` (per-series noise level) and an `embedder`
    for the static categorical features, whose embedding also produces the
    factor loadings.
    """

    def __init__(
        self,
        global_model: HybridBlock,
        local_model: HybridBlock,
        embedder: FeatureEmbedder,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.global_model = global_model
        self.local_model = local_model
        self.embedder = embedder
        with self.name_scope():
            # Linear map from the categorical embedding to factor loadings.
            self.loading = nn.Dense(
                units=global_model.num_output, use_bias=False
            )
        self._debug_print = True

    def assemble_features(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, 1)
        time_feat: Tensor,  # (batch_size, history_length, num_features)
    ) -> Tensor:  # (batch_size, history_length, num_features)
        """Embed the static features and tile them along the time axis."""
        # (batch_size, num_features * embedding_size)
        embedded_cat = self.embedder(feat_static_cat)
        # Tile the embedding over time without knowing the sequence length
        # up front: multiply a (batch, T, 1) tensor of ones by it.
        ones = F.ones_like(
            F.slice_axis(time_feat, axis=2, begin=-1, end=None)
        )
        # (batch_size, history_length, num_features * embedding_size)
        tiled_cat = F.batch_dot(ones, F.expand_dims(embedded_cat, axis=1))
        # Static embedding concatenated with the dynamic time features.
        input_feat = F.concat(tiled_cat, time_feat, dim=2)
        # Debug shape dump; .shape is only available outside hybridization.
        if gvar.hybridize == False:
            print('embedded_cat size:', embedded_cat.shape, 'time_feat size:', time_feat.shape, 'input_feat size:', input_feat.shape)
            self._debug_print = False
        return embedded_cat, input_feat

    def compute_global_local(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, 1)
        time_feat: Tensor,  # (batch_size, history_length, num_features)
    ) -> (Tensor, Tensor):  # both of size (batch_size, history_length, 1)
        """Return the (fixed_effect, random_effect) pair of the model."""
        embedded_cat, local_input = self.assemble_features(
            F, feat_static_cat, time_feat
        )
        # (batch_size, num_factors)
        loadings = self.loading(embedded_cat)
        # (batch_size, history_length, num_factors)
        factors = self.global_model(time_feat)
        # Loading-weighted factor sum: (batch_size, history_length, 1)
        fixed_effect = F.batch_dot(factors, loadings.expand_dims(axis=2))
        # Softplus keeps the noise level strictly positive:
        # (batch_size, history_length, 1)
        random_effect = F.log(F.exp(self.local_model(local_input)) + 1.0)
        return F.exp(fixed_effect), random_effect

    def hybrid_forward(self, F, x, *args, **kwargs):
        # Subclasses implement the actual forward pass.
        raise NotImplementedError

    def negative_normal_likelihood(self, F, y, mu, sigma):
        """Negative log likelihood of `y` under Normal(mu, sigma)."""
        z = (y - mu) / sigma
        return F.log(sigma) + 0.5 * math.log(2 * math.pi) + 0.5 * F.square(z)
class DeepFactorXTrainingNetwork(DeepFactorXNetworkBase):
    """Training-mode network: evaluates the Gaussian NLL on past targets."""

    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, 1)
        past_time_feat: Tensor,
        # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length)
    ) -> Tensor:
        """Compute the per-step negative log likelihood.

        Parameters
        ----------
        F
            Function space
        feat_static_cat
            Shape: (batch_size, 1)
        past_time_feat
            Shape: (batch_size, history_length, num_features)
        past_target
            Shape: (batch_size, history_length)

        Returns
        -------
        Tensor
            A batch of negative log likelihoods.
        """
        mu, sigma = self.compute_global_local(
            F, feat_static_cat, past_time_feat
        )
        # Add a trailing axis so the target matches (batch, T, 1).
        target = past_target.expand_dims(axis=2)
        return self.negative_normal_likelihood(F, target, mu, sigma)
class DeepFactorXPredictionNetwork(DeepFactorXNetworkBase):
    """Prediction-mode network: draws sample paths over the forecast horizon."""

    @validated()
    def __init__(
        self, prediction_len: int, num_parallel_samples: int, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.prediction_len = prediction_len
        self.num_parallel_samples = num_parallel_samples

    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_time_feat: Tensor,
        future_time_feat: Tensor,
        past_target: Tensor,
    ) -> Tensor:
        """Sample forecast paths.

        Parameters
        ----------
        F
            Function space
        feat_static_cat
            Shape: (batch_size, 1)
        past_time_feat
            Shape: (batch_size, history_length, num_features)
        future_time_feat
            Shape: (batch_size, prediction_length, num_features)
        past_target
            Shape: (batch_size, history_length)

        Returns
        -------
        Tensor
            Samples of shape (batch_size, num_samples, prediction_length).
        """
        # Run the model over history + horizon in a single pass.
        full_time_feat = F.concat(past_time_feat, future_time_feat, dim=1)
        mu, sigma = self.compute_global_local(
            F, feat_static_cat, full_time_feat
        )
        # Independent Gaussian sample paths stacked along axis 2:
        # (batch_size, history + prediction_len, num_samples)
        draws = [
            F.sample_normal(mu, sigma)
            for _ in range(self.num_parallel_samples)
        ]
        all_samples = F.concat(*draws, dim=2)
        # Keep only the forecast horizon, then move samples to axis 1.
        horizon = F.slice_axis(
            all_samples, axis=1, begin=-self.prediction_len, end=None
        )  # (batch_size, prediction_len, num_samples)
        return horizon.swapaxes(1, 2)
| 6,537 | 30.892683 | 133 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-fullloss/trans_encoder.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
from gluonts.model.transformer.layers import (
TransformerProcessBlock,
TransformerFeedForward,
MultiHeadSelfAttention,
InputLayer,
)
class TransformerEncoder(HybridBlock):
    """One transformer encoder block.

    A self-attention sub-layer followed by a feed-forward sub-layer, each
    surrounded by configurable pre/post process blocks (normalization,
    residual connection, dropout), as selected by the `config` dict.
    """

    @validated()
    def __init__(self, encoder_length: int, config: Dict, **kwargs) -> None:
        super().__init__(**kwargs)
        self.encoder_length = encoder_length
        model_dim = config["model_dim"]
        dropout = config["dropout_rate"]
        with self.name_scope():
            # Projects raw inputs up to the model dimension.
            self.enc_input_layer = InputLayer(model_size=model_dim)
            self.enc_pre_self_att = TransformerProcessBlock(
                sequence=config["pre_seq"],
                dropout=dropout,
                prefix="pretransformerprocessblock_",
            )
            self.enc_self_att = MultiHeadSelfAttention(
                att_dim_in=model_dim,
                heads=config["num_heads"],
                att_dim_out=model_dim,
                dropout=dropout,
                prefix="multiheadselfattention_",
            )
            self.enc_post_self_att = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=dropout,
                prefix="postselfatttransformerprocessblock_",
            )
            self.enc_ff = TransformerFeedForward(
                inner_dim=model_dim * config["inner_ff_dim_scale"],
                out_dim=model_dim,
                act_type=config["act_type"],
                dropout=dropout,
                prefix="transformerfeedforward_",
            )
            self.enc_post_ff = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=dropout,
                prefix="postfftransformerprocessblock_",
            )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, data: Tensor) -> Tensor:
        """Encode `data` through self-attention and feed-forward sub-layers."""
        # Input projection.
        projected = self.enc_input_layer(data)
        # Self-attention sub-layer with pre/post processing.
        attended, _ = self.enc_self_att(
            self.enc_pre_self_att(projected, None)
        )
        encoded = self.enc_post_self_att(attended, projected)
        # Feed-forward sub-layer with post processing.
        transformed = self.enc_ff(encoded)
        return self.enc_post_ff(transformed, encoded)
| 3,242 | 33.5 | 118 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-fullloss/layers.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict, Optional, Tuple
# Third-party imports
import mxnet as mx
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.model.common import Tensor
def split_heads(F, x: Tensor, dim_per_head: int, heads: int) -> Tensor:
    r"""
    Fold the head dimension into the batch dimension.

    Parameters
    ----------
    x
        Tensor of shape (batch_size, time_length, dim).
    dim_per_head
        Dimension per head
    heads
        Number of heads

    Returns
    -------
    Tensor of shape (batch_size * heads, time_length, dim_per_head).
    """
    # Split the last axis: (batch_size, time_length, heads, dim_per_head)
    separated = F.reshape(data=x, shape=(0, -1, heads, dim_per_head))
    # Bring heads next to batch: (batch_size, heads, time_length, dim/heads)
    reordered = F.transpose(data=separated, axes=(0, 2, 1, 3))
    # Merge batch and heads: (batch_size * heads, time_length, dim/heads)
    return F.reshape(data=reordered, shape=(-3, -1, dim_per_head))
def dot_attention(
    F,
    queries: Tensor,
    keys: Tensor,
    values: Tensor,
    mask: Optional[Tensor] = None,
    dropout: float = 0.0,
) -> Tensor:
    r"""
    Dot-product attention (queries are expected to be pre-scaled by callers).

    Parameters
    ----------
    queries
        Attention queries of shape (n, lq, d)
    keys
        Attention keys of shape (n, lk, d)
    values
        Attention values of shape (n, lk, dv)
    mask
        Optional mask tensor added to the raw scores
    dropout
        Dropout rate applied to the attention weights

    Returns
    -------
    'Context' vectors for each query of shape (n, lq, dv)
    """
    # Raw attention scores: (n, lq, lk)
    scores = F.batch_dot(lhs=queries, rhs=keys, transpose_b=True)
    if mask is not None:
        scores = F.broadcast_add(scores, mask)
    # Normalize over keys, optionally drop attention weights.
    weights = F.softmax(scores, axis=-1)
    if dropout > 0.0:
        weights = F.Dropout(weights, p=dropout)
    # (n, lq, lk) x (n, lk, dv) -> (n, lq, dv)
    return F.batch_dot(lhs=weights, rhs=values)
def combine_heads(F, x: Tensor, dim_per_head: int, heads: int) -> Tensor:
    r"""
    Inverse of split_heads: unfold heads out of the batch dimension.

    Parameters
    ----------
    x
        Tensor of shape (batch_size * heads, time_length, dim_per_head)
    dim_per_head
        Dimension per head
    heads
        Number of heads

    Returns
    -------
    Tensor of shape (batch_size, time_length, dim)
    """
    # Unmerge batch and heads: (batch_size, heads, time_length, dim_per_head)
    unfolded = F.reshape(data=x, shape=(-4, -1, heads, 0, dim_per_head))
    # Move heads next to the feature axis:
    # (batch_size, time_length, heads, dim_per_head)
    reordered = F.transpose(unfolded, axes=(0, 2, 1, 3))
    # Merge heads back into features: (batch_size, time_length, dim)
    return F.reshape(reordered, shape=(-1, 0, dim_per_head * heads))
class LayerNormalization(HybridBlock):
    """
    Layer normalization as proposed in [BKH16]_, applied over the last axis.
    """

    def __init__(
        self,
        scale_init: str = "ones",
        shift_init: str = "zeros",
        eps: float = 1e-06,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.scale_init = scale_init
        self.shift_init = shift_init
        with self.name_scope():
            # Delegate to Gluon's built-in LayerNorm over the last axis.
            self.lnorm = mx.gluon.nn.LayerNorm(
                axis=-1,
                gamma_initializer=self.scale_init,
                beta_initializer=self.shift_init,
                epsilon=eps,
            )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, data: Tensor) -> Tensor:
        r"""
        Normalize the hidden units of `data` over its last dimension:

            data = scale * (data - mean) / sqrt(var + eps) + shift

        Parameters
        ----------
        data
            Data to normalize of shape (d0, ..., dn, num_hidden)

        Returns
        -------
        Normalized inputs of shape: (d0, ..., dn, num_hidden)
        """
        return self.lnorm(data)
class InputLayer(HybridBlock):
    r"""
    One-layer projection of the input to `model_size`:
    (batch_size, time_length, input_dim) -> (batch_size, time_length, model_size)
    """

    def __init__(self, model_size: int = 64, **kwargs) -> None:
        super().__init__(**kwargs)
        self.model_size = model_size
        with self.name_scope():
            # flatten=False applies the projection per time step.
            self.net = mx.gluon.nn.Dense(units=self.model_size, flatten=False)

    def hybrid_forward(self, F, data: Tensor, *args):
        """Project `data` to the model dimension."""
        return self.net(data)
class MultiHeadAttentionBase(HybridBlock):
    """
    Base class for Multi-head attention.

    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """

    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        assert (
            att_dim_in % heads == 0
        ), "Number of heads {} must divide attention att_dim_in {}".format(
            heads, att_dim_in
        )
        self.att_dim_in = att_dim_in
        self.heads = heads
        self.att_dim_out = att_dim_out
        self.dropout = dropout
        self.dim_per_head = self.att_dim_in // self.heads
        with self.name_scope():
            # Final linear projection applied after the heads are combined.
            self.dense_att = mx.gluon.nn.Dense(
                units=self.att_dim_out, flatten=False
            )

    def _attend(
        self,
        F,
        queries: Tensor,
        keys: Tensor,
        values: Tensor,
        mask: Optional[Tensor] = None,
    ) -> Tensor:
        r"""
        Compute the context vectors of multi-head dot attention.

        Parameters
        ----------
        queries
            Queries tensor of shape (batch_size, query_max_length, dim)
        keys
            Keys tensor of shape (batch_size, memory_max_length, dim)
        values
            Values tensor of shape (batch_size, memory_max_length, dim)
        mask
            Optional tensor to mask attention scores

        Returns
        -------
        Context vectors of shape (batch_size, query_max_length, att_dim_out)
        """
        # Pre-scale queries by 1/sqrt(dim_per_head).
        scaled = queries * (self.dim_per_head ** -0.5)
        # Fold heads into the batch axis: (batch * heads, length, dim/heads)
        q = split_heads(F, scaled, self.dim_per_head, self.heads)
        k = split_heads(F, keys, self.dim_per_head, self.heads)
        v = split_heads(F, values, self.dim_per_head, self.heads)
        # Per-head contexts: (batch * heads, query_max_length, dim_per_head)
        per_head = dot_attention(
            F, q, k, v, mask=mask, dropout=self.dropout
        )
        # Merge heads back: (batch, query_max_length, input_dim)
        merged = combine_heads(F, per_head, self.dim_per_head, self.heads)
        # Final projection: (batch, query_max_length, att_dim_out)
        return self.dense_att(merged)

    def hybrid_forward(self, F, *args, **kwargs):
        # Subclasses define how queries/keys/values are produced.
        raise NotImplementedError
class MultiHeadSelfAttention(MultiHeadAttentionBase):
    r"""
    Multi-head self-attention. Queries, keys and values are independent
    linear projections of the same inputs.

    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """

    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(att_dim_in, heads, att_dim_out, dropout, **kwargs)
        with self.name_scope():
            # Single projection producing queries, keys and values at once.
            self.dense_pre_satt = mx.gluon.nn.Dense(
                units=self.att_dim_in * 3, flatten=False
            )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        inputs: Tensor,
        mask: Optional[Tensor] = None,
        cache: Optional[Dict[str, Optional[Tensor]]] = None,
    ) -> Tuple[Tensor, Optional[Dict]]:
        r"""
        Compute multi-head attention over `inputs`, which serve as queries,
        keys, and values. A mask may be used to mask attention scores, and a
        cache of previously computed keys/values may be extended and reused.

        Parameters
        ----------
        inputs
            Input data of shape (batch_size, max_length, att_dim_in)
        mask
            Optional tensor to mask attention scores
        cache
            Optional dictionary of previously computed keys and values

        Returns
        -------
        Tensor
            A tensor of shape (batch_size, max_length, att_dim_out)
        """
        # Q = K = V -> Q * W_q, K * W_k, V * W_v in one dense call:
        # (batch_size, max_length, att_dim_in * 3)
        projected = self.dense_pre_satt(inputs)
        # (batch_size, max_length, att_dim_in) each
        queries, keys, values = F.split(data=projected, num_outputs=3, axis=2)
        if cache is not None:
            # Extend any previously cached keys/values, then refresh cache.
            if "k" in cache.keys():
                keys = F.concat(cache["k"], keys, dim=1)
            cache["k"] = keys
            if "v" in cache.keys():
                values = F.concat(cache["v"], values, dim=1)
            cache["v"] = values
        return self._attend(F, queries, keys, values, mask), cache
class MultiHeadAttention(MultiHeadAttentionBase):
    r"""
    Multi-head attention where queries are independent from keys/values
    (encoder-decoder attention).

    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """

    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(att_dim_in, heads, att_dim_out, dropout, **kwargs)
        with self.name_scope():
            # Separate projections for queries, keys and values.
            self.dense_pre_att_q = mx.gluon.nn.Dense(
                units=self.att_dim_in, flatten=False
            )
            self.dense_pre_att_k = mx.gluon.nn.Dense(
                units=self.att_dim_in, flatten=False
            )
            self.dense_pre_att_v = mx.gluon.nn.Dense(
                units=self.att_dim_in, flatten=False
            )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self, F, queries: Tensor, memory: Tensor, mask: Optional[Tensor] = None
    ) -> Tensor:
        r"""
        Compute multi-head attention for `queries` over a `memory` tensor;
        `mask` may be used to mask the attention scores.

        Parameters
        ----------
        queries
            Queries tensor of shape (batch_size, query_max_length, att_dim_in)
        memory
            Memory tensor to attend to of shape (batch_size, memory_max_length, att_dim_in)
        mask
            Optional tensor to mask attention scores

        Returns
        -------
        Tensor of shape (batch_size, query_seq_len, att_dim_out)
        """
        # Q -> Q * W_q: (batch, query_max_length, att_dim_in)
        q = self.dense_pre_att_q(queries)
        # K = V -> K * W_k, V * W_v: (batch, memory_max_length, att_dim_in)
        k = self.dense_pre_att_k(memory)
        v = self.dense_pre_att_v(memory)
        return self._attend(F, q, k, v, mask=mask)
class TransformerFeedForward(HybridBlock):
    r"""
    Position-wise feed-forward network with activation:

    .. math::
        activation(XW_1 + b_1)W_2 + b_2

    :math:`W_1`: (batch_size, d, inner_dim)
    :math:`W_2`: (batch_size, inner_dim, out_dim)
    """

    def __init__(
        self,
        inner_dim: int = 32,  # W1: (batch_size, d, inner_dim)
        out_dim: int = 32,  # W2: (batch_size, inner_dim, out_dim)
        act_type: str = "softrelu",
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.inner_dim = inner_dim
        self.out_dim = out_dim
        self.dropout = dropout
        self.act_type = act_type
        with self.name_scope():
            net = mx.gluon.nn.HybridSequential()
            # Expansion layer with activation.
            net.add(
                mx.gluon.nn.Dense(
                    units=self.inner_dim,
                    use_bias=True,
                    activation=self.act_type,
                    flatten=False,
                )
            )
            if self.dropout > 0.0:
                net.add(mx.gluon.nn.Dropout(self.dropout))
            # Projection back to out_dim; deliberately no activation.
            net.add(
                mx.gluon.nn.Dense(units=out_dim, use_bias=True, flatten=False)
            )
            self.mlp = net

    def hybrid_forward(self, F, x: Tensor, *args) -> Tensor:
        r"""
        Apply the position-wise feed-forward network.

        Parameters
        ----------
        x
            Tensor of shape (batch_size, d, in_dim)

        Returns
        -------
        Tensor of shape (batch_size, d1, out_dim)
        """
        return self.mlp(x)
class TransformerProcessBlock(HybridBlock):
    r"""
    Pre/post processing applied around transformer sub-layers.

    The `sequence` string lists the steps to apply in order, each one of:

        n: layer normalization
        r: residual connection
        d: dropout
    """

    def __init__(self, sequence: str, dropout: float, **kwargs) -> None:
        super().__init__(**kwargs)
        self.sequence = sequence
        self.dropout = dropout
        # Only instantiate a LayerNorm when the sequence asks for one.
        self.layer_norm = LayerNormalization() if "n" in sequence else None

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self, F, data: Tensor, prev: Optional[Tensor] = None
    ) -> Tensor:
        r"""
        Apply the configured processing sequence to `data`, optionally
        using `prev` for the residual connection.

        Parameters
        ----------
        data
            Input data of shape: (batch_size, length, num_hidden)
        prev
            Previous data of shape (batch_size, length, num_hidden)

        Returns
        -------
        Processed data of shape (batch_size, length, num_hidden).
        """
        if not self.sequence:
            return data
        if prev is None:
            assert (
                "r" not in self.sequence
            ), "Residual connection not allowed if no previous value given."
        for step in self.sequence:
            if step == "r":
                data = F.broadcast_add(data, prev)
            elif step == "n":
                data = self.layer_norm(data)
            elif step == "d":
                if self.dropout > 0.0:
                    data = F.Dropout(data, p=self.dropout)
            else:
                raise ValueError("Unknown step in sequence: %s" % step)
        return data
| 15,991 | 27.506239 | 112 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-fullloss/trans_decoder.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
from gluonts.model.transformer.layers import (
TransformerProcessBlock,
TransformerFeedForward,
MultiHeadSelfAttention,
MultiHeadAttention,
InputLayer,
)
class TransformerDecoder(HybridBlock):
    """
    Single transformer decoder block: masked self-attention over the decoder
    input, attention over the encoder output, and a position-wise feed-forward
    layer, each wrapped in configurable pre/post-processing blocks
    (dropout / residual / layer-norm, per ``config["pre_seq"]/["post_seq"]``).

    During inference (``is_train=False``) a key/value cache (``self.cache``)
    is carried across calls so autoregressive decoding does not recompute
    self-attention over previously generated steps.
    """

    @validated()
    def __init__(self, decoder_length: int, config: Dict, **kwargs) -> None:
        super().__init__(**kwargs)

        self.decoder_length = decoder_length
        # inference-time self-attention cache; cleared via cache_reset()
        self.cache = {}

        with self.name_scope():
            # embeds raw decoder features into the model dimension
            self.enc_input_layer = InputLayer(model_size=config["model_dim"])
            self.dec_pre_self_att = TransformerProcessBlock(
                sequence=config["pre_seq"],
                dropout=config["dropout_rate"],
                prefix="pretransformerprocessblock_",
            )
            self.dec_self_att = MultiHeadSelfAttention(
                att_dim_in=config["model_dim"],
                heads=config["num_heads"],
                att_dim_out=config["model_dim"],
                dropout=config["dropout_rate"],
                prefix="multiheadselfattention_",
            )
            self.dec_post_self_att = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postselfatttransformerprocessblock_",
            )
            self.dec_enc_att = MultiHeadAttention(
                att_dim_in=config["model_dim"],
                heads=config["num_heads"],
                att_dim_out=config["model_dim"],
                dropout=config["dropout_rate"],
                prefix="multiheadattention_",
            )
            self.dec_post_att = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postatttransformerprocessblock_",
            )
            self.dec_ff = TransformerFeedForward(
                inner_dim=config["model_dim"] * config["inner_ff_dim_scale"],
                out_dim=config["model_dim"],
                act_type=config["act_type"],
                dropout=config["dropout_rate"],
                prefix="transformerfeedforward_",
            )
            self.dec_post_ff = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postffransformerprocessblock_",
            )

    def cache_reset(self):
        """Clear the self-attention cache between independent sampling passes."""
        self.cache = {}

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        data: Tensor,
        enc_out: Tensor,
        mask: Optional[Tensor] = None,
        is_train: bool = True,
    ) -> Tensor:
        """
        A transformer decoder block consists of a self-attention and a feed-forward layer with pre/post process blocks
        in between.

        Parameters
        ----------
        data
            Decoder input features (NTC layout).
        enc_out
            Encoder output attended to by the encoder-decoder attention.
        mask
            Optional additive attention mask (e.g. causal upper-triangular);
            None during step-by-step inference.
        is_train
            When False, the self-attention cache is read before and stored
            after the call to support autoregressive decoding.
        """

        # embedding
        inputs = self.enc_input_layer(data)

        # self-attention; during inference the cache supplies keys/values of
        # previously decoded steps
        data_att, cache = self.dec_self_att(
            self.dec_pre_self_att(inputs, None),
            mask,
            self.cache.copy() if not is_train else None,
        )
        data = self.dec_post_self_att(data_att, inputs)

        # encoder attention
        data_att = self.dec_enc_att(data, enc_out)
        data = self.dec_post_att(data_att, data)

        # feed-forward
        data_ff = self.dec_ff(data)
        data = self.dec_post_ff(data_ff, data)

        # persist the updated cache for the next decoding step
        if not is_train:
            self.cache = cache.copy()

        return data
| 4,259 | 32.543307 | 118 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-fullloss/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Tuple, List, Optional
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.scaler import NOPScaler, MeanScaler
from gluonts.block.feature import FeatureEmbedder
from gluonts.core.component import validated
from gluonts.distribution import DistributionOutput
from gluonts.model.common import Tensor
from gluonts.model.transformer.trans_encoder import TransformerEncoder
from gluonts.model.transformer.trans_decoder import TransformerDecoder
from gluonts.support.util import weighted_average
# Additive attention-mask value: effectively -inf after softmax.
LARGE_NEGATIVE_VALUE = -99999999


def prod(xs):
    """Return the product of the elements of ``xs`` (1 for an empty iterable)."""
    result = 1
    for factor in xs:
        result *= factor
    return result
class TransformerFullLossNetwork(mx.gluon.HybridBlock):
    """
    Base network shared by the training and prediction variants of the
    "full loss" transformer: holds the encoder/decoder, the distribution
    projection, the categorical-feature embedder and the target scaler, and
    provides the common input-assembly helpers.
    """

    @validated()
    def __init__(
        self,
        encoder: TransformerEncoder,
        decoder: TransformerDecoder,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        cardinality: List[int],
        embedding_dimension: int,
        lags_seq: List[int],
        scaling: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.scaling = scaling
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.distr_output = distr_output

        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        # NOTE: sorts the caller's list in place
        lags_seq.sort()

        self.lags_seq = lags_seq

        # () for univariate targets; non-empty for multivariate outputs
        self.target_shape = distr_output.event_shape

        with self.name_scope():
            self.proj_dist_args = distr_output.get_args_proj()
            self.encoder = encoder
            self.decoder = decoder
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=[embedding_dimension for _ in cardinality],
            )
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)

    @staticmethod
    def get_lagged_subsequences(
        F,
        sequence: Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> Tensor:
        """
        Returns lagged subsequences of a given sequence.

        Parameters
        ----------
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted. Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.

        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and I = len(indices), containing lagged
            subsequences. Specifically, lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        # we must have: sequence_length - lag_index - subsequences_length >= 0
        # for all lag_index, hence the following assert
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, found lag {max(indices)} "
            f"while history length is only {sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)

        # slice one shifted window per lag index, then stack on a new last axis
        lagged_values = []
        for lag_index in indices:
            begin_index = -lag_index - subsequences_length
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(
                F.slice_axis(
                    sequence, axis=1, begin=begin_index, end=end_index
                )
            )
        return F.stack(*lagged_values, axis=-1)

    def create_network_input(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, num_features, history_length)
        past_target: Tensor,  # (batch_size, history_length, 1)
        past_observed_values: Tensor,  # (batch_size, history_length)
        future_time_feat: Optional[
            Tensor
        ],  # (batch_size, num_features, prediction_length)
        future_target: Optional[Tensor],  # (batch_size, prediction_length)
    ) -> Tuple[Tensor, Tensor, Tensor]:
        """
        Creates inputs for the transformer network.

        Builds scaled lag features, time features and (repeated) static
        features into one concatenated input tensor.

        All tensor arguments should have NTC layout.

        Returns
        -------
        inputs
            (batch_size, subsequences_length, input_dim)
        scale
            (batch_size, 1, *target_shape) — per-series scale from the scaler.
        static_feat
            (batch_size, num_features + prod(target_shape)).
        """

        if future_time_feat is None or future_target is None:
            # inference mode: only the context window is available
            time_feat = past_time_feat.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            )
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        else:
            # training mode: context + prediction range are concatenated
            time_feat = F.concat(
                past_time_feat.slice_axis(
                    axis=1,
                    begin=self.history_length - self.context_length,
                    end=None,
                ),
                future_time_feat,
                dim=1,
            )
            sequence = F.concat(past_target, future_target, dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length

        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags = self.get_lagged_subsequences(
            F=F,
            sequence=sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length,
        )

        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
            past_observed_values.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
        )

        embedded_cat = self.embedder(feat_static_cat)

        # in addition to embedding features, use the log scale as it can help prediction too
        # (batch_size, num_features + prod(target_shape))
        static_feat = F.concat(
            embedded_cat,
            F.log(scale)
            if len(self.target_shape) == 0
            else F.log(scale.squeeze(axis=1)),
            dim=1,
        )

        # broadcast static features along the time axis
        repeated_static_feat = static_feat.expand_dims(axis=1).repeat(
            axis=1, repeats=subsequences_length
        )

        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))

        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = F.reshape(
            data=lags_scaled,
            shape=(
                -1,
                subsequences_length,
                len(self.lags_seq) * prod(self.target_shape),
            ),
        )

        # (batch_size, sub_seq_len, input_dim)
        inputs = F.concat(input_lags, time_feat, repeated_static_feat, dim=-1)

        return inputs, scale, static_feat

    @staticmethod
    def upper_triangular_mask(F, d):
        """
        Build a (d, d) additive causal mask: strictly-upper-triangular entries
        are LARGE_NEGATIVE_VALUE (blocked), the rest 0.
        """
        mask = F.zeros_like(F.eye(d))
        for k in range(d - 1):
            mask = mask + F.eye(d, d, k + 1)
        return mask * LARGE_NEGATIVE_VALUE

    def hybrid_forward(self, F, x, *args, **kwargs):
        # implemented by the training / prediction subclasses
        raise NotImplementedError
class TransformerFullLossTrainingNetwork(TransformerFullLossNetwork):
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
    ) -> Tensor:
        """
        Computes the loss for training Transformer, all inputs tensors representing time series have NTC layout.

        Unlike the stock transformer training network, the likelihood here is
        evaluated over the *full* window (context + prediction length): the
        encoder output is also projected to distribution parameters and
        scored against the context part of the target ("full loss").

        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        future_target : (batch_size, prediction_length, *target_shape)

        Returns
        -------
        Scalar loss (negative log-likelihood averaged over batch and time).
        """

        # create the inputs for the encoder
        inputs, scale, _ = self.create_network_input(
            F=F,
            feat_static_cat=feat_static_cat,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
        )

        # split the joint input sequence into encoder (context) and
        # decoder (prediction) parts along the time axis
        enc_input = F.slice_axis(
            inputs, axis=1, begin=0, end=self.context_length
        )
        dec_input = F.slice_axis(
            inputs, axis=1, begin=self.context_length, end=None
        )

        # pass through encoder
        enc_out = self.encoder(enc_input)

        # input to decoder; the causal mask keeps each step from attending
        # to future prediction steps
        dec_output = self.decoder(
            dec_input,
            enc_out,
            self.upper_triangular_mask(F, self.prediction_length),
        )

        # concat all targets: project encoder AND decoder outputs so
        # distribution parameters cover the whole window, not only the
        # prediction range (`self.proj_dist_args(dec_output)` would give
        # the standard future-only loss)
        all_output = F.concat(
            enc_out,
            dec_output,
            dim=1
        )

        # compute loss
        distr_args = self.proj_dist_args(all_output)
        distr = self.distr_output.distribution(distr_args, scale=scale)

        # put together target sequence over the same full window
        # (batch_size, seq_len, *target_shape)
        target = F.concat(
            past_target.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            ),
            future_target,
            dim=1,
        )

        # (batch_size, seq_len)
        loss = distr.loss(target)

        # NOTE: earlier experiments (observation masking and re-weighting the
        # loss at time steps where the target changes, via
        # `weighted_average`, to counter class imbalance) were removed; see
        # version history for the weighted variants.

        return loss.mean()
class TransformerFullLossPredictionNetwork(TransformerFullLossNetwork):
    @validated()
    def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
        super().__init__(**kwargs)
        self.num_parallel_samples = num_parallel_samples

        # for decoding the lags are shifted by one,
        # at the first time-step of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]

    def sampling_decoder(
        self,
        F,
        static_feat: Tensor,
        past_target: Tensor,
        time_feat: Tensor,
        scale: Tensor,
        enc_out: Tensor,
    ) -> Tensor:
        """
        Computes sample paths by autoregressively unrolling the decoder one
        step at a time, feeding each sampled value back as input.

        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length, 1).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, ).
        enc_out: Tensor
            output of the encoder. Shape: (batch_size, num_cells)

        Returns
        --------
        sample_paths : Tensor
            a tensor containing sampled paths. Shape: (batch_size, num_sample_paths, prediction_length).
        """

        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_time_feat = time_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_static_feat = static_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_enc_out = enc_out.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_scale = scale.repeat(
            repeats=self.num_parallel_samples, axis=0
        )

        future_samples = []

        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            # shifted lags so that, at step k, lag 1 refers to the most
            # recently sampled value appended below
            lags = self.get_lagged_subsequences(
                F=F,
                sequence=repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )

            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags_scaled = F.broadcast_div(
                lags, repeated_scale.expand_dims(axis=-1)
            )

            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = F.reshape(
                data=lags_scaled,
                shape=(-1, 1, prod(self.target_shape) * len(self.lags_seq)),
            )

            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            dec_input = F.concat(
                input_lags,
                repeated_time_feat.slice_axis(axis=1, begin=k, end=k + 1),
                repeated_static_feat,
                dim=-1,
            )

            # is_train=False: the decoder reuses its key/value cache across steps
            dec_output = self.decoder(dec_input, repeated_enc_out, None, False)

            distr_args = self.proj_dist_args(dec_output)

            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(
                distr_args, scale=repeated_scale
            )

            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample()

            # (batch_size * num_samples, seq_len, *target_shape)
            repeated_past_target = F.concat(
                repeated_past_target, new_samples, dim=1
            )
            future_samples.append(new_samples)

        # reset cache of the decoder
        self.decoder.cache_reset()

        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = F.concat(*future_samples, dim=1)

        # (batch_size, num_samples, *target_shape, prediction_length)
        return samples.reshape(
            shape=(
                (-1, self.num_parallel_samples)
                + self.target_shape
                + (self.prediction_length,)
            )
        )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
    ) -> Tensor:
        """
        Predicts samples, all tensors should have NTC layout.

        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)

        Returns predicted samples
        -------
        """

        # create the inputs for the encoder (inference mode: no future data)
        inputs, scale, static_feat = self.create_network_input(
            F=F,
            feat_static_cat=feat_static_cat,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=None,
            future_target=None,
        )

        # pass through encoder
        enc_out = self.encoder(inputs)

        return self.sampling_decoder(
            F=F,
            past_target=past_target,
            time_feat=future_time_feat,
            static_feat=static_feat,
            scale=scale,
            enc_out=enc_out,
        )
| 19,494 | 33.201754 | 116 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-fullloss/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import StudentTOutput, DistributionOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import (
TimeFeature,
get_lags_for_frequency,
time_features_from_frequency_str,
)
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SetField,
Transformation,
VstackFeatures,
)
# Relative imports
from ._network import (
TransformerFullLossPredictionNetwork,
TransformerFullLossTrainingNetwork,
)
from gluonts.model.transformer.trans_encoder import TransformerEncoder
from gluonts.model.transformer.trans_decoder import TransformerDecoder
class TransformerFullLossEstimator(GluonEstimator):
    """
    Construct a Transformer estimator whose training loss covers the full
    window (context + prediction length).

    This implements a Transformer model, close to the one described in
    [Vaswani2017]_.

    .. [Vaswani2017] Vaswani, Ashish, et al. "Attention is all you need."
       Advances in neural information processing systems. 2017.

    Parameters
    ----------
    freq
        Frequency of the data to train on and predict
    prediction_length
        Length of the prediction horizon
    context_length
        Number of time steps the model conditions on before computing
        predictions (default: None, in which case
        context_length = prediction_length)
    trainer
        Trainer object to be used (default: Trainer())
    dropout_rate
        Dropout regularization parameter (default: 0.1)
    cardinality
        Number of values of each categorical feature (default: [1])
    embedding_dimension
        Dimension of the embeddings for categorical features (the same
        dimension is used for all embeddings, default: 20)
    distr_output
        Distribution to use to evaluate observations and sample predictions
        (default: StudentTOutput())
    model_dim
        Dimension of the transformer network, i.e., embedding dimension of the input
        (default: 32)
    inner_ff_dim_scale
        Dimension scale of the inner hidden layer of the transformer's
        feedforward network (default: 4)
    pre_seq
        Sequence that defines operations of the processing block before the main transformer
        network. Available operations: 'd' for dropout, 'r' for residual connections
        and 'n' for normalization (default: 'dn')
    post_seq
        Sequence that defines operations of the processing block in and after the main
        transformer network. Available operations: 'd' for dropout, 'r' for residual connections
        and 'n' for normalization (default: 'drn').
    act_type
        Activation type of the transformer network (default: 'softrelu')
    num_heads
        Number of heads in the multi-head attention (default: 8)
    scaling
        Whether to automatically scale the target values (default: true)
    lags_seq
        Indices of the lagged target values to use as inputs of the network
        (default: None, in which case these are automatically determined
        based on freq)
    time_features
        Time features to use as inputs of the network (default: None, in which
        case these are automatically determined based on freq)
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 100)
    """

    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        context_length: Optional[int] = None,
        trainer: Trainer = Trainer(),
        dropout_rate: float = 0.1,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: int = 20,
        distr_output: DistributionOutput = StudentTOutput(),
        model_dim: int = 32,
        inner_ff_dim_scale: int = 4,
        pre_seq: str = "dn",
        post_seq: str = "drn",
        act_type: str = "softrelu",
        num_heads: int = 8,
        scaling: bool = True,
        lags_seq: Optional[List[int]] = None,
        time_features: Optional[List[TimeFeature]] = None,
        use_feat_dynamic_real: bool = False,
        use_feat_static_cat: bool = False,
        num_parallel_samples: int = 100,
    ) -> None:
        super().__init__(trainer=trainer)

        # validate hyperparameters up front so errors surface at construction
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
        assert (
            cardinality is not None or not use_feat_static_cat
        ), "You must set `cardinality` if `use_feat_static_cat=True`"
        assert cardinality is None or all(
            [c > 0 for c in cardinality]
        ), "Elements of `cardinality` should be > 0"
        assert (
            embedding_dimension > 0
        ), "The value of `embedding_dimension` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"

        self.freq = freq
        self.prediction_length = prediction_length
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.distr_output = distr_output
        self.dropout_rate = dropout_rate
        self.use_feat_dynamic_real = use_feat_dynamic_real
        self.use_feat_static_cat = use_feat_static_cat
        # dummy cardinality of 1 when static categorical features are unused
        self.cardinality = cardinality if use_feat_static_cat else [1]
        self.embedding_dimension = embedding_dimension
        self.num_parallel_samples = num_parallel_samples
        self.lags_seq = (
            lags_seq
            if lags_seq is not None
            else get_lags_for_frequency(freq_str=freq)
        )
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )
        # history must be long enough to accommodate the largest lag
        self.history_length = self.context_length + max(self.lags_seq)
        self.scaling = scaling

        self.config = {
            "model_dim": model_dim,
            "pre_seq": pre_seq,
            "post_seq": post_seq,
            "dropout_rate": dropout_rate,
            "inner_ff_dim_scale": inner_ff_dim_scale,
            "act_type": act_type,
            "num_heads": num_heads,
        }

        # shared between the training and prediction networks so that trained
        # parameters can be copied over
        self.encoder = TransformerEncoder(
            self.context_length, self.config, prefix="enc_"
        )
        self.decoder = TransformerDecoder(
            self.prediction_length, self.config, prefix="dec_"
        )

    def create_transformation(self) -> Transformation:
        """Build the feature-engineering pipeline applied to each dataset entry."""
        remove_field_names = [
            FieldName.FEAT_DYNAMIC_CAT,
            FieldName.FEAT_STATIC_REAL,
        ]
        if not self.use_feat_dynamic_real:
            remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)

        return Chain(
            [RemoveFields(field_names=remove_field_names)]
            + (
                # inject a dummy static categorical feature when none is used
                [SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
                if not self.use_feat_static_cat
                else []
            )
            + [
                AsNumpyArray(field=FieldName.FEAT_STATIC_CAT, expected_ndim=1),
                AsNumpyArray(
                    field=FieldName.TARGET,
                    # in the following line, we add 1 for the time dimension
                    expected_ndim=1 + len(self.distr_output.event_shape),
                ),
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                ),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=self.time_features,
                    pred_length=self.prediction_length,
                ),
                AddAgeFeature(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_AGE,
                    pred_length=self.prediction_length,
                    log_scale=True,
                ),
                VstackFeatures(
                    output_field=FieldName.FEAT_TIME,
                    input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
                    + (
                        [FieldName.FEAT_DYNAMIC_REAL]
                        if self.use_feat_dynamic_real
                        else []
                    ),
                ),
                # slice each series into (past, future) training instances
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                    past_length=self.history_length,
                    future_length=self.prediction_length,
                    time_series_fields=[
                        FieldName.FEAT_TIME,
                        FieldName.OBSERVED_VALUES,
                    ],
                ),
            ]
        )

    def create_training_network(self) -> TransformerFullLossTrainingNetwork:
        """Instantiate the network variant used during training."""
        training_network = TransformerFullLossTrainingNetwork(
            encoder=self.encoder,
            decoder=self.decoder,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=True,
        )

        return training_network

    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """Wrap a trained network into a predictor with copied parameters."""
        prediction_network = TransformerFullLossPredictionNetwork(
            encoder=self.encoder,
            decoder=self.decoder,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=True,
            num_parallel_samples=self.num_parallel_samples,
        )

        # transfer learned parameters from the training network
        copy_parameters(trained_network, prediction_network)

        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
        )
| 12,253 | 37.656151 | 100 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepar-original/predictor.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import functools
import itertools
import logging
import multiprocessing as mp
import sys
import traceback
from pathlib import Path
from pydoc import locate
from tempfile import TemporaryDirectory
import json
from typing import (
TYPE_CHECKING,
Tuple,
Union,
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Type,
)
# Third-party imports
import mxnet as mx
import numpy as np
# First-party imports
import gluonts
from gluonts.distribution import Distribution, DistributionOutput
from gluonts.core.component import (
DType,
equals,
from_hyperparameters,
get_mxnet_context,
validated,
)
from gluonts.core.exception import GluonTSException
from gluonts.core.serde import dump_json, fqname_for, load_json
from gluonts.dataset.common import DataEntry, Dataset, ListDataset
from .forecast_generator import ForecastGenerator, SampleForecastGenerator
from gluonts.dataset.loader import DataBatch, InferenceDataLoader
from gluonts.model.forecast import Forecast
from gluonts.support.util import (
export_repr_block,
export_symb_block,
get_hybrid_forward_input_names,
hybrid_block_to_symbol_block,
import_repr_block,
import_symb_block,
)
from gluonts.transform import Transformation
if TYPE_CHECKING: # avoid circular import
from gluonts.model.estimator import Estimator # noqa
OutputTransform = Callable[[DataEntry, np.ndarray], np.ndarray]
class Predictor:
    """
    Abstract class representing predictor objects.

    Parameters
    ----------
    prediction_length
        Prediction horizon.
    freq
        Frequency of the predicted data.
    """

    # recorded in version.json at serialization time
    __version__: str = gluonts.__version__

    def __init__(self, prediction_length: int, freq: str) -> None:
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"

        self.prediction_length = prediction_length
        self.freq = freq

    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        """
        Compute forecasts for the time series in the provided dataset.
        This method is not implemented in this abstract class; please
        use one of the subclasses.

        Parameters
        ----------
        dataset
            The dataset containing the time series to predict.

        Returns
        -------
        Iterator[Forecast]
            Iterator over the forecasts, in the same order as the dataset
            iterable was provided.
        """
        raise NotImplementedError

    def serialize(self, path: Path) -> None:
        """
        Write the predictor's concrete class name and version metadata to
        ``path``; subclasses extend this with their own state.
        """
        # serialize Predictor type
        with (path / "type.txt").open("w") as fp:
            fp.write(fqname_for(self.__class__))
        with (path / "version.json").open("w") as fp:
            json.dump(
                {"model": self.__version__, "gluonts": gluonts.__version__}, fp
            )

    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "Predictor":
        """
        Load a serialized predictor from the given path

        Parameters
        ----------
        path
            Path to the serialized files predictor.
        ctx
            Optional mxnet context to be used with the predictor.
            If nothing is passed will use the GPU if available and CPU otherwise.
        """
        # deserialize Predictor type
        with (path / "type.txt").open("r") as fp:
            # NOTE(review): if locate() cannot resolve the name it returns
            # None and the issubclass() below raises TypeError rather than
            # the IOError message — consider guarding for None.
            tpe = locate(fp.readline())

        # ensure that predictor_cls is a subtype of Predictor
        if not issubclass(tpe, Predictor):
            raise IOError(
                f"Class {fqname_for(tpe)} is not "
                f"a subclass of {fqname_for(Predictor)}"
            )

        # call deserialize() for the concrete Predictor type
        return tpe.deserialize(path, ctx)

    @classmethod
    def from_hyperparameters(cls, **hyperparameters):
        # delegate to the shared hyperparameter-based factory
        return from_hyperparameters(cls, **hyperparameters)
class RepresentablePredictor(Predictor):
    """
    Abstract predictor base class for models that are not Gluon-based.

    Subclasses are expected to have ``@validated()`` constructors;
    (de)serialization and value equality are implemented entirely on top
    of the ``@validated()`` machinery.

    Parameters
    ----------
    prediction_length
        Prediction horizon.
    freq
        Frequency of the predicted data.
    """

    @validated()
    def __init__(self, prediction_length: int, freq: str) -> None:
        super().__init__(prediction_length, freq)

    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        # Forecast each entry individually via `predict_item`.
        yield from (self.predict_item(entry) for entry in dataset)

    def predict_item(self, item: DataEntry) -> Forecast:
        raise NotImplementedError

    def __eq__(self, that):
        # Two instances are equal iff their @validated() constructor
        # arguments are equal.
        return equals(self, that)

    def serialize(self, path: Path) -> None:
        # Predictor.serialize() records the concrete class name first.
        super().serialize(path)
        with (path / "predictor.json").open("w") as fp:
            print(dump_json(self), file=fp)

    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "RepresentablePredictor":
        with (path / "predictor.json").open("r") as fp:
            return load_json(fp.read())
class GluonPredictor(Predictor):
    """
    Base predictor type for Gluon-based models.

    Parameters
    ----------
    input_names
        Input tensor names for the graph
    prediction_net
        Network that will be called for prediction
    batch_size
        Number of time series to predict in a single batch
    prediction_length
        Number of time steps to predict
    freq
        Frequency of the input data
    input_transform
        Input transformation pipeline
    output_transform
        Output transformation
    ctx
        MXNet context to use for computation
    forecast_generator
        Class to generate forecasts from network outputs
    """

    # Concrete subclasses narrow this to the Gluon block type they wrap.
    BlockType = mx.gluon.Block

    def __init__(
        self,
        input_names: List[str],
        prediction_net: BlockType,
        batch_size: int,
        prediction_length: int,
        freq: str,
        ctx: mx.Context,
        input_transform: Transformation,
        # NOTE(review): this default instance is created once, at class
        # definition time, and shared by every predictor that does not pass
        # its own generator — acceptable only if it is stateless; confirm.
        forecast_generator: ForecastGenerator = SampleForecastGenerator(),
        output_transform: Optional[OutputTransform] = None,
        dtype: DType = np.float32,
    ) -> None:
        super().__init__(prediction_length, freq)
        self.input_names = input_names
        self.prediction_net = prediction_net
        self.batch_size = batch_size
        self.input_transform = input_transform
        self.forecast_generator = forecast_generator
        self.output_transform = output_transform
        self.ctx = ctx
        self.dtype = dtype

    def hybridize(self, batch: DataBatch) -> None:
        """
        Hybridizes the underlying prediction network.

        Parameters
        ----------
        batch
            A batch of data to use for the required forward pass after the
            `hybridize()` call.
        """
        self.prediction_net.hybridize(active=True)
        # One forward pass on the batch, as required after hybridize();
        # inputs are looked up by name in the declared order.
        self.prediction_net(*[batch[k] for k in self.input_names])

    def as_symbol_block_predictor(
        self, batch: DataBatch
    ) -> "SymbolBlockPredictor":
        """
        Returns a variant of the current :class:`GluonPredictor` backed
        by a Gluon `SymbolBlock`. If the current predictor is already a
        :class:`SymbolBlockPredictor`, it just returns itself.

        Parameters
        ----------
        batch
            A batch of data to use for the required forward pass after the
            `hybridize()` call of the underlying network.

        Returns
        -------
        SymbolBlockPredictor
            A predictor derived from the current one backed by a `SymbolBlock`.
        """
        raise NotImplementedError

    def predict(
        self, dataset: Dataset, num_samples: Optional[int] = None
    ) -> Iterator[Forecast]:
        # Stream the dataset through the input transformation, then let the
        # forecast generator lazily turn network outputs into Forecasts.
        inference_data_loader = InferenceDataLoader(
            dataset,
            self.input_transform,
            self.batch_size,
            ctx=self.ctx,
            dtype=self.dtype,
        )
        yield from self.forecast_generator(
            inference_data_loader=inference_data_loader,
            prediction_net=self.prediction_net,
            input_names=self.input_names,
            freq=self.freq,
            output_transform=self.output_transform,
            num_samples=num_samples,
        )

    def __eq__(self, that):
        # Equality requires the same concrete type and equal network
        # parameter dicts.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable; confirm this is intended before using them in sets.
        if type(self) != type(that):
            return False
        # TODO: also consider equality of the pipelines
        # if not equals(self.input_transform, that.input_transform):
        #     return False
        return equals(
            self.prediction_net.collect_params(),
            that.prediction_net.collect_params(),
        )

    def serialize(self, path: Path) -> None:
        # call Predictor.serialize() in order to serialize the class name
        super().serialize(path)
        # serialize every GluonPredictor-specific parameters
        # serialize the prediction network
        self.serialize_prediction_net(path)
        # serialize transformation chain
        with (path / "input_transform.json").open("w") as fp:
            print(dump_json(self.input_transform), file=fp)
        # FIXME: also needs to serialize the output_transform
        # serialize all remaining constructor parameters
        with (path / "parameters.json").open("w") as fp:
            parameters = dict(
                batch_size=self.batch_size,
                prediction_length=self.prediction_length,
                freq=self.freq,
                ctx=self.ctx,
                dtype=self.dtype,
                forecast_generator=self.forecast_generator,
                input_names=self.input_names,
            )
            print(dump_json(parameters), file=fp)

    def serialize_prediction_net(self, path: Path) -> None:
        # Subclasses decide how the network itself is persisted
        # (symbolic graph vs. JSON representation).
        raise NotImplementedError()
class SymbolBlockPredictor(GluonPredictor):
    """
    A predictor whose network is stored as an MXNet symbolic graph.

    This representation is forward-compatible as GluonTS models evolve and
    is therefore the recommended choice for models deployed in production.
    The training shell uses it when invoked with the hyperparameter
    `use_symbol_block_predictor = True`.
    """

    BlockType = mx.gluon.SymbolBlock

    def as_symbol_block_predictor(
        self, batch: DataBatch
    ) -> "SymbolBlockPredictor":
        # Already backed by a SymbolBlock; nothing to convert.
        return self

    def serialize_prediction_net(self, path: Path) -> None:
        export_symb_block(self.prediction_net, path, "prediction_net")

    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "SymbolBlockPredictor":
        ctx = get_mxnet_context() if ctx is None else ctx
        with mx.Context(ctx):
            # constructor parameters (the stored ctx is replaced by ours)
            parameters = load_json((path / "parameters.json").read_text())
            parameters["ctx"] = ctx
            # transformation chain
            transform = load_json(
                (path / "input_transform.json").read_text()
            )
            # the symbolic prediction network itself
            net = import_symb_block(
                len(parameters["input_names"]), path, "prediction_net"
            )
            return SymbolBlockPredictor(
                input_transform=transform,
                prediction_net=net,
                **parameters,
            )
class RepresentableBlockPredictor(GluonPredictor):
    """
    A predictor which serializes the network structure using the
    JSON-serialization methods located in `gluonts.core.serde`. Use the following
    logic to create a `RepresentableBlockPredictor` from a trained prediction
    network.

    >>> def create_representable_block_predictor(
    ...     prediction_network: mx.gluon.HybridBlock,
    ...     **kwargs
    ... ) -> RepresentableBlockPredictor:
    ...     return RepresentableBlockPredictor(
    ...         prediction_net=prediction_network,
    ...         **kwargs
    ...     )
    """

    BlockType = mx.gluon.HybridBlock

    def __init__(
        self,
        prediction_net: BlockType,
        batch_size: int,
        prediction_length: int,
        freq: str,
        ctx: mx.Context,
        input_transform: Transformation,
        # NOTE(review): shared default instance, created once at class
        # definition time (same caveat as in GluonPredictor).
        forecast_generator: ForecastGenerator = SampleForecastGenerator(),
        output_transform: Optional[
            Callable[[DataEntry, np.ndarray], np.ndarray]
        ] = None,
        dtype: DType = np.float32,
    ) -> None:
        # Unlike GluonPredictor, input_names is not a parameter here: it is
        # derived from the network's hybrid_forward signature.
        super().__init__(
            input_names=get_hybrid_forward_input_names(prediction_net),
            prediction_net=prediction_net,
            batch_size=batch_size,
            prediction_length=prediction_length,
            freq=freq,
            ctx=ctx,
            input_transform=input_transform,
            forecast_generator=forecast_generator,
            output_transform=output_transform,
            dtype=dtype,
        )

    def as_symbol_block_predictor(
        self, batch: DataBatch
    ) -> SymbolBlockPredictor:
        # Trace the hybrid block into a SymbolBlock using one sample batch,
        # then rewrap it with identical configuration.
        symbol_block_net = hybrid_block_to_symbol_block(
            hb=self.prediction_net,
            data_batch=[batch[k] for k in self.input_names],
        )
        return SymbolBlockPredictor(
            input_names=self.input_names,
            prediction_net=symbol_block_net,
            batch_size=self.batch_size,
            prediction_length=self.prediction_length,
            freq=self.freq,
            ctx=self.ctx,
            input_transform=self.input_transform,
            forecast_generator=self.forecast_generator,
            output_transform=self.output_transform,
            dtype=self.dtype,
        )

    def serialize(self, path: Path) -> None:
        # The JSON representation encodes the network's Python structure,
        # which may not load under a future GluonTS version — hence the
        # warning; use SymbolBlockPredictor for production artifacts.
        logging.warning(
            "Serializing RepresentableBlockPredictor instances does not save "
            "the prediction network structure in a backwards-compatible "
            "manner. Be careful not to use this method in production."
        )
        super().serialize(path)

    def serialize_prediction_net(self, path: Path) -> None:
        export_repr_block(self.prediction_net, path, "prediction_net")

    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "RepresentableBlockPredictor":
        ctx = ctx if ctx is not None else get_mxnet_context()
        with mx.Context(ctx):
            # deserialize constructor parameters
            with (path / "parameters.json").open("r") as fp:
                parameters = load_json(fp.read())
            # deserialize transformation chain
            with (path / "input_transform.json").open("r") as fp:
                transform = load_json(fp.read())
            # deserialize prediction network
            prediction_net = import_repr_block(path, "prediction_net")
            # input_names is derived from the prediction_net
            if "input_names" in parameters:
                del parameters["input_names"]
            parameters["ctx"] = ctx
            return RepresentableBlockPredictor(
                input_transform=transform,
                prediction_net=prediction_net,
                **parameters,
            )
class WorkerError:
    """Carries a formatted traceback from a worker process to the parent."""

    def __init__(self, msg):
        self.msg = msg
def _worker_loop(
    predictor_path: Path,
    input_queue: mp.Queue,
    output_queue: mp.Queue,
    worker_id,
    **kwargs,
):
    """
    Body of a ParallelizedPredictor worker process.

    Deserializes the predictor stored at ``predictor_path``, then keeps
    consuming ``(index, chunk)`` pairs from ``input_queue`` and emitting
    ``(index, worker_id, forecasts)`` triples on ``output_queue``. An index
    of ``None`` signals end-of-data; any prediction failure is reported
    back as a ``WorkerError`` carrying the formatted traceback.
    """
    predictor = Predictor.deserialize(predictor_path)
    while True:
        idx, chunk = input_queue.get()
        if idx is None:
            # end-of-data sentinel: acknowledge and terminate
            output_queue.put((None, None, None))
            return
        try:
            forecasts = list(predictor.predict(chunk, **kwargs))
        except Exception:
            tb = "".join(traceback.format_exception(*sys.exc_info()))
            output_queue.put((WorkerError(tb), None, None))
            return
        output_queue.put((idx, worker_id, forecasts))
class ParallelizedPredictor(Predictor):
    """
    Runs multiple instances (workers) of a predictor in parallel.

    Exceptions are propagated from the workers.

    Note: That there is currently an issue with tqdm that will cause things
    to hang if the ParallelizedPredictor is used with tqdm and an exception
    occurs during prediction.
    https://github.com/tqdm/tqdm/issues/548

    Parameters
    ----------
    base_predictor
        A representable predictor that will be used
    num_workers
        Number of workers (processes) to use. If set to
        None, one worker per CPU will be used.
    chunk_size
        Number of items to pass per call
    """

    def __init__(
        self,
        base_predictor: Predictor,
        num_workers: Optional[int] = None,
        chunk_size=1,
    ) -> None:
        super().__init__(base_predictor.prediction_length, base_predictor.freq)
        self._base_predictor = base_predictor
        self._num_workers = (
            num_workers if num_workers is not None else mp.cpu_count()
        )
        self._chunk_size = chunk_size
        self._num_running_workers = 0
        self._input_queues = []
        self._output_queue = None

    def _grouper(self, iterable, n):
        # Yield successive tuples of up to n items until exhausted
        # (the final group may be shorter).
        iterator = iter(iterable)
        group = tuple(itertools.islice(iterator, n))
        while group:
            yield group
            group = tuple(itertools.islice(iterator, n))

    def terminate(self):
        # Best-effort shutdown: send the end-of-data sentinel, then force
        # termination and reap each worker process.
        for q in self._input_queues:
            q.put((None, None))
        for w in self._workers:
            w.terminate()
        for i, w in enumerate(self._workers):
            w.join()

    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        # The base predictor is serialized to a temp dir so each worker
        # process can deserialize its own copy (see _worker_loop).
        with TemporaryDirectory() as tempdir:
            predictor_path = Path(tempdir)
            self._base_predictor.serialize(predictor_path)
            # TODO: Consider using shared memory for the data transfer.
            self._input_queues = [mp.Queue() for _ in range(self._num_workers)]
            self._output_queue = mp.Queue()
            workers = []
            for worker_id, in_q in enumerate(self._input_queues):
                worker = mp.Process(
                    target=_worker_loop,
                    args=(predictor_path, in_q, self._output_queue, worker_id),
                    kwargs=kwargs,
                )
                worker.daemon = True
                worker.start()
                workers.append(worker)
                self._num_running_workers += 1
            self._workers = workers
            chunked_data = self._grouper(dataset, self._chunk_size)
            # _send_idx numbers outgoing chunks; _next_idx is the next chunk
            # index to yield; _data_buffer holds out-of-order results so
            # forecasts are yielded in the original dataset order.
            self._send_idx = 0
            self._next_idx = 0
            self._data_buffer = {}
            worker_ids = list(range(self._num_workers))

            def receive():
                # Block for one worker message; a WorkerError payload means
                # a worker crashed — tear everything down and re-raise.
                idx, worker_id, result = self._output_queue.get()
                if isinstance(idx, WorkerError):
                    self._num_running_workers -= 1
                    self.terminate()
                    raise Exception(idx.msg)
                if idx is not None:
                    self._data_buffer[idx] = result
                return idx, worker_id, result

            def get_next_from_buffer():
                # Drain the buffer in index order, stopping at the first gap.
                while self._next_idx in self._data_buffer:
                    result_batch = self._data_buffer.pop(self._next_idx)
                    self._next_idx += 1
                    for result in result_batch:
                        yield result

            def send(worker_id, chunk):
                q = self._input_queues[worker_id]
                q.put((self._send_idx, chunk))
                self._send_idx += 1

            try:
                # prime the queues
                for wid in worker_ids:
                    chunk = next(chunked_data)
                    send(wid, chunk)
                # steady state: each received result frees its worker for
                # the next chunk
                while True:
                    idx, wid, result = receive()
                    for res in get_next_from_buffer():
                        yield res
                    chunk = next(chunked_data)
                    send(wid, chunk)
            except StopIteration:
                # signal workers end of data
                for q in self._input_queues:
                    q.put((None, None))
                # collect any outstanding results
                while self._num_running_workers > 0:
                    idx, worker_id, result = receive()
                    if idx is None:
                        self._num_running_workers -= 1
                        continue
                    for res in get_next_from_buffer():
                        yield res
                # every sent chunk must have been received and yielded
                assert len(self._data_buffer) == 0
                assert self._send_idx == self._next_idx
class Localizer(Predictor):
    """
    A Predictor that uses an estimator to train a local model per time
    series and immediately uses that model to predict.

    Parameters
    ----------
    estimator
        The estimator object to train on each dataset entry at prediction time.
    """

    def __init__(self, estimator: "Estimator"):
        super().__init__(estimator.prediction_length, estimator.freq)
        self.estimator = estimator

    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        logger = logging.getLogger(__name__)
        for i, entry in enumerate(dataset, start=1):
            logger.info(f"training for time series {i} / {len(dataset)}")
            # wrap the single entry into a one-element dataset
            single_series = ListDataset([entry], freq=self.freq)
            local_predictor = self.estimator.train(single_series)
            logger.info(f"predicting for time series {i} / {len(dataset)}")
            yield from local_predictor.predict(single_series, **kwargs)
class FallbackPredictor(Predictor):
    # Mixin for predictors that can be built on the fly from another
    # predictor's constructor arguments, e.g. a MeanPredictor used as a
    # fallback when the base predictor fails (see the `fallback` decorator).
    @classmethod
    def from_predictor(
        cls, base: RepresentablePredictor, **overrides
    ) -> Predictor:
        # Create a predictor based on an existing predictor.
        # This lets us create e.g. a MeanPredictor as a fallback on the fly,
        # reusing the base predictor's @validated() __init_args__.
        # NOTE(review): an override key that also appears in __init_args__
        # makes the double-** unpacking raise TypeError — confirm callers
        # only pass genuinely new keyword arguments.
        return cls.from_hyperparameters(
            **getattr(base, "__init_args__"), **overrides
        )
def fallback(fallback_cls: Type[FallbackPredictor]):
    """
    Decorator factory for ``predict_item`` implementations.

    Wraps the decorated method so that, whenever it raises anything other
    than a ``GluonTSException``, the failure is logged and the prediction
    is retried with a ``fallback_cls`` predictor built on the fly from the
    current predictor's constructor arguments.
    """

    def decorator(predict_item):
        @functools.wraps(predict_item)
        def wrapper(self, item: DataEntry) -> Forecast:
            try:
                return predict_item(self, item)
            except GluonTSException:
                # deliberate framework errors are propagated untouched
                raise
            except Exception:
                logging.warning(
                    f"Base predictor failed with: {traceback.format_exc()}"
                )
                return fallback_cls.from_predictor(self).predict_item(item)

        return wrapper

    return decorator
| 23,995 | 30.994667 | 81 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepar-original/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional, Tuple
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.block.scaler import MeanScaler, NOPScaler
from gluonts.core.component import DType, validated
from gluonts.distribution import DistributionOutput, Distribution
from gluonts.distribution.distribution import getF
from gluonts.model.common import Tensor
from gluonts.support.util import weighted_average
def prod(xs):
    """Return the product of all elements of ``xs`` (1 for an empty iterable)."""
    result = 1
    for factor in xs:
        result *= factor
    return result
class DeepARNetwork(mx.gluon.HybridBlock):
    """
    Shared base network for DeepAR training and prediction.

    Builds the stacked RNN (LSTM or GRU cells, residual connections from
    the second layer on, optional zoneout regularization), the categorical
    feature embedder, the target scaler, and the projection from RNN
    outputs to the parameters of the output distribution.
    """

    @validated()
    def __init__(
        self,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: List[int],
        lags_seq: List[int],
        scaling: bool = True,
        dtype: DType = np.float32,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling
        self.dtype = dtype
        assert len(cardinality) == len(
            embedding_dimension
        ), "embedding_dimension should be a list with the same size as cardinality"
        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        # NOTE(review): sorts the caller's list in place (side effect on the
        # argument); lags are kept in ascending order.
        lags_seq.sort()
        self.lags_seq = lags_seq
        self.distr_output = distr_output
        # KeyError here means an unsupported cell_type was passed.
        RnnCell = {"lstm": mx.gluon.rnn.LSTMCell, "gru": mx.gluon.rnn.GRUCell}[
            self.cell_type
        ]
        self.target_shape = distr_output.event_shape
        # TODO: is the following restriction needed?
        assert (
            len(self.target_shape) <= 1
        ), "Argument `target_shape` should be a tuple with 1 element at most"
        with self.name_scope():
            self.proj_distr_args = distr_output.get_args_proj()
            self.rnn = mx.gluon.rnn.HybridSequentialRNNCell()
            for k in range(num_layers):
                cell = RnnCell(hidden_size=num_cells)
                # residual connections for all but the first layer
                cell = mx.gluon.rnn.ResidualCell(cell) if k > 0 else cell
                # zoneout on the recurrent state implements the dropout_rate
                cell = (
                    mx.gluon.rnn.ZoneoutCell(cell, zoneout_states=dropout_rate)
                    if dropout_rate > 0.0
                    else cell
                )
                self.rnn.add(cell)
            self.rnn.cast(dtype=dtype)
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=embedding_dimension,
                dtype=self.dtype,
            )
            # with scaling enabled the target is normalized by its mean
            # absolute value over the context window
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)

    @staticmethod
    def get_lagged_subsequences(
        F,
        sequence: Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> Tensor:
        """
        Returns lagged subsequences of a given sequence.

        Parameters
        ----------
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted.
            Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.

        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and
            I = len(indices), containing lagged subsequences. Specifically,
            lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        # we must have: sequence_length - lag_index - subsequences_length >= 0
        # for all lag_index, hence the following assert
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, "
            f"found lag {max(indices)} while history length is only "
            f"{sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)
        lagged_values = []
        for lag_index in indices:
            # slice counts backwards from the end of the sequence;
            # lag 0 means "the last subsequences_length steps"
            begin_index = -lag_index - subsequences_length
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(
                F.slice_axis(
                    sequence, axis=1, begin=begin_index, end=end_index
                )
            )
        # stack lags into a trailing axis: (N, S, C, I)
        return F.stack(*lagged_values, axis=-1)

    def unroll_encoder(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Optional[
            Tensor
        ],  # (batch_size, prediction_length, num_features)
        future_target: Optional[
            Tensor
        ],  # (batch_size, prediction_length, *target_shape)
    ) -> Tuple[Tensor, List, Tensor, Tensor]:
        """
        Unrolls the LSTM encoder over past and, if present, future data.

        Returns outputs and state of the encoder, plus the scale of past_target
        and a vector of static features that was constructed and fed as input
        to the encoder.

        All tensor arguments should have NTC layout.
        """
        # prediction mode: unroll only over the context window;
        # training mode: unroll over context + prediction windows
        if future_time_feat is None or future_target is None:
            time_feat = past_time_feat.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            )
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        else:
            time_feat = F.concat(
                past_time_feat.slice_axis(
                    axis=1,
                    begin=self.history_length - self.context_length,
                    end=None,
                ),
                future_time_feat,
                dim=1,
            )
            sequence = F.concat(past_target, future_target, dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags = self.get_lagged_subsequences(
            F=F,
            sequence=sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length,
        )
        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
            past_observed_values.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
        )
        # (batch_size, num_features)
        embedded_cat = self.embedder(feat_static_cat)
        # in addition to embedding features, use the log scale as it can help
        # prediction too
        # (batch_size, num_features + prod(target_shape))
        static_feat = F.concat(
            embedded_cat,
            feat_static_real,
            F.log(scale)
            if len(self.target_shape) == 0
            else F.log(scale.squeeze(axis=1)),
            dim=1,
        )
        # (batch_size, subsequences_length, num_features + 1)
        repeated_static_feat = static_feat.expand_dims(axis=1).repeat(
            axis=1, repeats=subsequences_length
        )
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))
        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = F.reshape(
            data=lags_scaled,
            shape=(
                -1,
                subsequences_length,
                len(self.lags_seq) * prod(self.target_shape),
            ),
        )
        # (batch_size, sub_seq_len, input_dim)
        inputs = F.concat(input_lags, time_feat, repeated_static_feat, dim=-1)
        # unroll encoder
        outputs, state = self.rnn.unroll(
            inputs=inputs,
            length=subsequences_length,
            layout="NTC",
            merge_outputs=True,
            begin_state=self.rnn.begin_state(
                func=F.zeros,
                dtype=self.dtype,
                # NOTE(review): batch_size 0 in the symbolic case appears to
                # let MXNet infer the batch dimension — confirm.
                batch_size=inputs.shape[0]
                if isinstance(inputs, mx.nd.NDArray)
                else 0,
            ),
        )
        # outputs: (batch_size, seq_len, num_cells)
        # state: list of (batch_size, num_cells) tensors
        # scale: (batch_size, 1, *target_shape)
        # static_feat: (batch_size, num_features + prod(target_shape))
        return outputs, state, scale, static_feat
class DeepARTrainingNetwork(DeepARNetwork):
    """
    DeepAR network used during training: unrolls the encoder with the true
    future target (teacher forcing) and computes the negative log-likelihood
    loss of the predicted distribution, masked by the observed-value
    indicators.
    """

    def distribution(
        self,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Distribution:
        """
        Returns the distribution predicted by the model on the range of
        past_target and future_target.

        The distribution is obtained by unrolling the network with the true
        target, this is also the distribution that is being minimized during
        training. This can be used in anomaly detection, see for instance
        examples/anomaly_detection.py.

        Input arguments are the same as for the hybrid_forward method.

        Returns
        -------
        Distribution
            a distribution object whose mean has shape:
            (batch_size, context_length + prediction_length).
        """
        # unroll the decoder in "training mode"
        # i.e. by providing future data as well
        F = getF(feat_static_cat)
        rnn_outputs, _, scale, _ = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
        )
        # project RNN outputs to distribution parameters, rescaled back to
        # the original target scale
        distr_args = self.proj_distr_args(rnn_outputs)
        return self.distr_output.distribution(distr_args, scale=scale)

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Tensor:
        """
        Computes the loss for training DeepAR, all inputs tensors representing
        time series have NTC layout.

        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape, seq_len)
        future_time_feat : (batch_size, prediction_length, num_features)
        future_target : (batch_size, prediction_length, *target_shape)
        future_observed_values : (batch_size, prediction_length, *target_shape)

        Returns
        -------
        A tuple (weighted_loss, loss): the loss averaged over time with
        observed-value weights, and the per-step loss.
        """
        distr = self.distribution(
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
            future_observed_values=future_observed_values,
        )
        # put together target sequence
        # (batch_size, seq_len, *target_shape)
        target = F.concat(
            past_target.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            ),
            future_target,
            dim=1,
        )
        # (batch_size, seq_len)
        loss = distr.loss(target)
        # (batch_size, seq_len, *target_shape)
        observed_values = F.concat(
            past_observed_values.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=self.history_length,
            ),
            future_observed_values,
            dim=1,
        )
        # mask the loss at one time step iff one or more observations is missing in the target dimensions
        # (batch_size, seq_len)
        loss_weights = (
            observed_values
            if (len(self.target_shape) == 0)
            else observed_values.min(axis=-1, keepdims=False)
        )
        weighted_loss = weighted_average(
            F=F, x=loss, weights=loss_weights, axis=1
        )
        return weighted_loss, loss
class DeepARPredictionNetwork(DeepARNetwork):
    """
    DeepAR network used at prediction time: encodes the past, then draws
    `num_parallel_samples` sample paths over the prediction horizon by
    ancestral sampling — each step feeds the previous step's sample back
    into the RNN.
    """

    @validated()
    def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
        super().__init__(**kwargs)
        self.num_parallel_samples = num_parallel_samples
        # for decoding the lags are shifted by one, at the first time-step
        # of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]

    def sampling_decoder(
        self,
        F,
        static_feat: Tensor,
        past_target: Tensor,
        time_feat: Tensor,
        scale: Tensor,
        begin_states: List,
    ) -> Tensor:
        """
        Computes sample paths by unrolling the LSTM starting with a initial
        input and state.

        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, 1, 1).
        begin_states : List
            list of initial states for the LSTM layers.
            the shape of each tensor of the list should be (batch_size, num_cells)

        Returns
        --------
        Tensor
            A tensor containing sampled paths.
            Shape: (batch_size, num_sample_paths, prediction_length).
        """
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_time_feat = time_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_static_feat = static_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_scale = scale.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_states = [
            s.repeat(repeats=self.num_parallel_samples, axis=0)
            for s in begin_states
        ]
        future_samples = []
        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            # lags are taken from the growing history, which already
            # includes samples drawn in previous iterations
            lags = self.get_lagged_subsequences(
                F=F,
                sequence=repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags_scaled = F.broadcast_div(
                lags, repeated_scale.expand_dims(axis=-1)
            )
            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = F.reshape(
                data=lags_scaled,
                shape=(-1, 1, prod(self.target_shape) * len(self.lags_seq)),
            )
            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            decoder_input = F.concat(
                input_lags,
                repeated_time_feat.slice_axis(axis=1, begin=k, end=k + 1),
                repeated_static_feat,
                dim=-1,
            )
            # output shape: (batch_size * num_samples, 1, num_cells)
            # state shape: (batch_size * num_samples, num_cells)
            rnn_outputs, repeated_states = self.rnn.unroll(
                inputs=decoder_input,
                length=1,
                begin_state=repeated_states,
                layout="NTC",
                merge_outputs=True,
            )
            distr_args = self.proj_distr_args(rnn_outputs)
            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(
                distr_args, scale=repeated_scale
            )
            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample(dtype=self.dtype)
            # (batch_size * num_samples, seq_len, *target_shape)
            # append the sample so the next step can lag on it
            repeated_past_target = F.concat(
                repeated_past_target, new_samples, dim=1
            )
            future_samples.append(new_samples)
        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = F.concat(*future_samples, dim=1)
        # (batch_size, num_samples, prediction_length, *target_shape)
        return samples.reshape(
            shape=(
                (-1, self.num_parallel_samples)
                + (self.prediction_length,)
                + self.target_shape
            )
        )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Tensor,  # (batch_size, prediction_length, num_features)
    ) -> Tensor:
        """
        Predicts samples, all tensors should have NTC layout.

        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)

        Returns
        -------
        Tensor
            Predicted samples
        """
        # unroll the decoder in "prediction mode", i.e. with past data only
        _, state, scale, static_feat = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=None,
            future_target=None,
        )
        return self.sampling_decoder(
            F=F,
            past_target=past_target,
            time_feat=future_time_feat,
            static_feat=static_feat,
            scale=scale,
            begin_states=state,
        )
| 21,761 | 34.85173 | 116 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepar-original/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import DType, validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import DistributionOutput, StudentTOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import (
TimeFeature,
time_features_from_frequency_str,
get_lags_for_frequency,
)
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SetField,
Transformation,
VstackFeatures,
)
# Relative imports
from ._network import DeepARPredictionNetwork, DeepARTrainingNetwork
class DeepAREstimator(GluonEstimator):
    """
    Construct a DeepAR estimator.
    This implements an RNN-based model, close to the one described in
    [SFG17]_.
    *Note:* the code of this model is unrelated to the implementation behind
    `SageMaker's DeepAR Forecasting Algorithm
    <https://docs.aws.amazon.com/sagemaker/latest/dg/deepar.html>`_.
    Parameters
    ----------
    freq
        Frequency of the data to train on and predict
    prediction_length
        Length of the prediction horizon
    trainer
        Trainer object to be used (default: Trainer())
    context_length
        Number of steps to unroll the RNN for before computing predictions
        (default: None, in which case context_length = prediction_length)
    num_layers
        Number of RNN layers (default: 2)
    num_cells
        Number of RNN cells for each layer (default: 40)
    cell_type
        Type of recurrent cells to use (available: 'lstm' or 'gru';
        default: 'lstm')
    dropout_rate
        Dropout regularization parameter (default: 0.1)
    use_feat_dynamic_real
        Whether to use the ``feat_dynamic_real`` field from the data
        (default: False)
    use_feat_static_cat
        Whether to use the ``feat_static_cat`` field from the data
        (default: False)
    use_feat_static_real
        Whether to use the ``feat_static_real`` field from the data
        (default: False)
    cardinality
        Number of values of each categorical feature.
        This must be set if ``use_feat_static_cat == True`` (default: None)
    embedding_dimension
        Dimension of the embeddings for categorical features
        (default: [min(50, (cat+1)//2) for cat in cardinality])
    distr_output
        Distribution to use to evaluate observations and sample predictions
        (default: StudentTOutput())
    scaling
        Whether to automatically scale the target values (default: true)
    lags_seq
        Indices of the lagged target values to use as inputs of the RNN
        (default: None, in which case these are automatically determined
        based on freq)
    time_features
        Time features to use as inputs of the RNN (default: None, in which
        case these are automatically determined based on freq)
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 100)
    """
    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        trainer: Trainer = Trainer(),
        context_length: Optional[int] = None,
        num_layers: int = 2,
        num_cells: int = 40,
        cell_type: str = "lstm",
        dropout_rate: float = 0.1,
        use_feat_dynamic_real: bool = False,
        use_feat_static_cat: bool = False,
        use_feat_static_real: bool = False,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        distr_output: DistributionOutput = StudentTOutput(),
        scaling: bool = True,
        lags_seq: Optional[List[int]] = None,
        time_features: Optional[List[TimeFeature]] = None,
        num_parallel_samples: int = 100,
        dtype: DType = np.float32,
    ) -> None:
        super().__init__(trainer=trainer, dtype=dtype)
        # Validate hyper-parameters eagerly so misconfiguration fails at
        # construction time rather than deep inside training.
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        assert num_layers > 0, "The value of `num_layers` should be > 0"
        assert num_cells > 0, "The value of `num_cells` should be > 0"
        assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
        assert (cardinality is not None and use_feat_static_cat) or (
            cardinality is None and not use_feat_static_cat
        ), "You should set `cardinality` if and only if `use_feat_static_cat=True`"
        assert cardinality is None or all(
            [c > 0 for c in cardinality]
        ), "Elements of `cardinality` should be > 0"
        assert embedding_dimension is None or all(
            [e > 0 for e in embedding_dimension]
        ), "Elements of `embedding_dimension` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"
        self.freq = freq
        # context_length defaults to prediction_length when not given
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.prediction_length = prediction_length
        self.distr_output = distr_output
        self.distr_output.dtype = dtype
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.dropout_rate = dropout_rate
        self.use_feat_dynamic_real = use_feat_dynamic_real
        self.use_feat_static_cat = use_feat_static_cat
        self.use_feat_static_real = use_feat_static_real
        # fall back to a single dummy category when static cat features are unused
        self.cardinality = (
            cardinality if cardinality and use_feat_static_cat else [1]
        )
        self.embedding_dimension = (
            embedding_dimension
            if embedding_dimension is not None
            else [min(50, (cat + 1) // 2) for cat in self.cardinality]
        )
        self.scaling = scaling
        self.lags_seq = (
            lags_seq
            if lags_seq is not None
            else get_lags_for_frequency(freq_str=freq)
            #else get_lags_for_frequency(freq_str=freq, num_lags=1)
        )
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )
        # extra history is needed so the largest lag is available at the
        # first step of the context window
        self.history_length = self.context_length + max(self.lags_seq)
        self.num_parallel_samples = num_parallel_samples
    def create_transformation(self) -> Transformation:
        """Build the preprocessing pipeline applied to each dataset entry.

        The chain removes unused fields, substitutes dummy static features
        when they are disabled, coerces arrays to the configured dtype, adds
        observed-value indicators plus time/age features, stacks the dynamic
        features, and finally splits each series into (past, future)
        training instances.
        """
        remove_field_names = [FieldName.FEAT_DYNAMIC_CAT]
        if not self.use_feat_static_real:
            remove_field_names.append(FieldName.FEAT_STATIC_REAL)
        if not self.use_feat_dynamic_real:
            remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
        return Chain(
            [RemoveFields(field_names=remove_field_names)]
            + (
                [SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
                if not self.use_feat_static_cat
                else []
            )
            + (
                [
                    SetField(
                        output_field=FieldName.FEAT_STATIC_REAL, value=[0.0]
                    )
                ]
                if not self.use_feat_static_real
                else []
            )
            + [
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_CAT,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_REAL,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.TARGET,
                    # in the following line, we add 1 for the time dimension
                    expected_ndim=1 + len(self.distr_output.event_shape),
                    dtype=self.dtype,
                ),
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                    dtype=self.dtype,
                ),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=self.time_features,
                    pred_length=self.prediction_length,
                ),
                AddAgeFeature(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_AGE,
                    pred_length=self.prediction_length,
                    log_scale=True,
                    dtype=self.dtype,
                ),
                VstackFeatures(
                    output_field=FieldName.FEAT_TIME,
                    input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
                    + (
                        [FieldName.FEAT_DYNAMIC_REAL]
                        if self.use_feat_dynamic_real
                        else []
                    ),
                ),
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                    past_length=self.history_length,
                    future_length=self.prediction_length,
                    time_series_fields=[
                        FieldName.FEAT_TIME,
                        FieldName.OBSERVED_VALUES,
                    ],
                ),
            ]
        )
    def create_training_network(self) -> DeepARTrainingNetwork:
        """Instantiate the training network with this estimator's hyper-parameters."""
        return DeepARTrainingNetwork(
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )
    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """Build a prediction network with the same hyper-parameters, copy the
        trained weights into it, and wrap it in a ``RepresentableBlockPredictor``.
        """
        prediction_network = DeepARPredictionNetwork(
            num_parallel_samples=self.num_parallel_samples,
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )
        # transfer learned parameters from the training network
        copy_parameters(trained_network, prediction_network)
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
            dtype=self.dtype,
        )
| 12,713 | 37.18018 | 94 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deeparw-old/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional, Tuple
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.block.scaler import MeanScaler, NOPScaler
from gluonts.core.component import DType, validated
from gluonts.distribution import DistributionOutput, Distribution
from gluonts.distribution.distribution import getF
from gluonts.model.common import Tensor
from gluonts.support.util import weighted_average
def prod(xs):
    """Return the product of all elements in *xs* (1 for an empty iterable)."""
    result = 1
    for factor in xs:
        result *= factor
    return result
class DeepARWNetwork(mx.gluon.HybridBlock):
    """Base network shared by the DeepARW training and prediction networks.

    Owns the stacked RNN (LSTM/GRU cells with residual connections above the
    first layer and optional zoneout regularization), the embedder for static
    categorical features, the target scaler, and the projection from RNN
    outputs to the parameters of the output distribution.
    """
    @validated()
    def __init__(
        self,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: List[int],
        lags_seq: List[int],
        scaling: bool = True,
        dtype: DType = np.float32,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling
        self.dtype = dtype
        assert len(cardinality) == len(
            embedding_dimension
        ), "embedding_dimension should be a list with the same size as cardinality"
        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        # sort in place so lag features are always laid out in ascending order
        lags_seq.sort()
        self.lags_seq = lags_seq
        self.distr_output = distr_output
        # map the configured cell type to the corresponding Gluon cell class;
        # unknown types raise KeyError here at construction time
        RnnCell = {"lstm": mx.gluon.rnn.LSTMCell, "gru": mx.gluon.rnn.GRUCell}[
            self.cell_type
        ]
        self.target_shape = distr_output.event_shape
        # TODO: is the following restriction needed?
        assert (
            len(self.target_shape) <= 1
        ), "Argument `target_shape` should be a tuple with 1 element at most"
        with self.name_scope():
            self.proj_distr_args = distr_output.get_args_proj()
            self.rnn = mx.gluon.rnn.HybridSequentialRNNCell()
            for k in range(num_layers):
                cell = RnnCell(hidden_size=num_cells)
                # residual (skip) connections for every layer above the first
                cell = mx.gluon.rnn.ResidualCell(cell) if k > 0 else cell
                # zoneout regularization on the cell states when dropout is on
                cell = (
                    mx.gluon.rnn.ZoneoutCell(cell, zoneout_states=dropout_rate)
                    if dropout_rate > 0.0
                    else cell
                )
                self.rnn.add(cell)
            self.rnn.cast(dtype=dtype)
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=embedding_dimension,
                dtype=self.dtype,
            )
            # MeanScaler normalizes targets by their mean; NOPScaler is the
            # identity, used when scaling is disabled
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)
    @staticmethod
    def get_lagged_subsequences(
        F,
        sequence: Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> Tensor:
        """
        Returns lagged subsequences of a given sequence.
        Parameters
        ----------
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted.
            Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.
        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and
            I = len(indices), containing lagged subsequences. Specifically,
            lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        # we must have: sequence_length - lag_index - subsequences_length >= 0
        # for all lag_index, hence the following assert
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, "
            f"found lag {max(indices)} while history length is only "
            f"{sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)
        lagged_values = []
        for lag_index in indices:
            # slice relative to the end of the sequence; lag 0 means "up to
            # the last element" (end_index None)
            begin_index = -lag_index - subsequences_length
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(
                F.slice_axis(
                    sequence, axis=1, begin=begin_index, end=end_index
                )
            )
        # stack the per-lag slices along a new trailing axis
        return F.stack(*lagged_values, axis=-1)
    def unroll_encoder(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Optional[
            Tensor
        ],  # (batch_size, prediction_length, num_features)
        future_target: Optional[
            Tensor
        ],  # (batch_size, prediction_length, *target_shape)
    ) -> Tuple[Tensor, List, Tensor, Tensor]:
        """
        Unrolls the LSTM encoder over past and, if present, future data.
        Returns outputs and state of the encoder, plus the scale of past_target
        and a vector of static features that was constructed and fed as input
        to the encoder.
        All tensor arguments should have NTC layout.
        """
        # prediction mode (no future data): unroll only over the context
        # window; training mode: unroll over context + prediction range
        if future_time_feat is None or future_target is None:
            time_feat = past_time_feat.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            )
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        else:
            time_feat = F.concat(
                past_time_feat.slice_axis(
                    axis=1,
                    begin=self.history_length - self.context_length,
                    end=None,
                ),
                future_time_feat,
                dim=1,
            )
            sequence = F.concat(past_target, future_target, dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags = self.get_lagged_subsequences(
            F=F,
            sequence=sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length,
        )
        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
            past_observed_values.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
        )
        # (batch_size, num_features)
        embedded_cat = self.embedder(feat_static_cat)
        # in addition to embedding features, use the log scale as it can help
        # prediction too
        # (batch_size, num_features + prod(target_shape))
        static_feat = F.concat(
            embedded_cat,
            feat_static_real,
            F.log(scale)
            if len(self.target_shape) == 0
            else F.log(scale.squeeze(axis=1)),
            dim=1,
        )
        # (batch_size, subsequences_length, num_features + 1)
        repeated_static_feat = static_feat.expand_dims(axis=1).repeat(
            axis=1, repeats=subsequences_length
        )
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))
        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = F.reshape(
            data=lags_scaled,
            shape=(
                -1,
                subsequences_length,
                len(self.lags_seq) * prod(self.target_shape),
            ),
        )
        # (batch_size, sub_seq_len, input_dim)
        inputs = F.concat(input_lags, time_feat, repeated_static_feat, dim=-1)
        # unroll encoder
        outputs, state = self.rnn.unroll(
            inputs=inputs,
            length=subsequences_length,
            layout="NTC",
            merge_outputs=True,
            begin_state=self.rnn.begin_state(
                func=F.zeros,
                dtype=self.dtype,
                # in symbolic mode the concrete batch size is unknown;
                # 0 lets MXNet infer it
                batch_size=inputs.shape[0]
                if isinstance(inputs, mx.nd.NDArray)
                else 0,
            ),
        )
        # outputs: (batch_size, seq_len, num_cells)
        # state: list of (batch_size, num_cells) tensors
        # scale: (batch_size, 1, *target_shape)
        # static_feat: (batch_size, num_features + prod(target_shape))
        return outputs, state, scale, static_feat
class DeepARWTrainingNetwork(DeepARWNetwork):
    """Training-time network: computes a weighted negative log-likelihood loss.

    NOTE(review): this "DeepARW" variant replaces the standard DeepAR
    missing-value loss mask with an experiment-specific re-weighting scheme
    (see ``hybrid_forward``), and returns both the weighted loss and the raw
    per-step loss.
    """
    def distribution(
        self,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Distribution:
        """
        Returns the distribution predicted by the model on the range of
        past_target and future_target.
        The distribution is obtained by unrolling the network with the true
        target, this is also the distribution that is being minimized during
        training. This can be used in anomaly detection, see for instance
        examples/anomaly_detection.py.
        Input arguments are the same as for the hybrid_forward method.
        ``future_observed_values`` is accepted for signature parity with
        ``hybrid_forward`` but is not used here.
        Returns
        -------
        Distribution
            a distribution object whose mean has shape:
            (batch_size, context_length + prediction_length).
        """
        # unroll the decoder in "training mode"
        # i.e. by providing future data as well
        F = getF(feat_static_cat)
        rnn_outputs, _, scale, _ = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
        )
        # project RNN outputs to distribution parameters, then rescale
        distr_args = self.proj_distr_args(rnn_outputs)
        return self.distr_output.distribution(distr_args, scale=scale)
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Tensor:
        """
        Computes the loss for training DeepARW, all inputs tensors representing
        time series have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape, seq_len)
        future_time_feat : (batch_size, prediction_length, num_features)
        future_target : (batch_size, prediction_length, *target_shape)
        future_observed_values : (batch_size, prediction_length, *target_shape)
        Returns loss with shape (batch_size, context + prediction_length, 1)
        -------
        """
        distr = self.distribution(
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
            future_observed_values=future_observed_values,
        )
        # put together target sequence
        # (batch_size, seq_len, *target_shape)
        target = F.concat(
            past_target.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            ),
            future_target,
            dim=1,
        )
        # (batch_size, seq_len)
        loss = distr.loss(target)
        # (batch_size, seq_len, *target_shape)
        observed_values = F.concat(
            past_observed_values.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=self.history_length,
            ),
            future_observed_values,
            dim=1,
        )
        # mask the loss at one time step iff one or more observations is missing in the target dimensions
        # (batch_size, seq_len)
        #loss_weights = (
        #    observed_values
        #    if (len(self.target_shape) == 0)
        #    else observed_values.min(axis=-1, keepdims=False)
        #)
        # special setting for pitage, 0 for pitstop
        #loss_weights = (observed_values>0)*1./35 + (observed_values==0)*1.
        #loss_weights = (observed_values>0)*1e-8 + (observed_values==0)*1.
        # NOTE(review): the standard observed-value mask above is disabled.
        # Here `observed_values` appears to be repurposed as a pit-stop
        # indicator (race-modeling experiment): steps with observed_values > 0
        # are almost entirely down-weighted (1e-8) while steps with
        # observed_values == 0 get full weight -- TODO confirm this encoding
        # against the data pipeline.
        loss_weights = (observed_values>0)*1e-8 + (observed_values==0)*1.
        # nextpit, observed_values == 1, next is pitstop
        #pitstop = F.where(observed_values == 1)[0] + 1
        #loss_weights = F.zeros_like(observed_values)
        #_weights = np.zeros_like(observed_values)
        #_weights[pitstop] = 1.
        #loss_weights = _weights
        weighted_loss = weighted_average(
            F=F, x=loss, weights=loss_weights, axis=1
        )
        # NOTE(review): multiplying by a positive constant rescales gradient
        # magnitude only; the 40. factor looks like an experiment-specific
        # tuning knob -- confirm before changing.
        weighted_loss = weighted_loss * 40.
        # returns (weighted loss, raw per-step loss); presumably the first
        # element is used as the training objective -- verify against trainer
        return weighted_loss, loss
class DeepARWPredictionNetwork(DeepARWNetwork):
    """Inference-time network: draws sample paths by ancestral sampling.

    The encoder is unrolled over the observed history only; the decoder then
    feeds each freshly drawn sample back in as the next step's lagged input.
    """
    @validated()
    def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
        super().__init__(**kwargs)
        # number of sample paths drawn in parallel per series
        self.num_parallel_samples = num_parallel_samples
        # for decoding the lags are shifted by one, at the first time-step
        # of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]
    def sampling_decoder(
        self,
        F,
        static_feat: Tensor,
        past_target: Tensor,
        time_feat: Tensor,
        scale: Tensor,
        begin_states: List,
    ) -> Tensor:
        """
        Computes sample paths by unrolling the LSTM starting with a initial
        input and state.
        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, 1, 1).
        begin_states : List
            list of initial states for the LSTM layers.
            the shape of each tensor of the list should be (batch_size, num_cells)
        Returns
        --------
        Tensor
            A tensor containing sampled paths.
            Shape: (batch_size, num_sample_paths, prediction_length).
        """
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_time_feat = time_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_static_feat = static_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_scale = scale.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_states = [
            s.repeat(repeats=self.num_parallel_samples, axis=0)
            for s in begin_states
        ]
        future_samples = []
        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            # lags are taken from the (growing) history that already includes
            # previously sampled values, hence sequence_length grows with k
            lags = self.get_lagged_subsequences(
                F=F,
                sequence=repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags_scaled = F.broadcast_div(
                lags, repeated_scale.expand_dims(axis=-1)
            )
            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = F.reshape(
                data=lags_scaled,
                shape=(-1, 1, prod(self.target_shape) * len(self.lags_seq)),
            )
            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            decoder_input = F.concat(
                input_lags,
                repeated_time_feat.slice_axis(axis=1, begin=k, end=k + 1),
                repeated_static_feat,
                dim=-1,
            )
            # output shape: (batch_size * num_samples, 1, num_cells)
            # state shape: (batch_size * num_samples, num_cells)
            rnn_outputs, repeated_states = self.rnn.unroll(
                inputs=decoder_input,
                length=1,
                begin_state=repeated_states,
                layout="NTC",
                merge_outputs=True,
            )
            distr_args = self.proj_distr_args(rnn_outputs)
            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(
                distr_args, scale=repeated_scale
            )
            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample(dtype=self.dtype)
            # (batch_size * num_samples, seq_len, *target_shape)
            # append the new sample so it becomes available as a lag input
            # for the next decoding step
            repeated_past_target = F.concat(
                repeated_past_target, new_samples, dim=1
            )
            future_samples.append(new_samples)
        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = F.concat(*future_samples, dim=1)
        # (batch_size, num_samples, prediction_length, *target_shape)
        return samples.reshape(
            shape=(
                (-1, self.num_parallel_samples)
                + (self.prediction_length,)
                + self.target_shape
            )
        )
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Tensor,  # (batch_size, prediction_length, num_features)
    ) -> Tensor:
        """
        Predicts samples, all tensors should have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        Returns
        -------
        Tensor
            Predicted samples
        """
        # unroll the decoder in "prediction mode", i.e. with past data only
        _, state, scale, static_feat = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=None,
            future_target=None,
        )
        return self.sampling_decoder(
            F=F,
            past_target=past_target,
            time_feat=future_time_feat,
            static_feat=static_feat,
            scale=scale,
            begin_states=state,
        )
| 22,395 | 34.948636 | 116 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deeparw-old/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import DType, validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import DistributionOutput, StudentTOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import (
TimeFeature,
time_features_from_frequency_str,
get_lags_for_frequency,
)
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SetField,
Transformation,
VstackFeatures,
)
# Relative imports
from ._network import DeepARWPredictionNetwork, DeepARWTrainingNetwork
class DeepARWEstimator(GluonEstimator):
"""
Construct a DeepARW estimator.
This implements an RNN-based model, close to the one described in
[SFG17]_.
*Note:* the code of this model is unrelated to the implementation behind
`SageMaker's DeepARW Forecasting Algorithm
<https://docs.aws.amazon.com/sagemaker/latest/dg/deepar.html>`_.
Parameters
----------
freq
Frequency of the data to train on and predict
prediction_length
Length of the prediction horizon
trainer
Trainer object to be used (default: Trainer())
context_length
Number of steps to unroll the RNN for before computing predictions
(default: None, in which case context_length = prediction_length)
num_layers
Number of RNN layers (default: 2)
num_cells
Number of RNN cells for each layer (default: 40)
cell_type
Type of recurrent cells to use (available: 'lstm' or 'gru';
default: 'lstm')
dropout_rate
Dropout regularization parameter (default: 0.1)
use_feat_dynamic_real
Whether to use the ``feat_dynamic_real`` field from the data
(default: False)
use_feat_static_cat
Whether to use the ``feat_static_cat`` field from the data
(default: False)
use_feat_static_real
Whether to use the ``feat_static_real`` field from the data
(default: False)
cardinality
Number of values of each categorical feature.
This must be set if ``use_feat_static_cat == True`` (default: None)
embedding_dimension
Dimension of the embeddings for categorical features
(default: [min(50, (cat+1)//2) for cat in cardinality])
distr_output
Distribution to use to evaluate observations and sample predictions
(default: StudentTOutput())
scaling
Whether to automatically scale the target values (default: true)
lags_seq
Indices of the lagged target values to use as inputs of the RNN
(default: None, in which case these are automatically determined
based on freq)
time_features
Time features to use as inputs of the RNN (default: None, in which
case these are automatically determined based on freq)
num_parallel_samples
Number of evaluation samples per time series to increase parallelism during inference.
This is a model optimization that does not affect the accuracy (default: 100)
"""
@validated()
def __init__(
self,
freq: str,
prediction_length: int,
trainer: Trainer = Trainer(),
context_length: Optional[int] = None,
num_layers: int = 2,
num_cells: int = 40,
cell_type: str = "lstm",
dropout_rate: float = 0.1,
use_feat_dynamic_real: bool = False,
use_feat_static_cat: bool = False,
use_feat_static_real: bool = False,
cardinality: Optional[List[int]] = None,
embedding_dimension: Optional[List[int]] = None,
distr_output: DistributionOutput = StudentTOutput(),
scaling: bool = True,
lags_seq: Optional[List[int]] = None,
time_features: Optional[List[TimeFeature]] = None,
num_parallel_samples: int = 100,
dtype: DType = np.float32,
) -> None:
super().__init__(trainer=trainer, dtype=dtype)
assert (
prediction_length > 0
), "The value of `prediction_length` should be > 0"
assert (
context_length is None or context_length > 0
), "The value of `context_length` should be > 0"
assert num_layers > 0, "The value of `num_layers` should be > 0"
assert num_cells > 0, "The value of `num_cells` should be > 0"
assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
assert (cardinality is not None and use_feat_static_cat) or (
cardinality is None and not use_feat_static_cat
), "You should set `cardinality` if and only if `use_feat_static_cat=True`"
assert cardinality is None or all(
[c > 0 for c in cardinality]
), "Elements of `cardinality` should be > 0"
assert embedding_dimension is None or all(
[e > 0 for e in embedding_dimension]
), "Elements of `embedding_dimension` should be > 0"
assert (
num_parallel_samples > 0
), "The value of `num_parallel_samples` should be > 0"
self.freq = freq
self.context_length = (
context_length if context_length is not None else prediction_length
)
self.prediction_length = prediction_length
self.distr_output = distr_output
self.distr_output.dtype = dtype
self.num_layers = num_layers
self.num_cells = num_cells
self.cell_type = cell_type
self.dropout_rate = dropout_rate
self.use_feat_dynamic_real = use_feat_dynamic_real
self.use_feat_static_cat = use_feat_static_cat
self.use_feat_static_real = use_feat_static_real
self.cardinality = (
cardinality if cardinality and use_feat_static_cat else [1]
)
self.embedding_dimension = (
embedding_dimension
if embedding_dimension is not None
else [min(50, (cat + 1) // 2) for cat in self.cardinality]
)
self.scaling = scaling
self.lags_seq = (
lags_seq
if lags_seq is not None
else get_lags_for_frequency(freq_str=freq)
)
self.time_features = (
time_features
if time_features is not None
else time_features_from_frequency_str(self.freq)
)
self.history_length = self.context_length + max(self.lags_seq)
self.num_parallel_samples = num_parallel_samples
def create_transformation(self) -> Transformation:
remove_field_names = [FieldName.FEAT_DYNAMIC_CAT]
if not self.use_feat_static_real:
remove_field_names.append(FieldName.FEAT_STATIC_REAL)
if not self.use_feat_dynamic_real:
remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
return Chain(
[RemoveFields(field_names=remove_field_names)]
+ (
[SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
if not self.use_feat_static_cat
else []
)
+ (
[
SetField(
output_field=FieldName.FEAT_STATIC_REAL, value=[0.0]
)
]
if not self.use_feat_static_real
else []
)
+ [
AsNumpyArray(
field=FieldName.FEAT_STATIC_CAT,
expected_ndim=1,
dtype=self.dtype,
),
AsNumpyArray(
field=FieldName.FEAT_STATIC_REAL,
expected_ndim=1,
dtype=self.dtype,
),
AsNumpyArray(
field=FieldName.TARGET,
# in the following line, we add 1 for the time dimension
expected_ndim=1 + len(self.distr_output.event_shape),
dtype=self.dtype,
),
AddObservedValuesIndicator(
target_field=FieldName.TARGET,
output_field=FieldName.OBSERVED_VALUES,
dtype=self.dtype,
),
AddTimeFeatures(
start_field=FieldName.START,
target_field=FieldName.TARGET,
output_field=FieldName.FEAT_TIME,
time_features=self.time_features,
pred_length=self.prediction_length,
),
AddAgeFeature(
target_field=FieldName.TARGET,
output_field=FieldName.FEAT_AGE,
pred_length=self.prediction_length,
log_scale=True,
dtype=self.dtype,
),
VstackFeatures(
output_field=FieldName.FEAT_TIME,
input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
+ (
[FieldName.FEAT_DYNAMIC_REAL]
if self.use_feat_dynamic_real
else []
),
),
InstanceSplitter(
target_field=FieldName.TARGET,
is_pad_field=FieldName.IS_PAD,
start_field=FieldName.START,
forecast_start_field=FieldName.FORECAST_START,
train_sampler=ExpectedNumInstanceSampler(num_instances=1),
past_length=self.history_length,
future_length=self.prediction_length,
time_series_fields=[
FieldName.FEAT_TIME,
FieldName.OBSERVED_VALUES,
],
),
]
)
def create_training_network(self) -> DeepARWTrainingNetwork:
return DeepARWTrainingNetwork(
num_layers=self.num_layers,
num_cells=self.num_cells,
cell_type=self.cell_type,
history_length=self.history_length,
context_length=self.context_length,
prediction_length=self.prediction_length,
distr_output=self.distr_output,
dropout_rate=self.dropout_rate,
cardinality=self.cardinality,
embedding_dimension=self.embedding_dimension,
lags_seq=self.lags_seq,
scaling=self.scaling,
dtype=self.dtype,
)
def create_predictor(
self, transformation: Transformation, trained_network: HybridBlock
) -> Predictor:
prediction_network = DeepARWPredictionNetwork(
num_parallel_samples=self.num_parallel_samples,
num_layers=self.num_layers,
num_cells=self.num_cells,
cell_type=self.cell_type,
history_length=self.history_length,
context_length=self.context_length,
prediction_length=self.prediction_length,
distr_output=self.distr_output,
dropout_rate=self.dropout_rate,
cardinality=self.cardinality,
embedding_dimension=self.embedding_dimension,
lags_seq=self.lags_seq,
scaling=self.scaling,
dtype=self.dtype,
)
copy_parameters(trained_network, prediction_network)
return RepresentableBlockPredictor(
input_transform=transformation,
prediction_net=prediction_network,
batch_size=self.trainer.batch_size,
freq=self.freq,
prediction_length=self.prediction_length,
ctx=self.trainer.ctx,
dtype=self.dtype,
)
| 12,653 | 37.114458 | 94 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deeparw-new/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional, Tuple
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.block.scaler import MeanScaler, NOPScaler
from gluonts.core.component import DType, validated
from gluonts.distribution import DistributionOutput, Distribution
from gluonts.distribution.distribution import getF
from gluonts.model.common import Tensor
from gluonts.support.util import weighted_average
def prod(xs):
    """Multiply together all elements of ``xs``; an empty iterable yields 1."""
    result = 1
    for factor in xs:
        result = result * factor
    return result
class DeepARWeightNetwork(mx.gluon.HybridBlock):
    """Base RNN network shared by the weighted-DeepAR training/prediction nets.

    Embeds static categorical features, scales the target, builds lagged
    input subsequences, and unrolls a multi-layer LSTM/GRU whose outputs
    are projected onto the parameters of ``distr_output``.
    """

    @validated()
    def __init__(
        self,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: List[int],
        lags_seq: List[int],
        scaling: bool = True,
        dtype: DType = np.float32,
        **kwargs,
    ) -> None:
        """Store the configuration and build the RNN, embedder and scaler."""
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling
        self.dtype = dtype
        assert len(cardinality) == len(
            embedding_dimension
        ), "embedding_dimension should be a list with the same size as cardinality"
        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        # NOTE: sorts the caller's list in place; lags are kept ascending.
        lags_seq.sort()
        self.lags_seq = lags_seq
        self.distr_output = distr_output
        # Map the configured cell type onto the corresponding gluon cell class
        # (KeyError for anything other than "lstm"/"gru").
        RnnCell = {"lstm": mx.gluon.rnn.LSTMCell, "gru": mx.gluon.rnn.GRUCell}[
            self.cell_type
        ]
        self.target_shape = distr_output.event_shape
        # TODO: is the following restriction needed?
        assert (
            len(self.target_shape) <= 1
        ), "Argument `target_shape` should be a tuple with 1 element at most"
        with self.name_scope():
            self.proj_distr_args = distr_output.get_args_proj()
            self.rnn = mx.gluon.rnn.HybridSequentialRNNCell()
            for k in range(num_layers):
                cell = RnnCell(hidden_size=num_cells)
                # Residual connections on every layer except the first.
                cell = mx.gluon.rnn.ResidualCell(cell) if k > 0 else cell
                # Zoneout on the states acts as recurrent regularization.
                cell = (
                    mx.gluon.rnn.ZoneoutCell(cell, zoneout_states=dropout_rate)
                    if dropout_rate > 0.0
                    else cell
                )
                self.rnn.add(cell)
            self.rnn.cast(dtype=dtype)
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=embedding_dimension,
                dtype=self.dtype,
            )
            # Mean scaling of the target is optional; NOPScaler leaves the
            # values untouched but keeps the same interface.
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)

    @staticmethod
    def get_lagged_subsequences(
        F,
        sequence: Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> Tensor:
        """
        Returns lagged subsequences of a given sequence.
        Parameters
        ----------
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted.
            Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.
        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and
            I = len(indices), containing lagged subsequences. Specifically,
            lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        # we must have: sequence_length - lag_index - subsequences_length >= 0
        # for all lag_index, hence the following assert
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, "
            f"found lag {max(indices)} while history length is only "
            f"{sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)
        lagged_values = []
        for lag_index in indices:
            begin_index = -lag_index - subsequences_length
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(
                F.slice_axis(
                    sequence, axis=1, begin=begin_index, end=end_index
                )
            )
        return F.stack(*lagged_values, axis=-1)

    def unroll_encoder(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Optional[
            Tensor
        ],  # (batch_size, prediction_length, num_features)
        future_target: Optional[
            Tensor
        ],  # (batch_size, prediction_length, *target_shape)
    ) -> Tuple[Tensor, List, Tensor, Tensor]:
        """
        Unrolls the LSTM encoder over past and, if present, future data.
        Returns outputs and state of the encoder, plus the scale of past_target
        and a vector of static features that was constructed and fed as input
        to the encoder.
        All tensor arguments should have NTC layout.
        """
        # Inference mode: only the conditioning (context) range is available.
        if future_time_feat is None or future_target is None:
            time_feat = past_time_feat.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            )
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        # Training mode: unroll jointly over context and prediction ranges.
        else:
            time_feat = F.concat(
                past_time_feat.slice_axis(
                    axis=1,
                    begin=self.history_length - self.context_length,
                    end=None,
                ),
                future_time_feat,
                dim=1,
            )
            sequence = F.concat(past_target, future_target, dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags = self.get_lagged_subsequences(
            F=F,
            sequence=sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length,
        )
        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
            past_observed_values.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
        )
        # (batch_size, num_features)
        embedded_cat = self.embedder(feat_static_cat)
        # in addition to embedding features, use the log scale as it can help
        # prediction too
        # (batch_size, num_features + prod(target_shape))
        static_feat = F.concat(
            embedded_cat,
            feat_static_real,
            F.log(scale)
            if len(self.target_shape) == 0
            else F.log(scale.squeeze(axis=1)),
            dim=1,
        )
        # (batch_size, subsequences_length, num_features + 1)
        repeated_static_feat = static_feat.expand_dims(axis=1).repeat(
            axis=1, repeats=subsequences_length
        )
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))
        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = F.reshape(
            data=lags_scaled,
            shape=(
                -1,
                subsequences_length,
                len(self.lags_seq) * prod(self.target_shape),
            ),
        )
        # (batch_size, sub_seq_len, input_dim)
        inputs = F.concat(input_lags, time_feat, repeated_static_feat, dim=-1)
        # unroll encoder
        outputs, state = self.rnn.unroll(
            inputs=inputs,
            length=subsequences_length,
            layout="NTC",
            merge_outputs=True,
            begin_state=self.rnn.begin_state(
                func=F.zeros,
                dtype=self.dtype,
                # in symbolic mode there is no concrete batch size; 0 is
                # passed as a placeholder
                batch_size=inputs.shape[0]
                if isinstance(inputs, mx.nd.NDArray)
                else 0,
            ),
        )
        # outputs: (batch_size, seq_len, num_cells)
        # state: list of (batch_size, num_cells) tensors
        # scale: (batch_size, 1, *target_shape)
        # static_feat: (batch_size, num_features + prod(target_shape))
        return outputs, state, scale, static_feat
class DeepARWeightTrainingNetwork(DeepARWeightNetwork):
    """Training-mode network: unrolls over observed data and computes a
    custom, value-dependent weighted loss (see ``hybrid_forward``)."""

    def distribution(
        self,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Distribution:
        """
        Returns the distribution predicted by the model on the range of
        past_target and future_target.
        The distribution is obtained by unrolling the network with the true
        target, this is also the distribution that is being minimized during
        training. This can be used in anomaly detection, see for instance
        examples/anomaly_detection.py.
        Input arguments are the same as for the hybrid_forward method.
        Returns
        -------
        Distribution
            a distribution object whose mean has shape:
            (batch_size, context_length + prediction_length).
        """
        # unroll the decoder in "training mode"
        # i.e. by providing future data as well
        F = getF(feat_static_cat)
        rnn_outputs, _, scale, _ = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
        )
        distr_args = self.proj_distr_args(rnn_outputs)
        return self.distr_output.distribution(distr_args, scale=scale)

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Tensor:
        """
        Computes the loss for training DeepAR, all inputs tensors representing
        time series have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape, seq_len)
        future_time_feat : (batch_size, prediction_length, num_features)
        future_target : (batch_size, prediction_length, *target_shape)
        future_observed_values : (batch_size, prediction_length, *target_shape)
        Returns
        -------
        Tensor
            weighted loss aggregated over the time axis, shape (batch_size,).
        Tensor
            per-time-step loss with nan/-inf entries masked to zero,
            shape (batch_size, context_length + prediction_length).
        """
        distr = self.distribution(
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
            future_observed_values=future_observed_values,
        )
        # put together target sequence
        # (batch_size, seq_len, *target_shape)
        target = F.concat(
            past_target.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            ),
            future_target,
            dim=1,
        )
        # (batch_size, seq_len)
        loss = distr.loss(target)
        # (batch_size, seq_len, *target_shape)
        observed_values = F.concat(
            past_observed_values.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=self.history_length,
            ),
            future_observed_values,
            dim=1,
        )
        # mask the loss at one time step iff one or more observations is missing in the target dimensions
        # (batch_size, seq_len)
        #loss_weights = (
        #    observed_values
        #    if (len(self.target_shape) == 0)
        #    else observed_values.min(axis=-1, keepdims=False)
        #)
        # special setting for pitage, 0 for pitstop:
        # entries of observed_values > 0 are down-weighted to 1/35 while
        # zero-valued entries keep full weight.
        # NOTE(review): 1/35 is a magic constant from the racing ("pitage")
        # use case — confirm it matches the intended class balance.
        loss_weights = (observed_values>0)*1./35 + (observed_values==0)*1.
        weighted_loss = weighted_average(
            F=F, x=loss, weights=loss_weights, axis=1
        )
        # need to mask possible nans and -inf
        # (applies only to the returned per-step loss; weighted_loss above
        # was already computed with the weights)
        loss = F.where(condition=loss_weights, x=loss, y=F.zeros_like(loss))
        return weighted_loss, loss
class DeepARWeightPredictionNetwork(DeepARWeightNetwork):
    """Inference-mode network: encodes the context window, then draws
    sample paths autoregressively over the prediction range."""

    @validated()
    def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
        """Extend the base network with the number of parallel sample paths."""
        super().__init__(**kwargs)
        self.num_parallel_samples = num_parallel_samples
        # for decoding the lags are shifted by one, at the first time-step
        # of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]

    def sampling_decoder(
        self,
        F,
        static_feat: Tensor,
        past_target: Tensor,
        time_feat: Tensor,
        scale: Tensor,
        begin_states: List,
    ) -> Tensor:
        """
        Computes sample paths by unrolling the LSTM starting with a initial
        input and state.
        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, 1, 1).
        begin_states : List
            list of initial states for the LSTM layers.
            the shape of each tensor of the list should be (batch_size, num_cells)
        Returns
        --------
        Tensor
            A tensor containing sampled paths.
            Shape: (batch_size, num_sample_paths, prediction_length).
        """
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_time_feat = time_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_static_feat = static_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_scale = scale.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_states = [
            s.repeat(repeats=self.num_parallel_samples, axis=0)
            for s in begin_states
        ]
        future_samples = []
        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags = self.get_lagged_subsequences(
                F=F,
                sequence=repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags_scaled = F.broadcast_div(
                lags, repeated_scale.expand_dims(axis=-1)
            )
            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = F.reshape(
                data=lags_scaled,
                shape=(-1, 1, prod(self.target_shape) * len(self.lags_seq)),
            )
            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            decoder_input = F.concat(
                input_lags,
                repeated_time_feat.slice_axis(axis=1, begin=k, end=k + 1),
                repeated_static_feat,
                dim=-1,
            )
            # output shape: (batch_size * num_samples, 1, num_cells)
            # state shape: (batch_size * num_samples, num_cells)
            rnn_outputs, repeated_states = self.rnn.unroll(
                inputs=decoder_input,
                length=1,
                begin_state=repeated_states,
                layout="NTC",
                merge_outputs=True,
            )
            distr_args = self.proj_distr_args(rnn_outputs)
            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(
                distr_args, scale=repeated_scale
            )
            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample(dtype=self.dtype)
            # (batch_size * num_samples, seq_len, *target_shape)
            # appended so later steps can use the sample as a lagged input
            repeated_past_target = F.concat(
                repeated_past_target, new_samples, dim=1
            )
            future_samples.append(new_samples)
        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = F.concat(*future_samples, dim=1)
        # (batch_size, num_samples, prediction_length, *target_shape)
        return samples.reshape(
            shape=(
                (-1, self.num_parallel_samples)
                + (self.prediction_length,)
                + self.target_shape
            )
        )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Tensor,  # (batch_size, prediction_length, num_features)
    ) -> Tensor:
        """
        Predicts samples, all tensors should have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        Returns
        -------
        Tensor
            Predicted samples
        """
        # unroll the decoder in "prediction mode", i.e. with past data only
        _, state, scale, static_feat = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=None,
            future_target=None,
        )
        return self.sampling_decoder(
            F=F,
            past_target=past_target,
            time_feat=future_time_feat,
            static_feat=static_feat,
            scale=scale,
            begin_states=state,
        )
| 22,048 | 34.969005 | 116 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deeparw-new/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import DType, validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import DistributionOutput, StudentTOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import (
TimeFeature,
time_features_from_frequency_str,
get_lags_for_frequency,
)
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SetField,
Transformation,
VstackFeatures,
)
# Relative imports
from ._network import DeepARWeightPredictionNetwork, DeepARWeightTrainingNetwork
class DeepARWeightEstimator(GluonEstimator):
    """
    Construct a DeepAR estimator.
    This implements an RNN-based model, close to the one described in
    [SFG17]_.
    *Note:* the code of this model is unrelated to the implementation behind
    `SageMaker's DeepAR Forecasting Algorithm
    <https://docs.aws.amazon.com/sagemaker/latest/dg/deepar.html>`_.
    Parameters
    ----------
    freq
        Frequency of the data to train on and predict
    prediction_length
        Length of the prediction horizon
    trainer
        Trainer object to be used (default: Trainer())
    context_length
        Number of steps to unroll the RNN for before computing predictions
        (default: None, in which case context_length = prediction_length)
    num_layers
        Number of RNN layers (default: 2)
    num_cells
        Number of RNN cells for each layer (default: 40)
    cell_type
        Type of recurrent cells to use (available: 'lstm' or 'gru';
        default: 'lstm')
    dropout_rate
        Dropout regularization parameter (default: 0.1)
    use_feat_dynamic_real
        Whether to use the ``feat_dynamic_real`` field from the data
        (default: False)
    use_feat_static_cat
        Whether to use the ``feat_static_cat`` field from the data
        (default: False)
    use_feat_static_real
        Whether to use the ``feat_static_real`` field from the data
        (default: False)
    cardinality
        Number of values of each categorical feature.
        This must be set if ``use_feat_static_cat == True`` (default: None)
    embedding_dimension
        Dimension of the embeddings for categorical features
        (default: [min(50, (cat+1)//2) for cat in cardinality])
    distr_output
        Distribution to use to evaluate observations and sample predictions
        (default: StudentTOutput())
    scaling
        Whether to automatically scale the target values (default: true)
    lags_seq
        Indices of the lagged target values to use as inputs of the RNN
        (default: None, in which case these are automatically determined
        based on freq)
    time_features
        Time features to use as inputs of the RNN (default: None, in which
        case these are automatically determined based on freq)
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 100)
    """

    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        trainer: Trainer = Trainer(),
        context_length: Optional[int] = None,
        num_layers: int = 2,
        num_cells: int = 40,
        cell_type: str = "lstm",
        dropout_rate: float = 0.1,
        use_feat_dynamic_real: bool = False,
        use_feat_static_cat: bool = False,
        use_feat_static_real: bool = False,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        distr_output: DistributionOutput = StudentTOutput(),
        scaling: bool = True,
        lags_seq: Optional[List[int]] = None,
        time_features: Optional[List[TimeFeature]] = None,
        num_parallel_samples: int = 100,
        dtype: DType = np.float32,
    ) -> None:
        super().__init__(trainer=trainer, dtype=dtype)
        # Validate configuration before storing anything.
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        assert num_layers > 0, "The value of `num_layers` should be > 0"
        assert num_cells > 0, "The value of `num_cells` should be > 0"
        assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
        assert (cardinality is not None and use_feat_static_cat) or (
            cardinality is None and not use_feat_static_cat
        ), "You should set `cardinality` if and only if `use_feat_static_cat=True`"
        assert cardinality is None or all(
            [c > 0 for c in cardinality]
        ), "Elements of `cardinality` should be > 0"
        assert embedding_dimension is None or all(
            [e > 0 for e in embedding_dimension]
        ), "Elements of `embedding_dimension` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"
        self.freq = freq
        # context_length falls back to the prediction horizon when unset.
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.prediction_length = prediction_length
        self.distr_output = distr_output
        self.distr_output.dtype = dtype
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.dropout_rate = dropout_rate
        self.use_feat_dynamic_real = use_feat_dynamic_real
        self.use_feat_static_cat = use_feat_static_cat
        self.use_feat_static_real = use_feat_static_real
        self.cardinality = (
            cardinality if cardinality and use_feat_static_cat else [1]
        )
        self.embedding_dimension = (
            embedding_dimension
            if embedding_dimension is not None
            else [min(50, (cat + 1) // 2) for cat in self.cardinality]
        )
        self.scaling = scaling
        # Lags and time features are derived from the frequency when unset.
        self.lags_seq = (
            lags_seq
            if lags_seq is not None
            else get_lags_for_frequency(freq_str=freq)
        )
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )
        # The network needs enough history to realize the largest lag.
        self.history_length = self.context_length + max(self.lags_seq)
        self.num_parallel_samples = num_parallel_samples

    def create_transformation(self) -> Transformation:
        """Build the preprocessing ``Chain``: drop unused fields, fill dummy
        static features, convert arrays, add time/age features, stack
        dynamic features, and split into training instances."""
        remove_field_names = [FieldName.FEAT_DYNAMIC_CAT]
        if not self.use_feat_static_real:
            remove_field_names.append(FieldName.FEAT_STATIC_REAL)
        if not self.use_feat_dynamic_real:
            remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
        return Chain(
            [RemoveFields(field_names=remove_field_names)]
            + (
                [SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
                if not self.use_feat_static_cat
                else []
            )
            + (
                [
                    SetField(
                        output_field=FieldName.FEAT_STATIC_REAL, value=[0.0]
                    )
                ]
                if not self.use_feat_static_real
                else []
            )
            + [
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_CAT,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_REAL,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.TARGET,
                    # in the following line, we add 1 for the time dimension
                    expected_ndim=1 + len(self.distr_output.event_shape),
                    dtype=self.dtype,
                ),
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                    # missing values are imputed with a value the output
                    # distribution supports
                    dummy_value=self.distr_output.value_in_support,
                    dtype=self.dtype,
                ),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=self.time_features,
                    pred_length=self.prediction_length,
                ),
                AddAgeFeature(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_AGE,
                    pred_length=self.prediction_length,
                    log_scale=True,
                    dtype=self.dtype,
                ),
                VstackFeatures(
                    output_field=FieldName.FEAT_TIME,
                    input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
                    + (
                        [FieldName.FEAT_DYNAMIC_REAL]
                        if self.use_feat_dynamic_real
                        else []
                    ),
                ),
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                    past_length=self.history_length,
                    future_length=self.prediction_length,
                    time_series_fields=[
                        FieldName.FEAT_TIME,
                        FieldName.OBSERVED_VALUES,
                    ],
                    dummy_value=self.distr_output.value_in_support,
                ),
            ]
        )

    def create_training_network(self) -> DeepARWeightTrainingNetwork:
        """Build the trainable network configured from this estimator."""
        return DeepARWeightTrainingNetwork(
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )

    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """Copy trained weights into a prediction network and wrap it in a
        ``RepresentableBlockPredictor`` for inference."""
        prediction_network = DeepARWeightPredictionNetwork(
            num_parallel_samples=self.num_parallel_samples,
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )
        copy_parameters(trained_network, prediction_network)
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
            dtype=self.dtype,
        )
| 12,817 | 37.377246 | 94 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepar-inuse/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional, Tuple
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.block.scaler import MeanScaler, NOPScaler
from gluonts.core.component import DType, validated
from gluonts.distribution import DistributionOutput, Distribution
from gluonts.distribution.distribution import getF
from gluonts.model.common import Tensor
from gluonts.support.util import weighted_average
def prod(xs):
p = 1
for x in xs:
p *= x
return p
class DeepARNetwork(mx.gluon.HybridBlock):
    """
    Base network shared by the DeepAR training and prediction networks.

    Owns the stacked RNN, the categorical-feature embedder, the target
    scaler and the projection onto distribution parameters, plus the
    lagged-subsequence helper and the encoder unroll used by both
    subclasses.
    """

    @validated()
    def __init__(
        self,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: List[int],
        lags_seq: List[int],
        scaling: bool = True,
        dtype: DType = np.float32,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling
        self.dtype = dtype
        assert len(cardinality) == len(
            embedding_dimension
        ), "embedding_dimension should be a list with the same size as cardinality"
        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        # NOTE: sorts the caller's list in place (lags_seq is shared with
        # whoever constructed this network, e.g. the estimator).
        lags_seq.sort()
        self.lags_seq = lags_seq
        self.distr_output = distr_output
        # A KeyError here means an unsupported cell_type was passed;
        # only "lstm" and "gru" are recognized.
        RnnCell = {"lstm": mx.gluon.rnn.LSTMCell, "gru": mx.gluon.rnn.GRUCell}[
            self.cell_type
        ]
        self.target_shape = distr_output.event_shape
        # TODO: is the following restriction needed?
        assert (
            len(self.target_shape) <= 1
        ), "Argument `target_shape` should be a tuple with 1 element at most"
        with self.name_scope():
            self.proj_distr_args = distr_output.get_args_proj()
            self.rnn = mx.gluon.rnn.HybridSequentialRNNCell()
            for k in range(num_layers):
                cell = RnnCell(hidden_size=num_cells)
                # residual connections on every layer except the first
                cell = mx.gluon.rnn.ResidualCell(cell) if k > 0 else cell
                # zoneout regularization is only applied for a positive rate
                cell = (
                    mx.gluon.rnn.ZoneoutCell(cell, zoneout_states=dropout_rate)
                    if dropout_rate > 0.0
                    else cell
                )
                self.rnn.add(cell)
            self.rnn.cast(dtype=dtype)
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=embedding_dimension,
                dtype=self.dtype,
            )
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)

    @staticmethod
    def get_lagged_subsequences(
        F,
        sequence: Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> Tensor:
        """
        Returns lagged subsequences of a given sequence.
        Parameters
        ----------
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted.
            Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.
        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and
            I = len(indices), containing lagged subsequences. Specifically,
            lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        # we must have: sequence_length - lag_index - subsequences_length >= 0
        # for all lag_index, hence the following assert
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, "
            f"found lag {max(indices)} while history length is only "
            f"{sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)
        lagged_values = []
        for lag_index in indices:
            begin_index = -lag_index - subsequences_length
            # lag 0 means "slice up to the end", which slice_axis spells as None
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(
                F.slice_axis(
                    sequence, axis=1, begin=begin_index, end=end_index
                )
            )
        return F.stack(*lagged_values, axis=-1)

    def unroll_encoder(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Optional[
            Tensor
        ],  # (batch_size, prediction_length, num_features)
        future_target: Optional[
            Tensor
        ],  # (batch_size, prediction_length, *target_shape)
    ) -> Tuple[Tensor, List, Tensor, Tensor]:
        """
        Unrolls the LSTM encoder over past and, if present, future data.
        Returns outputs and state of the encoder, plus the scale of past_target
        and a vector of static features that was constructed and fed as input
        to the encoder.
        All tensor arguments should have NTC layout.
        """
        # Prediction mode (no future data): condition on the context window
        # only. Training mode: condition on context plus the future range.
        if future_time_feat is None or future_target is None:
            time_feat = past_time_feat.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            )
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        else:
            time_feat = F.concat(
                past_time_feat.slice_axis(
                    axis=1,
                    begin=self.history_length - self.context_length,
                    end=None,
                ),
                future_time_feat,
                dim=1,
            )
            sequence = F.concat(past_target, future_target, dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags = self.get_lagged_subsequences(
            F=F,
            sequence=sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length,
        )
        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
            past_observed_values.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
        )
        # (batch_size, num_features)
        embedded_cat = self.embedder(feat_static_cat)
        # in addition to embedding features, use the log scale as it can help
        # prediction too
        # (batch_size, num_features + prod(target_shape))
        static_feat = F.concat(
            embedded_cat,
            feat_static_real,
            F.log(scale)
            if len(self.target_shape) == 0
            else F.log(scale.squeeze(axis=1)),
            dim=1,
        )
        # (batch_size, subsequences_length, num_features + 1)
        repeated_static_feat = static_feat.expand_dims(axis=1).repeat(
            axis=1, repeats=subsequences_length
        )
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))
        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = F.reshape(
            data=lags_scaled,
            shape=(
                -1,
                subsequences_length,
                len(self.lags_seq) * prod(self.target_shape),
            ),
        )
        # (batch_size, sub_seq_len, input_dim)
        inputs = F.concat(input_lags, time_feat, repeated_static_feat, dim=-1)
        # unroll encoder
        outputs, state = self.rnn.unroll(
            inputs=inputs,
            length=subsequences_length,
            layout="NTC",
            merge_outputs=True,
            begin_state=self.rnn.begin_state(
                func=F.zeros,
                dtype=self.dtype,
                # concrete batch size only exists in imperative (NDArray)
                # mode; 0 is the placeholder used in symbolic mode
                batch_size=inputs.shape[0]
                if isinstance(inputs, mx.nd.NDArray)
                else 0,
            ),
        )
        # outputs: (batch_size, seq_len, num_cells)
        # state: list of (batch_size, num_cells) tensors
        # scale: (batch_size, 1, *target_shape)
        # static_feat: (batch_size, num_features + prod(target_shape))
        return outputs, state, scale, static_feat
class DeepARTrainingNetwork(DeepARNetwork):
    """DeepAR network used at training time: computes the likelihood loss."""

    def distribution(
        self,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Distribution:
        """
        Returns the distribution predicted by the model on the range of
        past_target and future_target.
        The distribution is obtained by unrolling the network with the true
        target, this is also the distribution that is being minimized during
        training. This can be used in anomaly detection, see for instance
        examples/anomaly_detection.py.
        Input arguments are the same as for the hybrid_forward method
        (future_observed_values is accepted but not used here).
        Returns
        -------
        Distribution
            a distribution object whose mean has shape:
            (batch_size, context_length + prediction_length).
        """
        # unroll the decoder in "training mode"
        # i.e. by providing future data as well
        F = getF(feat_static_cat)
        rnn_outputs, _, scale, _ = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
        )
        distr_args = self.proj_distr_args(rnn_outputs)
        return self.distr_output.distribution(distr_args, scale=scale)

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Tuple[Tensor, Tensor]:
        """
        Computes the loss for training DeepAR, all inputs tensors representing
        time series have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        future_target : (batch_size, prediction_length, *target_shape)
        future_observed_values : (batch_size, prediction_length, *target_shape)
        Returns
        -------
        weighted_loss : Tensor
            loss averaged over the time axis with the observed-values mask.
        loss : Tensor
            unreduced per-time-step loss, shape (batch_size, seq_len).
        """
        distr = self.distribution(
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
            future_observed_values=future_observed_values,
        )
        # put together target sequence
        # (batch_size, seq_len, *target_shape)
        target = F.concat(
            past_target.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            ),
            future_target,
            dim=1,
        )
        # (batch_size, seq_len)
        loss = distr.loss(target)
        # (batch_size, seq_len, *target_shape)
        observed_values = F.concat(
            past_observed_values.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=self.history_length,
            ),
            future_observed_values,
            dim=1,
        )
        # mask the loss at one time step iff one or more observations is missing in the target dimensions
        # (batch_size, seq_len)
        loss_weights = (
            observed_values
            if (len(self.target_shape) == 0)
            else observed_values.min(axis=-1, keepdims=False)
        )
        weighted_loss = weighted_average(
            F=F, x=loss, weights=loss_weights, axis=1
        )
        return weighted_loss, loss
class DeepARPredictionNetwork(DeepARNetwork):
    """DeepAR network used at inference time: draws Monte-Carlo sample paths."""

    @validated()
    def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
        super().__init__(**kwargs)
        self.num_parallel_samples = num_parallel_samples
        # for decoding the lags are shifted by one, at the first time-step
        # of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]

    def sampling_decoder(
        self,
        F,
        static_feat: Tensor,
        past_target: Tensor,
        time_feat: Tensor,
        scale: Tensor,
        begin_states: List,
    ) -> Tensor:
        """
        Computes sample paths by unrolling the LSTM starting with a initial
        input and state.
        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, 1, 1).
        begin_states : List
            list of initial states for the LSTM layers.
            the shape of each tensor of the list should be (batch_size, num_cells)
        Returns
        --------
        Tensor
            A tensor containing sampled paths.
            Shape: (batch_size, num_sample_paths, prediction_length).
        """
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_time_feat = time_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_static_feat = static_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_scale = scale.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_states = [
            s.repeat(repeats=self.num_parallel_samples, axis=0)
            for s in begin_states
        ]
        future_samples = []
        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            # note: the sequence grows by one sample per iteration, hence
            # sequence_length = history_length + k
            lags = self.get_lagged_subsequences(
                F=F,
                sequence=repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags_scaled = F.broadcast_div(
                lags, repeated_scale.expand_dims(axis=-1)
            )
            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = F.reshape(
                data=lags_scaled,
                shape=(-1, 1, prod(self.target_shape) * len(self.lags_seq)),
            )
            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            decoder_input = F.concat(
                input_lags,
                repeated_time_feat.slice_axis(axis=1, begin=k, end=k + 1),
                repeated_static_feat,
                dim=-1,
            )
            # output shape: (batch_size * num_samples, 1, num_cells)
            # state shape: (batch_size * num_samples, num_cells)
            rnn_outputs, repeated_states = self.rnn.unroll(
                inputs=decoder_input,
                length=1,
                begin_state=repeated_states,
                layout="NTC",
                merge_outputs=True,
            )
            distr_args = self.proj_distr_args(rnn_outputs)
            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(
                distr_args, scale=repeated_scale
            )
            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample(dtype=self.dtype)
            # (batch_size * num_samples, seq_len, *target_shape)
            # feed the drawn sample back as the next step's "past" value
            repeated_past_target = F.concat(
                repeated_past_target, new_samples, dim=1
            )
            future_samples.append(new_samples)
        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = F.concat(*future_samples, dim=1)
        # (batch_size, num_samples, prediction_length, *target_shape)
        return samples.reshape(
            shape=(
                (-1, self.num_parallel_samples)
                + (self.prediction_length,)
                + self.target_shape
            )
        )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Tensor,  # (batch_size, prediction_length, num_features)
    ) -> Tensor:
        """
        Predicts samples, all tensors should have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        Returns
        -------
        Tensor
            Predicted samples
        """
        # unroll the decoder in "prediction mode", i.e. with past data only
        _, state, scale, static_feat = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=None,
            future_target=None,
        )
        return self.sampling_decoder(
            F=F,
            past_target=past_target,
            time_feat=future_time_feat,
            static_feat=static_feat,
            scale=scale,
            begin_states=state,
        )
| 21,761 | 34.85173 | 116 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepar-inuse/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import DType, validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import DistributionOutput, StudentTOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import (
TimeFeature,
time_features_from_frequency_str,
get_lags_for_frequency,
)
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SetField,
Transformation,
VstackFeatures,
)
# Relative imports
from ._network import DeepARPredictionNetwork, DeepARTrainingNetwork
class DeepAREstimator(GluonEstimator):
    """
    Construct a DeepAR estimator.
    This implements an RNN-based model, close to the one described in
    [SFG17]_.
    *Note:* the code of this model is unrelated to the implementation behind
    `SageMaker's DeepAR Forecasting Algorithm
    <https://docs.aws.amazon.com/sagemaker/latest/dg/deepar.html>`_.
    Parameters
    ----------
    freq
        Frequency of the data to train on and predict
    prediction_length
        Length of the prediction horizon
    trainer
        Trainer object to be used (default: Trainer())
    context_length
        Number of steps to unroll the RNN for before computing predictions
        (default: None, in which case context_length = prediction_length)
    num_layers
        Number of RNN layers (default: 2)
    num_cells
        Number of RNN cells for each layer (default: 40)
    cell_type
        Type of recurrent cells to use (available: 'lstm' or 'gru';
        default: 'lstm')
    dropout_rate
        Dropout regularization parameter (default: 0.1)
    use_feat_dynamic_real
        Whether to use the ``feat_dynamic_real`` field from the data
        (default: False)
    use_feat_static_cat
        Whether to use the ``feat_static_cat`` field from the data
        (default: False)
    use_feat_static_real
        Whether to use the ``feat_static_real`` field from the data
        (default: False)
    cardinality
        Number of values of each categorical feature.
        This must be set if ``use_feat_static_cat == True`` (default: None)
    embedding_dimension
        Dimension of the embeddings for categorical features
        (default: [min(50, (cat+1)//2) for cat in cardinality])
    distr_output
        Distribution to use to evaluate observations and sample predictions
        (default: StudentTOutput())
    scaling
        Whether to automatically scale the target values (default: true)
    lags_seq
        Indices of the lagged target values to use as inputs of the RNN
        (default: None, in which case these are automatically determined
        based on freq)
    time_features
        Time features to use as inputs of the RNN (default: None, in which
        case these are automatically determined based on freq)
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 100)
    """

    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        trainer: Trainer = Trainer(),
        context_length: Optional[int] = None,
        num_layers: int = 2,
        num_cells: int = 40,
        cell_type: str = "lstm",
        dropout_rate: float = 0.1,
        use_feat_dynamic_real: bool = False,
        use_feat_static_cat: bool = False,
        use_feat_static_real: bool = False,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        distr_output: DistributionOutput = StudentTOutput(),
        scaling: bool = True,
        lags_seq: Optional[List[int]] = None,
        time_features: Optional[List[TimeFeature]] = None,
        num_parallel_samples: int = 100,
        dtype: DType = np.float32,
    ) -> None:
        super().__init__(trainer=trainer, dtype=dtype)
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        assert num_layers > 0, "The value of `num_layers` should be > 0"
        assert num_cells > 0, "The value of `num_cells` should be > 0"
        assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
        assert (cardinality is not None and use_feat_static_cat) or (
            cardinality is None and not use_feat_static_cat
        ), "You should set `cardinality` if and only if `use_feat_static_cat=True`"
        assert cardinality is None or all(
            [c > 0 for c in cardinality]
        ), "Elements of `cardinality` should be > 0"
        assert embedding_dimension is None or all(
            [e > 0 for e in embedding_dimension]
        ), "Elements of `embedding_dimension` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"
        self.freq = freq
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.prediction_length = prediction_length
        self.distr_output = distr_output
        self.distr_output.dtype = dtype
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.dropout_rate = dropout_rate
        self.use_feat_dynamic_real = use_feat_dynamic_real
        self.use_feat_static_cat = use_feat_static_cat
        self.use_feat_static_real = use_feat_static_real
        # fall back to a single dummy category when static categorical
        # features are not used (a constant 0 is injected by the transform)
        self.cardinality = (
            cardinality if cardinality and use_feat_static_cat else [1]
        )
        self.embedding_dimension = (
            embedding_dimension
            if embedding_dimension is not None
            else [min(50, (cat + 1) // 2) for cat in self.cardinality]
        )
        self.scaling = scaling
        self.lags_seq = (
            lags_seq
            if lags_seq is not None
            else get_lags_for_frequency(freq_str=freq)
        )
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )
        # the conditioning window must cover the largest lag in addition
        # to the context range
        self.history_length = self.context_length + max(self.lags_seq)
        self.num_parallel_samples = num_parallel_samples

    def create_transformation(self) -> Transformation:
        """Build the input pipeline: drop unused fields, inject constant
        placeholders for disabled static features, add observed-values /
        time / age features, and split into training instances."""
        remove_field_names = [FieldName.FEAT_DYNAMIC_CAT]
        if not self.use_feat_static_real:
            remove_field_names.append(FieldName.FEAT_STATIC_REAL)
        if not self.use_feat_dynamic_real:
            remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
        return Chain(
            [RemoveFields(field_names=remove_field_names)]
            + (
                [SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
                if not self.use_feat_static_cat
                else []
            )
            + (
                [
                    SetField(
                        output_field=FieldName.FEAT_STATIC_REAL, value=[0.0]
                    )
                ]
                if not self.use_feat_static_real
                else []
            )
            + [
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_CAT,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_REAL,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.TARGET,
                    # in the following line, we add 1 for the time dimension
                    expected_ndim=1 + len(self.distr_output.event_shape),
                    dtype=self.dtype,
                ),
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                    dtype=self.dtype,
                ),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=self.time_features,
                    pred_length=self.prediction_length,
                ),
                AddAgeFeature(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_AGE,
                    pred_length=self.prediction_length,
                    log_scale=True,
                    dtype=self.dtype,
                ),
                VstackFeatures(
                    output_field=FieldName.FEAT_TIME,
                    input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
                    + (
                        [FieldName.FEAT_DYNAMIC_REAL]
                        if self.use_feat_dynamic_real
                        else []
                    ),
                ),
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                    past_length=self.history_length,
                    future_length=self.prediction_length,
                    time_series_fields=[
                        FieldName.FEAT_TIME,
                        FieldName.OBSERVED_VALUES,
                    ],
                ),
            ]
        )

    def create_training_network(self) -> DeepARTrainingNetwork:
        """Instantiate the training network from this estimator's config."""
        return DeepARTrainingNetwork(
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )

    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """Build a prediction network with the same hyper-parameters and
        copy the trained weights into it."""
        prediction_network = DeepARPredictionNetwork(
            num_parallel_samples=self.num_parallel_samples,
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )
        copy_parameters(trained_network, prediction_network)
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
            dtype=self.dtype,
        )
| 12,645 | 37.090361 | 94 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deep_factor_v0/RNNModel.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Third-party imports
from mxnet.gluon import HybridBlock, nn
# First-party imports
from gluonts.block.rnn import RNN
from gluonts.core.component import validated
class RNNModel(HybridBlock):
    """An RNN encoder followed by a dense projection to ``num_output`` units.

    NOTE(review): ``decoder`` is built with ``in_units=num_hidden`` even when
    ``bidirectional=True`` — confirm the RNN block's output width in that case.
    """

    @validated()
    def __init__(
        self,
        mode,
        num_hidden,
        num_layers,
        num_output,
        bidirectional=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_output = num_output
        with self.name_scope():
            self.rnn = RNN(
                mode=mode,
                num_hidden=num_hidden,
                num_layers=num_layers,
                bidirectional=bidirectional,
            )
            self.decoder = nn.Dense(
                num_output, in_units=num_hidden, flatten=False
            )

    def hybrid_forward(self, F, inputs):
        """Encode ``inputs`` with the RNN, then project each time step."""
        encoded = self.rnn(inputs)
        return self.decoder(encoded)
| 1,462 | 28.26 | 75 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deep_factor_v0/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import math
from mxnet.gluon import HybridBlock
from mxnet.gluon import nn
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.core.component import validated
from gluonts.model.common import Tensor
import indycar.model.global_variables as gvar
class DeepFactorXNetworkBase(HybridBlock):
    """
    Base class for the Deep Factor model: combines a global factor model
    (shared across series, weighted by per-series loadings) with a local
    model that produces a per-series noise scale.
    """

    def __init__(
        self,
        global_model: HybridBlock,
        local_model: HybridBlock,
        embedder: FeatureEmbedder,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.global_model = global_model
        self.local_model = local_model
        self.embedder = embedder
        with self.name_scope():
            # maps the series embedding to one loading per global factor
            self.loading = nn.Dense(
                units=global_model.num_output, use_bias=False
            )
        # one-shot flag intended to limit the debug print below
        self._debug_print = True

    def assemble_features(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, 1)
        time_feat: Tensor,  # (batch_size, history_length, num_features)
    ) -> (Tensor, Tensor):  # embedding and concatenated local-model input
        # todo: this is shared by more than one places, and should be a general routine
        embedded_cat = self.embedder(
            feat_static_cat
        )  # (batch_size, num_features * embedding_size)
        # a workaround when you wish to repeat without knowing the number
        # of repeats
        helper_ones = F.ones_like(
            F.slice_axis(time_feat, axis=2, begin=-1, end=None)
        )
        # (batch_size, history_length, num_features * embedding_size)
        repeated_cat = F.batch_dot(
            helper_ones, F.expand_dims(embedded_cat, axis=1)
        )
        # putting together all the features
        input_feat = F.concat(repeated_cat, time_feat, dim=2)
        #debug
        if gvar.hybridize==False:
            #if gvar.hybridize==False and self._debug_print:
            #if True:
            print('embedded_cat size:', embedded_cat.shape, 'time_feat size:', time_feat.shape, 'input_feat size:', input_feat.shape)
            self._debug_print = False
        return embedded_cat, input_feat

    def compute_global_local(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, 1)
        time_feat: Tensor,  # (batch_size, history_length, num_features)
    ) -> (Tensor, Tensor):  # both of size (batch_size, history_length, 1)
        cat, local_input = self.assemble_features(
            F, feat_static_cat, time_feat
        )
        loadings = self.loading(cat)  # (batch_size, num_factors)
        global_factors = self.global_model(
            time_feat
        )  # (batch_size, history_length, num_factors)
        # weighted sum of the global factors, one loading per factor
        fixed_effect = F.batch_dot(
            global_factors, loadings.expand_dims(axis=2)
        )  # (batch_size, history_length, 1)
        # softplus (log(exp(x) + 1)) keeps the noise scale positive
        random_effect = F.log(
            F.exp(self.local_model(local_input)) + 1.0
        )  # (batch_size, history_length, 1)
        return F.exp(fixed_effect), random_effect

    def hybrid_forward(self, F, x, *args, **kwargs):
        # subclasses implement the training / prediction behavior
        raise NotImplementedError

    def negative_normal_likelihood(self, F, y, mu, sigma):
        # negative log-density of y under Normal(mu, sigma), elementwise
        return (
            F.log(sigma)
            + 0.5 * math.log(2 * math.pi)
            + 0.5 * F.square((y - mu) / sigma)
        )
class DeepFactorXTrainingNetwork(DeepFactorXNetworkBase):
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, 1)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length)
    ) -> Tensor:
        """
        Compute the training loss: the negative Gaussian log-likelihood of
        the observed targets under the fixed (global) and random (local)
        effects produced by the base network.

        Parameters
        ----------
        F
            Function space.
        feat_static_cat
            Shape: (batch_size, 1).
        past_time_feat
            Shape: (batch_size, history_length, num_features).
        past_target
            Shape: (batch_size, history_length).

        Returns
        -------
        Tensor
            A batch of negative log likelihoods.
        """
        mu, sigma = self.compute_global_local(
            F, feat_static_cat, past_time_feat
        )
        # align targets with the (batch, time, 1) layout of mu / sigma
        observations = past_target.expand_dims(axis=2)
        return self.negative_normal_likelihood(F, observations, mu, sigma)
class DeepFactorXPredictionNetwork(DeepFactorXNetworkBase):
    """Prediction-time Deep Factor network: draws Gaussian sample paths."""

    @validated()
    def __init__(
        self, prediction_len: int, num_parallel_samples: int, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.prediction_len = prediction_len
        self.num_parallel_samples = num_parallel_samples

    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_time_feat: Tensor,
        future_time_feat: Tensor,
        past_target: Tensor,
    ) -> Tensor:
        """
        Parameters
        ----------
        F
            Function space
        feat_static_cat
            Shape: (batch_size, 1)
        past_time_feat
            Shape: (batch_size, history_length, num_features)
        future_time_feat
            Shape: (batch_size, prediction_length, num_features)
        past_target
            Shape: (batch_size, history_length)
            (not used here: the effects depend only on features)
        Returns
        -------
        Tensor
            Samples of shape (batch_size, num_samples, prediction_length).
        """
        # compute the effects over the full (past + future) time range
        time_feat = F.concat(past_time_feat, future_time_feat, dim=1)
        fixed_effect, random_effect = self.compute_global_local(
            F, feat_static_cat, time_feat
        )
        # draw num_parallel_samples independent Gaussian paths, stacked
        # along a new trailing axis
        samples = F.concat(
            *[
                F.sample_normal(fixed_effect, random_effect)
                for _ in range(self.num_parallel_samples)
            ],
            dim=2,
        )  # (batch_size, train_len + prediction_len, num_samples)
        # keep only the forecast horizon
        pred_samples = F.slice_axis(
            samples, axis=1, begin=-self.prediction_len, end=None
        )  # (batch_size, prediction_len, num_samples)
        return pred_samples.swapaxes(1, 2)
| 6,537 | 30.892683 | 133 | py |
rankpredictor | rankpredictor-master/src/indycar/model/lstmw/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional, Tuple
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.block.scaler import MeanScaler, NOPScaler
from gluonts.core.component import DType, validated
from gluonts.distribution import DistributionOutput, Distribution
from gluonts.distribution.distribution import getF
from gluonts.model.common import Tensor
from gluonts.support.util import weighted_average
def prod(xs):
    """Return the product of the elements of ``xs`` (1 for an empty iterable)."""
    result = 1
    for factor in xs:
        result = result * factor
    return result
class LSTMWNetwork(mx.gluon.HybridBlock):
    """
    Shared base network for the LSTMW model (appears adapted from GluonTS's
    DeepAR-style autoregressive RNN — confirm provenance).

    Builds the recurrent stack, the categorical-feature embedder and the
    target scaler, and provides the encoder unrolling logic used by both the
    training and the prediction subclasses.
    """
    @validated()
    def __init__(
        self,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: List[int],
        lags_seq: List[int],
        scaling: bool = True,
        dtype: DType = np.float32,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling
        self.dtype = dtype
        # One embedding dimension must be provided per categorical feature.
        assert len(cardinality) == len(
            embedding_dimension
        ), "embedding_dimension should be a list with the same size as cardinality"
        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        # NOTE: sorts the caller's list in place.
        lags_seq.sort()
        self.lags_seq = lags_seq
        self.distr_output = distr_output
        # Map cell_type to the Gluon cell class; any other value raises KeyError.
        RnnCell = {"lstm": mx.gluon.rnn.LSTMCell, "gru": mx.gluon.rnn.GRUCell}[
            self.cell_type
        ]
        self.target_shape = distr_output.event_shape
        # TODO: is the following restriction needed?
        assert (
            len(self.target_shape) <= 1
        ), "Argument `target_shape` should be a tuple with 1 element at most"
        with self.name_scope():
            # Projection from RNN outputs to the distribution parameters.
            self.proj_distr_args = distr_output.get_args_proj()
            self.rnn = mx.gluon.rnn.HybridSequentialRNNCell()
            for k in range(num_layers):
                cell = RnnCell(hidden_size=num_cells)
                # Layers after the first get residual (skip) connections.
                cell = mx.gluon.rnn.ResidualCell(cell) if k > 0 else cell
                # Zoneout acts as recurrent dropout when dropout_rate > 0.
                cell = (
                    mx.gluon.rnn.ZoneoutCell(cell, zoneout_states=dropout_rate)
                    if dropout_rate > 0.0
                    else cell
                )
                self.rnn.add(cell)
            self.rnn.cast(dtype=dtype)
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=embedding_dimension,
                dtype=self.dtype,
            )
            # MeanScaler normalizes targets by their mean absolute value;
            # NOPScaler is the identity fallback.
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)
    @staticmethod
    def get_lagged_subsequences(
        F,
        sequence: Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> Tensor:
        """
        Returns lagged subsequences of a given sequence.
        Parameters
        ----------
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted.
            Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.
        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and
            I = len(indices), containing lagged subsequences. Specifically,
            lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        # we must have: sequence_length - lag_index - subsequences_length >= 0
        # for all lag_index, hence the following assert
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, "
            f"found lag {max(indices)} while history length is only "
            f"{sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)
        lagged_values = []
        for lag_index in indices:
            # Each lag is a slice of length `subsequences_length` ending
            # `lag_index` steps before the end of the sequence.
            begin_index = -lag_index - subsequences_length
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(
                F.slice_axis(
                    sequence, axis=1, begin=begin_index, end=end_index
                )
            )
        return F.stack(*lagged_values, axis=-1)
    def unroll_encoder(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Optional[
            Tensor
        ],  # (batch_size, prediction_length, num_features)
        future_target: Optional[
            Tensor
        ],  # (batch_size, prediction_length, *target_shape)
    ) -> Tuple[Tensor, List, Tensor, Tensor]:
        """
        Unrolls the LSTM encoder over past and, if present, future data.
        Returns outputs and state of the encoder, plus the scale of past_target
        and a vector of static features that was constructed and fed as input
        to the encoder.
        All tensor arguments should have NTC layout.
        """
        if future_time_feat is None or future_target is None:
            # Inference mode: encode only the context window of the past.
            time_feat = past_time_feat.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            )
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        else:
            # Training mode: teacher forcing over context + prediction ranges.
            time_feat = F.concat(
                past_time_feat.slice_axis(
                    axis=1,
                    begin=self.history_length - self.context_length,
                    end=None,
                ),
                future_time_feat,
                dim=1,
            )
            sequence = F.concat(past_target, future_target, dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags = self.get_lagged_subsequences(
            F=F,
            sequence=sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length,
        )
        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
            past_observed_values.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
        )
        # (batch_size, num_features)
        embedded_cat = self.embedder(feat_static_cat)
        # in addition to embedding features, use the log scale as it can help
        # prediction too
        # (batch_size, num_features + prod(target_shape))
        static_feat = F.concat(
            embedded_cat,
            feat_static_real,
            F.log(scale)
            if len(self.target_shape) == 0
            else F.log(scale.squeeze(axis=1)),
            dim=1,
        )
        # (batch_size, subsequences_length, num_features + 1)
        repeated_static_feat = static_feat.expand_dims(axis=1).repeat(
            axis=1, repeats=subsequences_length
        )
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))
        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = F.reshape(
            data=lags_scaled,
            shape=(
                -1,
                subsequences_length,
                len(self.lags_seq) * prod(self.target_shape),
            ),
        )
        # (batch_size, sub_seq_len, input_dim)
        inputs = F.concat(input_lags, time_feat, repeated_static_feat, dim=-1)
        # unroll encoder
        outputs, state = self.rnn.unroll(
            inputs=inputs,
            length=subsequences_length,
            layout="NTC",
            merge_outputs=True,
            begin_state=self.rnn.begin_state(
                func=F.zeros,
                dtype=self.dtype,
                # NOTE(review): batch_size=0 in symbolic mode presumably lets
                # MXNet infer the batch dimension later — confirm.
                batch_size=inputs.shape[0]
                if isinstance(inputs, mx.nd.NDArray)
                else 0,
            ),
        )
        # outputs: (batch_size, seq_len, num_cells)
        # state: list of (batch_size, num_cells) tensors
        # scale: (batch_size, 1, *target_shape)
        # static_feat: (batch_size, num_features + prod(target_shape))
        return outputs, state, scale, static_feat
class LSTMWTrainingNetwork(LSTMWNetwork):
    """
    Training-time network: unrolls the encoder with teacher forcing over the
    context + prediction ranges and computes a weighted likelihood loss.
    """
    def distribution(
        self,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Distribution:
        """
        Returns the distribution predicted by the model on the range of
        past_target and future_target.
        The distribution is obtained by unrolling the network with the true
        target, this is also the distribution that is being minimized during
        training. This can be used in anomaly detection, see for instance
        examples/anomaly_detection.py.
        Input arguments are the same as for the hybrid_forward method.
        Returns
        -------
        Distribution
            a distribution object whose mean has shape:
            (batch_size, context_length + prediction_length).
        """
        # unroll the decoder in "training mode"
        # i.e. by providing future data as well
        F = getF(feat_static_cat)
        rnn_outputs, _, scale, _ = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
        )
        # Project the RNN outputs to distribution parameters per time step.
        distr_args = self.proj_distr_args(rnn_outputs)
        return self.distr_output.distribution(distr_args, scale=scale)
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Tensor:
        """
        Computes the loss for training LSTMW, all inputs tensors representing
        time series have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape, seq_len)
        future_time_feat : (batch_size, prediction_length, num_features)
        future_target : (batch_size, prediction_length, *target_shape)
        future_observed_values : (batch_size, prediction_length, *target_shape)
        Returns loss with shape (batch_size, context + prediction_length, 1)
        -------
        """
        distr = self.distribution(
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
            future_observed_values=future_observed_values,
        )
        # put together target sequence
        # (batch_size, seq_len, *target_shape)
        target = F.concat(
            past_target.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            ),
            future_target,
            dim=1,
        )
        # (batch_size, seq_len)
        loss = distr.loss(target)
        # (batch_size, seq_len, *target_shape)
        observed_values = F.concat(
            past_observed_values.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=self.history_length,
            ),
            future_observed_values,
            dim=1,
        )
        # mask the loss at one time step iff one or more observations is missing in the target dimensions
        # (batch_size, seq_len)
        #loss_weights = (
        #    observed_values
        #    if (len(self.target_shape) == 0)
        #    else observed_values.min(axis=-1, keepdims=False)
        #)
        # special setting for pitage, 0 for pitstop
        # NOTE(review): the standard missing-value masking above is replaced
        # by a domain-specific re-weighting: steps with observed_values > 0
        # get weight 1/35 while steps with observed_values == 0 (pitstops)
        # get full weight 1.0. Missing-value masking is therefore disabled —
        # presumably intentional for the rank-prediction task; confirm.
        loss_weights = (observed_values>0)*1./35 + (observed_values==0)*1.
        weighted_loss = weighted_average(
            F=F, x=loss, weights=loss_weights, axis=1
        )
        # Returns both the aggregated and the per-step loss (a two-element
        # tuple despite the `-> Tensor` annotation); callers presumably use
        # the first element as the training objective — confirm.
        return weighted_loss, loss
class LSTMWPredictionNetwork(LSTMWNetwork):
    """
    Inference-time network: encodes the observed history once, then
    autoregressively draws ``num_parallel_samples`` sample paths over the
    prediction horizon, feeding each draw back as input for the next step.
    """
    @validated()
    def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
        super().__init__(**kwargs)
        self.num_parallel_samples = num_parallel_samples
        # for decoding the lags are shifted by one, at the first time-step
        # of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]
    def sampling_decoder(
        self,
        F,
        static_feat: Tensor,
        past_target: Tensor,
        time_feat: Tensor,
        scale: Tensor,
        begin_states: List,
    ) -> Tensor:
        """
        Computes sample paths by unrolling the LSTM starting with a initial
        input and state.
        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, 1, 1).
        begin_states : List
            list of initial states for the LSTM layers.
            the shape of each tensor of the list should be (batch_size, num_cells)
        Returns
        --------
        Tensor
            A tensor containing sampled paths.
            Shape: (batch_size, num_sample_paths, prediction_length).
        """
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_time_feat = time_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_static_feat = static_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_scale = scale.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_states = [
            s.repeat(repeats=self.num_parallel_samples, axis=0)
            for s in begin_states
        ]
        future_samples = []
        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            # NOTE: shifted_lags are used so that lag 1 refers to the most
            # recent (possibly sampled) target value.
            lags = self.get_lagged_subsequences(
                F=F,
                sequence=repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags_scaled = F.broadcast_div(
                lags, repeated_scale.expand_dims(axis=-1)
            )
            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = F.reshape(
                data=lags_scaled,
                shape=(-1, 1, prod(self.target_shape) * len(self.lags_seq)),
            )
            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            decoder_input = F.concat(
                input_lags,
                repeated_time_feat.slice_axis(axis=1, begin=k, end=k + 1),
                repeated_static_feat,
                dim=-1,
            )
            # output shape: (batch_size * num_samples, 1, num_cells)
            # state shape: (batch_size * num_samples, num_cells)
            rnn_outputs, repeated_states = self.rnn.unroll(
                inputs=decoder_input,
                length=1,
                begin_state=repeated_states,
                layout="NTC",
                merge_outputs=True,
            )
            distr_args = self.proj_distr_args(rnn_outputs)
            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(
                distr_args, scale=repeated_scale
            )
            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample(dtype=self.dtype)
            # (batch_size * num_samples, seq_len, *target_shape)
            # Append the draw so later steps can use it through their lags.
            repeated_past_target = F.concat(
                repeated_past_target, new_samples, dim=1
            )
            future_samples.append(new_samples)
        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = F.concat(*future_samples, dim=1)
        # (batch_size, num_samples, prediction_length, *target_shape)
        return samples.reshape(
            shape=(
                (-1, self.num_parallel_samples)
                + (self.prediction_length,)
                + self.target_shape
            )
        )
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Tensor,  # (batch_size, prediction_length, num_features)
    ) -> Tensor:
        """
        Predicts samples, all tensors should have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        Returns
        -------
        Tensor
            Predicted samples
        """
        # unroll the decoder in "prediction mode", i.e. with past data only
        _, state, scale, static_feat = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=None,
            future_target=None,
        )
        return self.sampling_decoder(
            F=F,
            past_target=past_target,
            time_feat=future_time_feat,
            static_feat=static_feat,
            scale=scale,
            begin_states=state,
        )
| 21,888 | 34.883607 | 116 | py |
rankpredictor | rankpredictor-master/src/indycar/model/lstmw/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import DType, validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import DistributionOutput, StudentTOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import (
TimeFeature,
time_features_from_frequency_str,
get_lags_for_frequency,
)
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SetField,
Transformation,
VstackFeatures,
)
# Relative imports
from ._network import LSTMWPredictionNetwork, LSTMWTrainingNetwork
class LSTMWEstimator(GluonEstimator):
    """
    Construct a LSTMW estimator.
    This implements an RNN-based model, close to the one described in
    [SFG17]_.
    *Note:* the code of this model is unrelated to the implementation behind
    `SageMaker's DeepAR Forecasting Algorithm
    <https://docs.aws.amazon.com/sagemaker/latest/dg/deepar.html>`_.
    Parameters
    ----------
    freq
        Frequency of the data to train on and predict
    prediction_length
        Length of the prediction horizon
    trainer
        Trainer object to be used (default: Trainer())
    context_length
        Number of steps to unroll the RNN for before computing predictions
        (default: None, in which case context_length = prediction_length)
    num_layers
        Number of RNN layers (default: 2)
    num_cells
        Number of RNN cells for each layer (default: 40)
    cell_type
        Type of recurrent cells to use (available: 'lstm' or 'gru';
        default: 'lstm')
    dropout_rate
        Dropout regularization parameter (default: 0.1)
    use_feat_dynamic_real
        Whether to use the ``feat_dynamic_real`` field from the data
        (default: False)
    use_feat_static_cat
        Whether to use the ``feat_static_cat`` field from the data
        (default: False)
    use_feat_static_real
        Whether to use the ``feat_static_real`` field from the data
        (default: False)
    cardinality
        Number of values of each categorical feature.
        This must be set if ``use_feat_static_cat == True`` (default: None)
    embedding_dimension
        Dimension of the embeddings for categorical features
        (default: [min(50, (cat+1)//2) for cat in cardinality])
    distr_output
        Distribution to use to evaluate observations and sample predictions
        (default: StudentTOutput())
    scaling
        Whether to automatically scale the target values (default: true)
    lags_seq
        Indices of the lagged target values to use as inputs of the RNN
        (default: None, in which case these are automatically determined
        based on freq)
    time_features
        Time features to use as inputs of the RNN (default: None, in which
        case these are automatically determined based on freq)
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 100)
    """
    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        trainer: Trainer = Trainer(),
        context_length: Optional[int] = None,
        num_layers: int = 2,
        num_cells: int = 40,
        cell_type: str = "lstm",
        dropout_rate: float = 0.1,
        use_feat_dynamic_real: bool = False,
        use_feat_static_cat: bool = False,
        use_feat_static_real: bool = False,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        distr_output: DistributionOutput = StudentTOutput(),
        scaling: bool = True,
        lags_seq: Optional[List[int]] = None,
        time_features: Optional[List[TimeFeature]] = None,
        num_parallel_samples: int = 100,
        dtype: DType = np.float32,
    ) -> None:
        super().__init__(trainer=trainer, dtype=dtype)
        # Validate hyper-parameters eagerly so misconfiguration fails fast.
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        assert num_layers > 0, "The value of `num_layers` should be > 0"
        assert num_cells > 0, "The value of `num_cells` should be > 0"
        assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
        assert (cardinality is not None and use_feat_static_cat) or (
            cardinality is None and not use_feat_static_cat
        ), "You should set `cardinality` if and only if `use_feat_static_cat=True`"
        assert cardinality is None or all(
            [c > 0 for c in cardinality]
        ), "Elements of `cardinality` should be > 0"
        assert embedding_dimension is None or all(
            [e > 0 for e in embedding_dimension]
        ), "Elements of `embedding_dimension` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"
        self.freq = freq
        # Fall back to prediction_length when no context length is given.
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.prediction_length = prediction_length
        self.distr_output = distr_output
        self.distr_output.dtype = dtype
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.dropout_rate = dropout_rate
        self.use_feat_dynamic_real = use_feat_dynamic_real
        self.use_feat_static_cat = use_feat_static_cat
        self.use_feat_static_real = use_feat_static_real
        # A dummy single-category feature is used when static cats are off.
        self.cardinality = (
            cardinality if cardinality and use_feat_static_cat else [1]
        )
        self.embedding_dimension = (
            embedding_dimension
            if embedding_dimension is not None
            else [min(50, (cat + 1) // 2) for cat in self.cardinality]
        )
        self.scaling = scaling
        # Default lags are derived from the data frequency.
        self.lags_seq = (
            lags_seq
            if lags_seq is not None
            else get_lags_for_frequency(freq_str=freq)
        )
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )
        # Enough history must be kept to serve the largest lag.
        self.history_length = self.context_length + max(self.lags_seq)
        self.num_parallel_samples = num_parallel_samples
    def create_transformation(self) -> Transformation:
        """Build the feature-engineering pipeline applied to each series.

        Order matters: fields are removed/defaulted first, then converted to
        arrays, augmented with time/age features, and finally split into
        (past, future) training instances.
        """
        remove_field_names = [FieldName.FEAT_DYNAMIC_CAT]
        if not self.use_feat_static_real:
            remove_field_names.append(FieldName.FEAT_STATIC_REAL)
        if not self.use_feat_dynamic_real:
            remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
        return Chain(
            [RemoveFields(field_names=remove_field_names)]
            # Inject dummy static features when the real ones are unused so
            # downstream transforms always find the fields.
            + (
                [SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
                if not self.use_feat_static_cat
                else []
            )
            + (
                [
                    SetField(
                        output_field=FieldName.FEAT_STATIC_REAL, value=[0.0]
                    )
                ]
                if not self.use_feat_static_real
                else []
            )
            + [
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_CAT,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_REAL,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.TARGET,
                    # in the following line, we add 1 for the time dimension
                    expected_ndim=1 + len(self.distr_output.event_shape),
                    dtype=self.dtype,
                ),
                # Marks observed (non-NaN) entries of the target.
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                    dtype=self.dtype,
                ),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=self.time_features,
                    pred_length=self.prediction_length,
                ),
                AddAgeFeature(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_AGE,
                    pred_length=self.prediction_length,
                    log_scale=True,
                    dtype=self.dtype,
                ),
                # Stack all dynamic features into a single FEAT_TIME array.
                VstackFeatures(
                    output_field=FieldName.FEAT_TIME,
                    input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
                    + (
                        [FieldName.FEAT_DYNAMIC_REAL]
                        if self.use_feat_dynamic_real
                        else []
                    ),
                ),
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                    past_length=self.history_length,
                    future_length=self.prediction_length,
                    time_series_fields=[
                        FieldName.FEAT_TIME,
                        FieldName.OBSERVED_VALUES,
                    ],
                ),
            ]
        )
    def create_training_network(self) -> LSTMWTrainingNetwork:
        """Instantiate the training network from the stored hyper-parameters."""
        return LSTMWTrainingNetwork(
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )
    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """Wrap a trained network in a sampling prediction network/predictor.

        The prediction network shares its architecture with the training
        network, so weights can be copied over one-to-one.
        """
        prediction_network = LSTMWPredictionNetwork(
            num_parallel_samples=self.num_parallel_samples,
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )
        copy_parameters(trained_network, prediction_network)
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
            dtype=self.dtype,
        )
| 12,637 | 37.066265 | 94 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepar-savedata/predictor.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import functools
import itertools
import logging
import multiprocessing as mp
import sys
import traceback
from pathlib import Path
from pydoc import locate
from tempfile import TemporaryDirectory
import json
from typing import (
TYPE_CHECKING,
Tuple,
Union,
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Type,
)
# Third-party imports
import mxnet as mx
import numpy as np
# First-party imports
import gluonts
from gluonts.distribution import Distribution, DistributionOutput
from gluonts.core.component import (
DType,
equals,
from_hyperparameters,
get_mxnet_context,
validated,
)
from gluonts.core.exception import GluonTSException
from gluonts.core.serde import dump_json, fqname_for, load_json
from gluonts.dataset.common import DataEntry, Dataset, ListDataset
from .forecast_generator import ForecastGenerator, SampleForecastGenerator
from gluonts.dataset.loader import DataBatch, InferenceDataLoader
from gluonts.model.forecast import Forecast
from gluonts.support.util import (
export_repr_block,
export_symb_block,
get_hybrid_forward_input_names,
hybrid_block_to_symbol_block,
import_repr_block,
import_symb_block,
)
from gluonts.transform import Transformation
if TYPE_CHECKING: # avoid circular import
from gluonts.model.estimator import Estimator # noqa
OutputTransform = Callable[[DataEntry, np.ndarray], np.ndarray]
class Predictor:
    """
    Abstract base class for predictor objects.

    Parameters
    ----------
    prediction_length
        Prediction horizon.
    freq
        Frequency of the predicted data.
    """

    __version__: str = gluonts.__version__

    def __init__(self, prediction_length: int, freq: str) -> None:
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        self.prediction_length = prediction_length
        self.freq = freq

    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        """
        Compute forecasts for the time series in the provided dataset.

        Not implemented here; concrete subclasses provide the behavior.

        Parameters
        ----------
        dataset
            The dataset containing the time series to predict.

        Returns
        -------
        Iterator[Forecast]
            Iterator over the forecasts, in the same order as the dataset
            iterable was provided.
        """
        raise NotImplementedError

    def serialize(self, path: Path) -> None:
        # Record the concrete Predictor class so deserialize() can locate it.
        with (path / "type.txt").open("w") as out:
            out.write(fqname_for(self.__class__))
        # Record model and library versions next to the type information.
        version_info = {
            "model": self.__version__,
            "gluonts": gluonts.__version__,
        }
        with (path / "version.json").open("w") as out:
            json.dump(version_info, out)

    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "Predictor":
        """
        Load a serialized predictor from the given path.

        Parameters
        ----------
        path
            Path to the serialized files predictor.
        ctx
            Optional mxnet context to be used with the predictor.
            If nothing is passed will use the GPU if available and CPU otherwise.
        """
        # Recover the concrete Predictor type written by serialize().
        with (path / "type.txt").open("r") as stream:
            tpe = locate(stream.readline())
        if not issubclass(tpe, Predictor):
            raise IOError(
                f"Class {fqname_for(tpe)} is not "
                f"a subclass of {fqname_for(Predictor)}"
            )
        # Delegate to the concrete type's own deserialize().
        return tpe.deserialize(path, ctx)

    @classmethod
    def from_hyperparameters(cls, **hyperparameters):
        return from_hyperparameters(cls, **hyperparameters)
class RepresentablePredictor(Predictor):
    """
    Abstract predictor for models that are not based on Gluon.

    Subclasses should have @validated() constructors; (de)serialization and
    value equality are derived from the @validated() machinery.

    Parameters
    ----------
    prediction_length
        Prediction horizon.
    freq
        Frequency of the predicted data.
    """

    @validated()
    def __init__(self, prediction_length: int, freq: str) -> None:
        super().__init__(prediction_length, freq)

    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        # One forecast per dataset entry, in dataset order.
        yield from (self.predict_item(entry) for entry in dataset)

    def predict_item(self, item: DataEntry) -> Forecast:
        raise NotImplementedError

    def __eq__(self, that):
        """
        Two RepresentablePredictor instances are considered equal if they
        have the same constructor arguments.
        """
        return equals(self, that)

    def serialize(self, path: Path) -> None:
        # Predictor.serialize() records the class name and version files.
        super().serialize(path)
        with (path / "predictor.json").open("w") as out:
            out.write(dump_json(self) + "\n")

    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "RepresentablePredictor":
        with (path / "predictor.json").open("r") as stream:
            return load_json(stream.read())
class GluonPredictor(Predictor):
"""
Base predictor type for Gluon-based models.
Parameters
----------
input_names
Input tensor names for the graph
prediction_net
Network that will be called for prediction
batch_size
Number of time series to predict in a single batch
prediction_length
Number of time steps to predict
freq
Frequency of the input data
input_transform
Input transformation pipeline
output_transform
Output transformation
ctx
MXNet context to use for computation
forecast_generator
Class to generate forecasts from network ouputs
"""
BlockType = mx.gluon.Block
    def __init__(
        self,
        input_names: List[str],
        prediction_net: BlockType,
        batch_size: int,
        prediction_length: int,
        freq: str,
        ctx: mx.Context,
        input_transform: Transformation,
        forecast_generator: ForecastGenerator = SampleForecastGenerator(),
        output_transform: Optional[OutputTransform] = None,
        dtype: DType = np.float32,
    ) -> None:
        super().__init__(prediction_length, freq)
        self.input_names = input_names
        self.prediction_net = prediction_net
        #self.batch_size = batch_size
        # NOTE(review): the `batch_size` argument is deliberately ignored and
        # the batch size forced to 1 (see also predict(), which passes 1 to
        # the InferenceDataLoader) — presumably a fork-specific change to
        # predict one series at a time; confirm before "fixing".
        self.batch_size = 1
        self.input_transform = input_transform
        self.forecast_generator = forecast_generator
        self.output_transform = output_transform
        self.ctx = ctx
        self.dtype = dtype
def hybridize(self, batch: DataBatch) -> None:
"""
Hybridizes the underlying prediction network.
Parameters
----------
batch
A batch of data to use for the required forward pass after the
`hybridize()` call.
"""
self.prediction_net.hybridize(active=True)
self.prediction_net(*[batch[k] for k in self.input_names])
def as_symbol_block_predictor(
self, batch: DataBatch
) -> "SymbolBlockPredictor":
"""
Returns a variant of the current :class:`GluonPredictor` backed
by a Gluon `SymbolBlock`. If the current predictor is already a
:class:`SymbolBlockPredictor`, it just returns itself.
Parameters
----------
batch
A batch of data to use for the required forward pass after the
`hybridize()` call of the underlying network.
Returns
-------
SymbolBlockPredictor
A predictor derived from the current one backed by a `SymbolBlock`.
"""
raise NotImplementedError
def predict(
self, dataset: Dataset, num_samples: Optional[int] = None
) -> Iterator[Forecast]:
#print('predict')
inference_data_loader = InferenceDataLoader(
dataset,
self.input_transform,
#self.batch_size,
1,
ctx=self.ctx,
dtype=self.dtype,
)
yield from self.forecast_generator(
inference_data_loader=inference_data_loader,
prediction_net=self.prediction_net,
input_names=self.input_names,
freq=self.freq,
output_transform=self.output_transform,
num_samples=num_samples,
)
def __eq__(self, that):
if type(self) != type(that):
return False
# TODO: also consider equality of the pipelines
# if not equals(self.input_transform, that.input_transform):
# return False
return equals(
self.prediction_net.collect_params(),
that.prediction_net.collect_params(),
)
def serialize(self, path: Path) -> None:
# call Predictor.serialize() in order to serialize the class name
super().serialize(path)
# serialize every GluonPredictor-specific parameters
# serialize the prediction network
self.serialize_prediction_net(path)
# serialize transformation chain
with (path / "input_transform.json").open("w") as fp:
print(dump_json(self.input_transform), file=fp)
# FIXME: also needs to serialize the output_transform
# serialize all remaining constructor parameters
with (path / "parameters.json").open("w") as fp:
parameters = dict(
batch_size=self.batch_size,
prediction_length=self.prediction_length,
freq=self.freq,
ctx=self.ctx,
dtype=self.dtype,
forecast_generator=self.forecast_generator,
input_names=self.input_names,
)
print(dump_json(parameters), file=fp)
def serialize_prediction_net(self, path: Path) -> None:
raise NotImplementedError()
class SymbolBlockPredictor(GluonPredictor):
    """
    A predictor which serializes the network structure as an MXNet symbolic
    graph. Should be used for models deployed in production in order to
    ensure forward-compatibility as GluonTS models evolve.
    Used by the training shell if training is invoked with a hyperparameter
    `use_symbol_block_predictor = True`.
    """
    BlockType = mx.gluon.SymbolBlock
    def as_symbol_block_predictor(
        self, batch: DataBatch
    ) -> "SymbolBlockPredictor":
        # Already symbol-block backed; no conversion necessary.
        return self
    def serialize_prediction_net(self, path: Path) -> None:
        # Export the network as an MXNet symbol file + parameter file.
        export_symb_block(self.prediction_net, path, "prediction_net")
    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "SymbolBlockPredictor":
        """Load a predictor previously written by ``serialize``.

        Falls back to the default MXNet context when ``ctx`` is None; all
        deserialization happens inside that context.
        """
        ctx = ctx if ctx is not None else get_mxnet_context()
        with mx.Context(ctx):
            # deserialize constructor parameters
            with (path / "parameters.json").open("r") as fp:
                parameters = load_json(fp.read())
            parameters["ctx"] = ctx
            # deserialize transformation chain
            with (path / "input_transform.json").open("r") as fp:
                transform = load_json(fp.read())
            # deserialize prediction network
            num_inputs = len(parameters["input_names"])
            prediction_net = import_symb_block(
                num_inputs, path, "prediction_net"
            )
            return SymbolBlockPredictor(
                input_transform=transform,
                prediction_net=prediction_net,
                **parameters,
            )
class RepresentableBlockPredictor(GluonPredictor):
    """
    A predictor which serializes the network structure using the
    JSON-serialization methods located in `gluonts.core.serde`. Use the following
    logic to create a `RepresentableBlockPredictor` from a trained prediction
    network.
    >>> def create_representable_block_predictor(
    ...     prediction_network: mx.gluon.HybridBlock,
    ...     **kwargs
    ... ) -> RepresentableBlockPredictor:
    ...     return RepresentableBlockPredictor(
    ...         prediction_net=prediction_network,
    ...         **kwargs
    ...     )
    """
    BlockType = mx.gluon.HybridBlock
    def __init__(
        self,
        prediction_net: BlockType,
        batch_size: int,
        prediction_length: int,
        freq: str,
        ctx: mx.Context,
        input_transform: Transformation,
        forecast_generator: ForecastGenerator = SampleForecastGenerator(),
        output_transform: Optional[
            Callable[[DataEntry, np.ndarray], np.ndarray]
        ] = None,
        dtype: DType = np.float32,
    ) -> None:
        # input_names is derived automatically from the hybrid_forward
        # signature of the network, so it is not a constructor argument here.
        super().__init__(
            input_names=get_hybrid_forward_input_names(prediction_net),
            prediction_net=prediction_net,
            batch_size=batch_size,
            prediction_length=prediction_length,
            freq=freq,
            ctx=ctx,
            input_transform=input_transform,
            forecast_generator=forecast_generator,
            output_transform=output_transform,
            dtype=dtype,
        )
    def as_symbol_block_predictor(
        self, batch: DataBatch
    ) -> SymbolBlockPredictor:
        """Convert to a SymbolBlock-backed predictor using one forward pass
        over `batch` to trace the graph."""
        symbol_block_net = hybrid_block_to_symbol_block(
            hb=self.prediction_net,
            data_batch=[batch[k] for k in self.input_names],
        )
        return SymbolBlockPredictor(
            input_names=self.input_names,
            prediction_net=symbol_block_net,
            batch_size=self.batch_size,
            prediction_length=self.prediction_length,
            freq=self.freq,
            ctx=self.ctx,
            input_transform=self.input_transform,
            forecast_generator=self.forecast_generator,
            output_transform=self.output_transform,
            dtype=self.dtype,
        )
    def serialize(self, path: Path) -> None:
        # repr-based serialization is tied to the current code version,
        # hence the warning below.
        logging.warning(
            "Serializing RepresentableBlockPredictor instances does not save "
            "the prediction network structure in a backwards-compatible "
            "manner. Be careful not to use this method in production."
        )
        super().serialize(path)
    def serialize_prediction_net(self, path: Path) -> None:
        export_repr_block(self.prediction_net, path, "prediction_net")
    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "RepresentableBlockPredictor":
        """Load a predictor previously written by ``serialize``, binding it
        to ``ctx`` (or the default MXNet context)."""
        ctx = ctx if ctx is not None else get_mxnet_context()
        with mx.Context(ctx):
            # deserialize constructor parameters
            with (path / "parameters.json").open("r") as fp:
                parameters = load_json(fp.read())
            # deserialize transformation chain
            with (path / "input_transform.json").open("r") as fp:
                transform = load_json(fp.read())
            # deserialize prediction network
            prediction_net = import_repr_block(path, "prediction_net")
            # input_names is derived from the prediction_net
            if "input_names" in parameters:
                del parameters["input_names"]
            parameters["ctx"] = ctx
            return RepresentableBlockPredictor(
                input_transform=transform,
                prediction_net=prediction_net,
                **parameters,
            )
class WorkerError:
    """Sentinel passed through the output queue when a worker process fails.

    Wraps the formatted traceback text so the parent process can re-raise
    the failure with full context.
    """

    def __init__(self, msg):
        self.msg = msg
def _worker_loop(
    predictor_path: Path,
    input_queue: mp.Queue,
    output_queue: mp.Queue,
    worker_id,
    **kwargs,
):
    """
    Worker loop for multiprocessing Predictor.
    Loads the predictor serialized in predictor_path
    reads inputs from input_queue and writes forecasts to output_queue
    """
    predictor = Predictor.deserialize(predictor_path)
    while True:
        idx, data_chunk = input_queue.get()
        # (None, None) on the input queue is the shutdown signal; echo a
        # (None, None, None) sentinel so the parent can count exits.
        if idx is None:
            output_queue.put((None, None, None))
            break
        try:
            # Materialize the forecasts so any prediction error surfaces here
            # (inside the worker) rather than lazily in the parent.
            result = list(predictor.predict(data_chunk, **kwargs))
        except Exception:
            # Ship the formatted traceback back as a WorkerError and stop;
            # the parent raises it on receipt.
            we = WorkerError(
                "".join(traceback.format_exception(*sys.exc_info()))
            )
            output_queue.put((we, None, None))
            break
        output_queue.put((idx, worker_id, result))
class ParallelizedPredictor(Predictor):
    """
    Runs multiple instances (workers) of a predictor in parallel.
    Exceptions are propagated from the workers.
    Note: That there is currently an issue with tqdm that will cause things
    to hang if the ParallelizedPredictor is used with tqdm and an exception
    occurs during prediction.
    https://github.com/tqdm/tqdm/issues/548
    Parameters
    ----------
    base_predictor
        A representable predictor that will be used
    num_workers
        Number of workers (processes) to use. If set to
        None, one worker per CPU will be used.
    chunk_size
        Number of items to pass per call
    """
    def __init__(
        self,
        base_predictor: Predictor,
        num_workers: Optional[int] = None,
        chunk_size=1,
    ) -> None:
        super().__init__(base_predictor.prediction_length, base_predictor.freq)
        self._base_predictor = base_predictor
        self._num_workers = (
            num_workers if num_workers is not None else mp.cpu_count()
        )
        self._chunk_size = chunk_size
        # Bookkeeping for the worker pool; populated in predict().
        self._num_running_workers = 0
        self._input_queues = []
        self._output_queue = None
    def _grouper(self, iterable, n):
        """Yield successive tuples of up to `n` items from `iterable`."""
        iterator = iter(iterable)
        group = tuple(itertools.islice(iterator, n))
        while group:
            yield group
            group = tuple(itertools.islice(iterator, n))
    def terminate(self):
        """Forcefully shut down all workers (used on error propagation)."""
        for q in self._input_queues:
            q.put((None, None))
        for w in self._workers:
            w.terminate()
        for i, w in enumerate(self._workers):
            w.join()
    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        # The base predictor is serialized to a temp dir and re-loaded inside
        # each worker process (see _worker_loop).
        with TemporaryDirectory() as tempdir:
            predictor_path = Path(tempdir)
            self._base_predictor.serialize(predictor_path)
            # TODO: Consider using shared memory for the data transfer.
            self._input_queues = [mp.Queue() for _ in range(self._num_workers)]
            self._output_queue = mp.Queue()
            workers = []
            for worker_id, in_q in enumerate(self._input_queues):
                worker = mp.Process(
                    target=_worker_loop,
                    args=(predictor_path, in_q, self._output_queue, worker_id),
                    kwargs=kwargs,
                )
                worker.daemon = True
                worker.start()
                workers.append(worker)
                self._num_running_workers += 1
            self._workers = workers
            chunked_data = self._grouper(dataset, self._chunk_size)
            # _send_idx / _next_idx implement in-order delivery: chunks may
            # finish out of order, so results are parked in _data_buffer until
            # their index is next to be yielded.
            self._send_idx = 0
            self._next_idx = 0
            self._data_buffer = {}
            worker_ids = list(range(self._num_workers))
            def receive():
                # Blocks until any worker reports; a WorkerError payload means
                # a worker crashed -- tear everything down and re-raise.
                idx, worker_id, result = self._output_queue.get()
                if isinstance(idx, WorkerError):
                    self._num_running_workers -= 1
                    self.terminate()
                    raise Exception(idx.msg)
                if idx is not None:
                    self._data_buffer[idx] = result
                return idx, worker_id, result
            def get_next_from_buffer():
                # Drain consecutive ready results starting at _next_idx.
                while self._next_idx in self._data_buffer:
                    result_batch = self._data_buffer.pop(self._next_idx)
                    self._next_idx += 1
                    for result in result_batch:
                        yield result
            def send(worker_id, chunk):
                q = self._input_queues[worker_id]
                q.put((self._send_idx, chunk))
                self._send_idx += 1
            try:
                # prime the queues
                for wid in worker_ids:
                    chunk = next(chunked_data)
                    send(wid, chunk)
                # Steady state: whichever worker finished gets the next chunk.
                while True:
                    idx, wid, result = receive()
                    for res in get_next_from_buffer():
                        yield res
                    chunk = next(chunked_data)
                    send(wid, chunk)
            except StopIteration:
                # signal workers end of data
                for q in self._input_queues:
                    q.put((None, None))
                # collect any outstanding results
                while self._num_running_workers > 0:
                    idx, worker_id, result = receive()
                    if idx is None:
                        self._num_running_workers -= 1
                        continue
                    for res in get_next_from_buffer():
                        yield res
                # Sanity: everything sent has been yielded in order.
                assert len(self._data_buffer) == 0
                assert self._send_idx == self._next_idx
class Localizer(Predictor):
    """
    A Predictor that uses an estimator to train a local model per time series and
    immediatly calls this to predict.
    Parameters
    ----------
    estimator
        The estimator object to train on each dataset entry at prediction time.
    """
    def __init__(self, estimator: "Estimator"):
        super().__init__(estimator.prediction_length, estimator.freq)
        self.estimator = estimator
    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        logger = logging.getLogger(__name__)
        # Train a fresh model on each individual series, then predict it.
        # NOTE(review): this calls len(dataset) per series purely for logging;
        # presumably datasets here are sized — confirm for streaming inputs.
        for i, ts in enumerate(dataset, start=1):
            logger.info(f"training for time series {i} / {len(dataset)}")
            local_ds = ListDataset([ts], freq=self.freq)
            trained_pred = self.estimator.train(local_ds)
            logger.info(f"predicting for time series {i} / {len(dataset)}")
            predictions = trained_pred.predict(local_ds, **kwargs)
            for pred in predictions:
                yield pred
class FallbackPredictor(Predictor):
    """Marker base for predictors that can be constructed on the fly from
    another predictor's captured constructor arguments (see ``fallback``)."""
    @classmethod
    def from_predictor(
        cls, base: RepresentablePredictor, **overrides
    ) -> Predictor:
        # Create predictor based on an existing predictor.
        # This let's us create a MeanPredictor as a fallback on the fly.
        # __init_args__ holds the @validated() constructor arguments of
        # `base`; note that duplicate keys between it and `overrides` would
        # raise a TypeError here (double ** unpacking), which is intentional.
        return cls.from_hyperparameters(
            **getattr(base, "__init_args__"), **overrides
        )
def fallback(fallback_cls: Type[FallbackPredictor]):
    """Decorator factory for ``predict_item`` methods.

    GluonTS exceptions propagate untouched; any other exception is logged
    (with its traceback) and prediction is retried once with a fresh
    ``fallback_cls`` instance derived from the current predictor.
    """

    def decorator(predict_item):
        @functools.wraps(predict_item)
        def fallback_predict(self, item: DataEntry) -> Forecast:
            try:
                return predict_item(self, item)
            except GluonTSException:
                # Domain errors are meaningful to the caller -- re-raise.
                raise
            except Exception:
                logging.warning(
                    f"Base predictor failed with: {traceback.format_exc()}"
                )
                return fallback_cls.from_predictor(self).predict_item(item)

        return fallback_predict

    return decorator
| 24,040 | 30.969415 | 81 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepar-savedata/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional, Tuple
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.block.scaler import MeanScaler, NOPScaler
from gluonts.core.component import DType, validated
from gluonts.distribution import DistributionOutput, Distribution
from gluonts.distribution.distribution import getF
from gluonts.model.common import Tensor
from gluonts.support.util import weighted_average
def prod(xs):
    """Return the product of the elements of ``xs`` (1 for an empty iterable)."""
    result = 1
    for factor in xs:
        result *= factor
    return result
class DeepARNetwork(mx.gluon.HybridBlock):
    """Shared DeepAR backbone.

    Embeds static categorical features, scales the target, assembles lagged
    input subsequences and unrolls an RNN (LSTM/GRU) encoder over them.
    This variant additionally records intermediate tensors into
    ``self.savedata`` for offline inspection.
    """
    @validated()
    def __init__(
        self,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: List[int],
        lags_seq: List[int],
        scaling: bool = True,
        dtype: DType = np.float32,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling
        self.dtype = dtype
        assert len(cardinality) == len(
            embedding_dimension
        ), "embedding_dimension should be a list with the same size as cardinality"
        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        # NOTE: sorts the caller's list in place (side effect on the argument).
        lags_seq.sort()
        self.lags_seq = lags_seq
        self.distr_output = distr_output
        # KeyError here for any cell_type other than "lstm"/"gru".
        RnnCell = {"lstm": mx.gluon.rnn.LSTMCell, "gru": mx.gluon.rnn.GRUCell}[
            self.cell_type
        ]
        self.target_shape = distr_output.event_shape
        # TODO: is the following restriction needed?
        assert (
            len(self.target_shape) <= 1
        ), "Argument `target_shape` should be a tuple with 1 element at most"
        with self.name_scope():
            self.proj_distr_args = distr_output.get_args_proj()
            # Stack of RNN cells; layers past the first are wrapped in
            # residual connections, and zoneout is applied when dropout > 0.
            self.rnn = mx.gluon.rnn.HybridSequentialRNNCell()
            for k in range(num_layers):
                cell = RnnCell(hidden_size=num_cells)
                cell = mx.gluon.rnn.ResidualCell(cell) if k > 0 else cell
                cell = (
                    mx.gluon.rnn.ZoneoutCell(cell, zoneout_states=dropout_rate)
                    if dropout_rate > 0.0
                    else cell
                )
                self.rnn.add(cell)
            self.rnn.cast(dtype=dtype)
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=embedding_dimension,
                dtype=self.dtype,
            )
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)
        #save data
        self.reset_savedata()
    def reset_savedata(self):
        """Clear the buffers that accumulate per-batch intermediate tensors."""
        self.savedata = {}
        # RNN input tensors (lags + time features + static features)
        self.savedata['input'] = []
        # observed/sampled target values
        self.savedata['target'] = []
        # raw lagged subsequences
        self.savedata['lags'] = []
        # projected distribution parameters
        self.savedata['theta'] = []
        # final RNN hidden states (prediction only)
        self.savedata['hstate'] = []
        # raw RNN outputs (prediction only)
        self.savedata['rnnoutput'] = []
    @staticmethod
    def get_lagged_subsequences(
        F,
        sequence: Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> Tensor:
        """
        Returns lagged subsequences of a given sequence.
        Parameters
        ----------
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted.
            Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.
        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and
            I = len(indices), containing lagged subsequences. Specifically,
            lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        # we must have: sequence_length - lag_index - subsequences_length >= 0
        # for all lag_index, hence the following assert
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, "
            f"found lag {max(indices)} while history length is only "
            f"{sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)
        lagged_values = []
        for lag_index in indices:
            begin_index = -lag_index - subsequences_length
            # lag 0 means "up to the end of the sequence" (end=None).
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(
                F.slice_axis(
                    sequence, axis=1, begin=begin_index, end=end_index
                )
            )
        return F.stack(*lagged_values, axis=-1)
    def unroll_encoder(
        self,
        F,
        feat_static_cat: Tensor, # (batch_size, num_features)
        feat_static_real: Tensor, # (batch_size, num_features)
        past_time_feat: Tensor, # (batch_size, history_length, num_features)
        past_target: Tensor, # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor, # (batch_size, history_length, *target_shape)
        future_time_feat: Optional[
            Tensor
        ], # (batch_size, prediction_length, num_features)
        future_target: Optional[
            Tensor
        ], # (batch_size, prediction_length, *target_shape)
    ) -> Tuple[Tensor, List, Tensor, Tensor]:
        """
        Unrolls the LSTM encoder over past and, if present, future data.
        Returns outputs and state of the encoder, plus the scale of past_target
        and a vector of static features that was constructed and fed as input
        to the encoder.
        All tensor arguments should have NTC layout.
        """
        # Prediction mode (no future data): encode the context window only.
        # Training mode: encode context + prediction range jointly.
        if future_time_feat is None or future_target is None:
            time_feat = past_time_feat.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            )
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        else:
            time_feat = F.concat(
                past_time_feat.slice_axis(
                    axis=1,
                    begin=self.history_length - self.context_length,
                    end=None,
                ),
                future_time_feat,
                dim=1,
            )
            sequence = F.concat(past_target, future_target, dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags = self.get_lagged_subsequences(
            F=F,
            sequence=sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length,
        )
        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
            past_observed_values.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
        )
        # (batch_size, num_features)
        embedded_cat = self.embedder(feat_static_cat)
        # in addition to embedding features, use the log scale as it can help
        # prediction too
        # (batch_size, num_features + prod(target_shape))
        static_feat = F.concat(
            embedded_cat,
            feat_static_real,
            F.log(scale)
            if len(self.target_shape) == 0
            else F.log(scale.squeeze(axis=1)),
            dim=1,
        )
        # (batch_size, subsequences_length, num_features + 1)
        repeated_static_feat = static_feat.expand_dims(axis=1).repeat(
            axis=1, repeats=subsequences_length
        )
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))
        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = F.reshape(
            data=lags_scaled,
            shape=(
                -1,
                subsequences_length,
                len(self.lags_seq) * prod(self.target_shape),
            ),
        )
        # (batch_size, sub_seq_len, input_dim)
        inputs = F.concat(input_lags, time_feat, repeated_static_feat, dim=-1)
        #save data here
        # NOTE(review): .asnumpy() requires NDArray inputs, so this network
        # presumably must not be hybridized while capturing -- confirm.
        self.savedata['input'].append(inputs.asnumpy().copy())
        #self.savedata.append(inputs)
        self.savedata['lags'].append(lags.asnumpy().copy())
        #print(self.lags_seq)
        # unroll encoder
        outputs, state = self.rnn.unroll(
            inputs=inputs,
            length=subsequences_length,
            layout="NTC",
            merge_outputs=True,
            begin_state=self.rnn.begin_state(
                func=F.zeros,
                dtype=self.dtype,
                batch_size=inputs.shape[0]
                if isinstance(inputs, mx.nd.NDArray)
                else 0,
            ),
        )
        # outputs: (batch_size, seq_len, num_cells)
        # state: list of (batch_size, num_cells) tensors
        # scale: (batch_size, 1, *target_shape)
        # static_feat: (batch_size, num_features + prod(target_shape))
        return outputs, state, scale, static_feat
class DeepARTrainingNetwork(DeepARNetwork):
    """Training-mode DeepAR network: unrolls with teacher forcing (true
    future targets) and computes the weighted negative log-likelihood loss."""
    def distribution(
        self,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Distribution:
        """
        Returns the distribution predicted by the model on the range of
        past_target and future_target.
        The distribution is obtained by unrolling the network with the true
        target, this is also the distribution that is being minimized during
        training. This can be used in anomaly detection, see for instance
        examples/anomaly_detection.py.
        Input arguments are the same as for the hybrid_forward method.
        Returns
        -------
        Distribution
            a distribution object whose mean has shape:
            (batch_size, context_length + prediction_length).
        """
        # unroll the decoder in "training mode"
        # i.e. by providing future data as well
        F = getF(feat_static_cat)
        rnn_outputs, _, scale, _ = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
        )
        # Project RNN outputs to the distribution parameters, then rescale.
        distr_args = self.proj_distr_args(rnn_outputs)
        return self.distr_output.distribution(distr_args, scale=scale)
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Tensor:
        """
        Computes the loss for training DeepAR, all inputs tensors representing
        time series have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape, seq_len)
        future_time_feat : (batch_size, prediction_length, num_features)
        future_target : (batch_size, prediction_length, *target_shape)
        future_observed_values : (batch_size, prediction_length, *target_shape)
        Returns loss with shape (batch_size, context + prediction_length, 1)
        -------
        """
        distr = self.distribution(
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
            future_observed_values=future_observed_values,
        )
        # put together target sequence
        # (batch_size, seq_len, *target_shape)
        target = F.concat(
            past_target.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            ),
            future_target,
            dim=1,
        )
        # (batch_size, seq_len)
        loss = distr.loss(target)
        #save target in training
        # Data-capture hook: record the full (context + prediction) target.
        self.savedata['target'].append(target.asnumpy().copy())
        # (batch_size, seq_len, *target_shape)
        observed_values = F.concat(
            past_observed_values.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=self.history_length,
            ),
            future_observed_values,
            dim=1,
        )
        # mask the loss at one time step iff one or more observations is missing in the target dimensions
        # (batch_size, seq_len)
        loss_weights = (
            observed_values
            if (len(self.target_shape) == 0)
            else observed_values.min(axis=-1, keepdims=False)
        )
        weighted_loss = weighted_average(
            F=F, x=loss, weights=loss_weights, axis=1
        )
        # Returns both the per-series weighted loss and the raw per-step loss.
        return weighted_loss, loss
class DeepARPredictionNetwork(DeepARNetwork):
    """Prediction-mode DeepAR network: encodes the past, then autoregressively
    samples future trajectories one step at a time."""
    @validated()
    #def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
    def __init__(self, num_parallel_samples: int = 1, **kwargs) -> None:
        super().__init__(**kwargs)
        #self.num_parallel_samples = num_parallel_samples
        # NOTE(review): the constructor argument is deliberately ignored and
        # the sample count forced to 1 (down from the stock default of 100) --
        # presumably so the savedata hooks capture a single trajectory per
        # series. Confirm before reusing for real probabilistic forecasting.
        self.num_parallel_samples = 1
        # for decoding the lags are shifted by one, at the first time-step
        # of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]
    def sampling_decoder(
        self,
        F,
        static_feat: Tensor,
        past_target: Tensor,
        time_feat: Tensor,
        scale: Tensor,
        begin_states: List,
    ) -> Tensor:
        """
        Computes sample paths by unrolling the LSTM starting with a initial
        input and state.
        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, 1, 1).
        begin_states : List
            list of initial states for the LSTM layers.
            the shape of each tensor of the list should be (batch_size, num_cells)
        Returns
        --------
        Tensor
            A tensor containing sampled paths.
            Shape: (batch_size, num_sample_paths, prediction_length).
        """
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_time_feat = time_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_static_feat = static_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_scale = scale.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_states = [
            s.repeat(repeats=self.num_parallel_samples, axis=0)
            for s in begin_states
        ]
        future_samples = []
        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            # Lags are taken from the growing history (past + samples so far),
            # using the shifted indices so lag 1 = most recent value.
            lags = self.get_lagged_subsequences(
                F=F,
                sequence=repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags_scaled = F.broadcast_div(
                lags, repeated_scale.expand_dims(axis=-1)
            )
            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = F.reshape(
                data=lags_scaled,
                shape=(-1, 1, prod(self.target_shape) * len(self.lags_seq)),
            )
            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            decoder_input = F.concat(
                input_lags,
                repeated_time_feat.slice_axis(axis=1, begin=k, end=k + 1),
                repeated_static_feat,
                dim=-1,
            )
            # output shape: (batch_size * num_samples, 1, num_cells)
            # state shape: (batch_size * num_samples, num_cells)
            rnn_outputs, repeated_states = self.rnn.unroll(
                inputs=decoder_input,
                length=1,
                begin_state=repeated_states,
                layout="NTC",
                merge_outputs=True,
            )
            distr_args = self.proj_distr_args(rnn_outputs)
            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(
                distr_args, scale=repeated_scale
            )
            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample(dtype=self.dtype)
            # (batch_size * num_samples, seq_len, *target_shape)
            # Feed the sample back as observed history for the next step.
            repeated_past_target = F.concat(
                repeated_past_target, new_samples, dim=1
            )
            future_samples.append(new_samples)
            #save only the last output
            # Data-capture hook: record state/output/params/sample of the
            # final decoding step only.
            if k == self.prediction_length -1:
                self.savedata['hstate'].append(repeated_states)
                self.savedata['rnnoutput'].append(rnn_outputs.asnumpy().copy())
                self.savedata['theta'].append(distr_args)
                self.savedata['target'].append(new_samples.asnumpy().copy())
        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = F.concat(*future_samples, dim=1)
        # (batch_size, num_samples, prediction_length, *target_shape)
        return samples.reshape(
            shape=(
                (-1, self.num_parallel_samples)
                + (self.prediction_length,)
                + self.target_shape
            )
        )
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor, # (batch_size, num_features)
        feat_static_real: Tensor, # (batch_size, num_features)
        past_time_feat: Tensor, # (batch_size, history_length, num_features)
        past_target: Tensor, # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor, # (batch_size, history_length, *target_shape)
        future_time_feat: Tensor, # (batch_size, prediction_length, num_features)
    ) -> Tensor:
        """
        Predicts samples, all tensors should have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        Returns
        -------
        Tensor
            Predicted samples
        """
        # unroll the decoder in "prediction mode", i.e. with past data only
        _, state, scale, static_feat = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=None,
            future_target=None,
        )
        # Hand the encoder state over to the autoregressive sampler.
        return self.sampling_decoder(
            F=F,
            past_target=past_target,
            time_feat=future_time_feat,
            static_feat=static_feat,
            scale=scale,
            begin_states=state,
        )
| 22,891 | 34.601866 | 116 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepar-savedata/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import DType, validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import DistributionOutput, StudentTOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import (
TimeFeature,
time_features_from_frequency_str,
get_lags_for_frequency,
)
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SetField,
Transformation,
VstackFeatures,
)
# Relative imports
from ._network import DeepARPredictionNetwork, DeepARTrainingNetwork
class DeepAREstimator(GluonEstimator):
    """
    Construct a DeepAR estimator.
    This implements an RNN-based model, close to the one described in
    [SFG17]_.
    *Note:* the code of this model is unrelated to the implementation behind
    `SageMaker's DeepAR Forecasting Algorithm
    <https://docs.aws.amazon.com/sagemaker/latest/dg/deepar.html>`_.
    Parameters
    ----------
    freq
        Frequency of the data to train on and predict
    prediction_length
        Length of the prediction horizon
    trainer
        Trainer object to be used (default: Trainer())
    context_length
        Number of steps to unroll the RNN for before computing predictions
        (default: None, in which case context_length = prediction_length)
    num_layers
        Number of RNN layers (default: 2)
    num_cells
        Number of RNN cells for each layer (default: 40)
    cell_type
        Type of recurrent cells to use (available: 'lstm' or 'gru';
        default: 'lstm')
    dropout_rate
        Dropout regularization parameter (default: 0.1)
    use_feat_dynamic_real
        Whether to use the ``feat_dynamic_real`` field from the data
        (default: False)
    use_feat_static_cat
        Whether to use the ``feat_static_cat`` field from the data
        (default: False)
    use_feat_static_real
        Whether to use the ``feat_static_real`` field from the data
        (default: False)
    cardinality
        Number of values of each categorical feature.
        This must be set if ``use_feat_static_cat == True`` (default: None)
    embedding_dimension
        Dimension of the embeddings for categorical features
        (default: [min(50, (cat+1)//2) for cat in cardinality])
    distr_output
        Distribution to use to evaluate observations and sample predictions
        (default: StudentTOutput())
    scaling
        Whether to automatically scale the target values (default: true)
    lags_seq
        Indices of the lagged target values to use as inputs of the RNN
        (default: None, in which case these are automatically determined
        based on freq)
    time_features
        Time features to use as inputs of the RNN (default: None, in which
        case these are automatically determined based on freq)
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 100)
    """
    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        trainer: Trainer = Trainer(),
        context_length: Optional[int] = None,
        num_layers: int = 2,
        num_cells: int = 40,
        cell_type: str = "lstm",
        dropout_rate: float = 0.1,
        use_feat_dynamic_real: bool = False,
        use_feat_static_cat: bool = False,
        use_feat_static_real: bool = False,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        distr_output: DistributionOutput = StudentTOutput(),
        scaling: bool = True,
        lags_seq: Optional[List[int]] = None,
        time_features: Optional[List[TimeFeature]] = None,
        num_parallel_samples: int = 100,
        dtype: DType = np.float32,
    ) -> None:
        super().__init__(trainer=trainer, dtype=dtype)
        # Validate user-supplied hyperparameters early, with explicit messages.
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        assert num_layers > 0, "The value of `num_layers` should be > 0"
        assert num_cells > 0, "The value of `num_cells` should be > 0"
        assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
        assert (cardinality is not None and use_feat_static_cat) or (
            cardinality is None and not use_feat_static_cat
        ), "You should set `cardinality` if and only if `use_feat_static_cat=True`"
        assert cardinality is None or all(
            [c > 0 for c in cardinality]
        ), "Elements of `cardinality` should be > 0"
        assert embedding_dimension is None or all(
            [e > 0 for e in embedding_dimension]
        ), "Elements of `embedding_dimension` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"
        self.freq = freq
        # Default the conditioning range to the forecast horizon when unset.
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.prediction_length = prediction_length
        self.distr_output = distr_output
        self.distr_output.dtype = dtype
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.dropout_rate = dropout_rate
        self.use_feat_dynamic_real = use_feat_dynamic_real
        self.use_feat_static_cat = use_feat_static_cat
        self.use_feat_static_real = use_feat_static_real
        # When static categorical features are disabled, a single dummy
        # category (cardinality [1]) is used so the embedder still works.
        self.cardinality = (
            cardinality if cardinality and use_feat_static_cat else [1]
        )
        self.embedding_dimension = (
            embedding_dimension
            if embedding_dimension is not None
            else [min(50, (cat + 1) // 2) for cat in self.cardinality]
        )
        self.scaling = scaling
        # Lag indices and time features default to frequency-derived choices.
        self.lags_seq = (
            lags_seq
            if lags_seq is not None
            else get_lags_for_frequency(freq_str=freq)
        )
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )
        # The network must see enough history to cover the largest lag.
        self.history_length = self.context_length + max(self.lags_seq)
        self.num_parallel_samples = num_parallel_samples
    def create_transformation(self) -> Transformation:
        """Build the preprocessing pipeline: drop unused fields, add default
        static features if disabled, convert arrays, add observed-value masks,
        time/age features, and finally split into training instances."""
        remove_field_names = [FieldName.FEAT_DYNAMIC_CAT]
        if not self.use_feat_static_real:
            remove_field_names.append(FieldName.FEAT_STATIC_REAL)
        if not self.use_feat_dynamic_real:
            remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
        return Chain(
            [RemoveFields(field_names=remove_field_names)]
            + (
                [SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
                if not self.use_feat_static_cat
                else []
            )
            + (
                [
                    SetField(
                        output_field=FieldName.FEAT_STATIC_REAL, value=[0.0]
                    )
                ]
                if not self.use_feat_static_real
                else []
            )
            + [
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_CAT,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_REAL,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.TARGET,
                    # in the following line, we add 1 for the time dimension
                    expected_ndim=1 + len(self.distr_output.event_shape),
                    dtype=self.dtype,
                ),
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                    dtype=self.dtype,
                ),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=self.time_features,
                    pred_length=self.prediction_length,
                ),
                AddAgeFeature(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_AGE,
                    pred_length=self.prediction_length,
                    log_scale=True,
                    dtype=self.dtype,
                ),
                VstackFeatures(
                    output_field=FieldName.FEAT_TIME,
                    input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
                    + (
                        [FieldName.FEAT_DYNAMIC_REAL]
                        if self.use_feat_dynamic_real
                        else []
                    ),
                ),
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                    past_length=self.history_length,
                    future_length=self.prediction_length,
                    time_series_fields=[
                        FieldName.FEAT_TIME,
                        FieldName.OBSERVED_VALUES,
                    ],
                ),
            ]
        )
    def create_training_network(self) -> DeepARTrainingNetwork:
        """Instantiate the training network from the stored hyperparameters."""
        return DeepARTrainingNetwork(
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )
    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """Build a prediction network with the same hyperparameters, copy the
        trained weights into it, and wrap it in a serializable predictor."""
        prediction_network = DeepARPredictionNetwork(
            num_parallel_samples=self.num_parallel_samples,
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )
        # Transfer learned parameters from the training network.
        copy_parameters(trained_network, prediction_network)
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
            dtype=self.dtype,
        )
| 12,645 | 37.090361 | 94 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepar-savedata/deepar-savedata/predictor.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import functools
import itertools
import logging
import multiprocessing as mp
import sys
import traceback
from pathlib import Path
from pydoc import locate
from tempfile import TemporaryDirectory
import json
from typing import (
TYPE_CHECKING,
Tuple,
Union,
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Type,
)
# Third-party imports
import mxnet as mx
import numpy as np
# First-party imports
import gluonts
from gluonts.distribution import Distribution, DistributionOutput
from gluonts.core.component import (
DType,
equals,
from_hyperparameters,
get_mxnet_context,
validated,
)
from gluonts.core.exception import GluonTSException
from gluonts.core.serde import dump_json, fqname_for, load_json
from gluonts.dataset.common import DataEntry, Dataset, ListDataset
from .forecast_generator import ForecastGenerator, SampleForecastGenerator
from gluonts.dataset.loader import DataBatch, InferenceDataLoader
from gluonts.model.forecast import Forecast
from gluonts.support.util import (
export_repr_block,
export_symb_block,
get_hybrid_forward_input_names,
hybrid_block_to_symbol_block,
import_repr_block,
import_symb_block,
)
from gluonts.transform import Transformation
if TYPE_CHECKING: # avoid circular import
from gluonts.model.estimator import Estimator # noqa
OutputTransform = Callable[[DataEntry, np.ndarray], np.ndarray]
class Predictor:
    """
    Abstract class representing predictor objects.
    Parameters
    ----------
    prediction_length
        Prediction horizon.
    freq
        Frequency of the predicted data.
    """
    # Version string recorded alongside serialized predictors (see serialize()).
    __version__: str = gluonts.__version__
    def __init__(self, prediction_length: int, freq: str) -> None:
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        self.prediction_length = prediction_length
        self.freq = freq
    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        """
        Compute forecasts for the time series in the provided dataset.
        This method is not implemented in this abstract class; please
        use one of the subclasses.
        Parameters
        ----------
        dataset
            The dataset containing the time series to predict.
        Returns
        -------
        Iterator[Forecast]
            Iterator over the forecasts, in the same order as the dataset
            iterable was provided.
        """
        raise NotImplementedError
    def serialize(self, path: Path) -> None:
        """Write the concrete class name and version info into ``path``;
        subclasses extend this with their own state."""
        # serialize Predictor type
        with (path / "type.txt").open("w") as fp:
            fp.write(fqname_for(self.__class__))
        with (path / "version.json").open("w") as fp:
            json.dump(
                {"model": self.__version__, "gluonts": gluonts.__version__}, fp
            )
    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "Predictor":
        """
        Load a serialized predictor from the given path
        Parameters
        ----------
        path
            Path to the serialized files predictor.
        ctx
            Optional mxnet context to be used with the predictor.
            If nothing is passed will use the GPU if available and CPU otherwise.
        """
        # deserialize Predictor type
        with (path / "type.txt").open("r") as fp:
            tpe = locate(fp.readline())
        # ensure that predictor_cls is a subtype of Predictor
        if not issubclass(tpe, Predictor):
            raise IOError(
                f"Class {fqname_for(tpe)} is not "
                f"a subclass of {fqname_for(Predictor)}"
            )
        # call deserialize() for the concrete Predictor type
        return tpe.deserialize(path, ctx)
    @classmethod
    def from_hyperparameters(cls, **hyperparameters):
        """Construct an instance from a flat dict of hyperparameters."""
        return from_hyperparameters(cls, **hyperparameters)
class RepresentablePredictor(Predictor):
    """
    An abstract predictor that can be subclassed by models that are not based
    on Gluon. Subclasses should have @validated() constructors.
    (De)serialization and value equality are all implemented on top of the
    @validated() logic.
    Parameters
    ----------
    prediction_length
        Prediction horizon.
    freq
        Frequency of the predicted data.
    """
    @validated()
    def __init__(self, prediction_length: int, freq: str) -> None:
        super().__init__(prediction_length, freq)
    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        """Yield one forecast per dataset entry by delegating to predict_item."""
        for item in dataset:
            yield self.predict_item(item)
    def predict_item(self, item: DataEntry) -> Forecast:
        """Produce a forecast for a single entry; must be overridden."""
        raise NotImplementedError
    def __eq__(self, that):
        """
        Two RepresentablePredictor instances are considered equal if they
        have the same constructor arguments.
        """
        return equals(self, that)
    def serialize(self, path: Path) -> None:
        """Serialize this predictor as JSON (its @validated() constructor args)."""
        # call Predictor.serialize() in order to serialize the class name
        super().serialize(path)
        with (path / "predictor.json").open("w") as fp:
            print(dump_json(self), file=fp)
    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "RepresentablePredictor":
        # Note: ctx is accepted for interface compatibility but not used here.
        with (path / "predictor.json").open("r") as fp:
            return load_json(fp.read())
class GluonPredictor(Predictor):
    """
    Base predictor type for Gluon-based models.
    Parameters
    ----------
    input_names
        Input tensor names for the graph
    prediction_net
        Network that will be called for prediction
    batch_size
        Number of time series to predict in a single batch
    prediction_length
        Number of time steps to predict
    freq
        Frequency of the input data
    input_transform
        Input transformation pipeline
    output_transform
        Output transformation
    ctx
        MXNet context to use for computation
    forecast_generator
        Class to generate forecasts from network outputs
    """
    BlockType = mx.gluon.Block
    def __init__(
        self,
        input_names: List[str],
        prediction_net: BlockType,
        batch_size: int,
        prediction_length: int,
        freq: str,
        ctx: mx.Context,
        input_transform: Transformation,
        forecast_generator: ForecastGenerator = SampleForecastGenerator(),
        output_transform: Optional[OutputTransform] = None,
        dtype: DType = np.float32,
    ) -> None:
        super().__init__(prediction_length, freq)
        self.input_names = input_names
        self.prediction_net = prediction_net
        # NOTE(review): this fork deliberately forces batch_size to 1
        # (original assignment left commented out above), presumably so the
        # per-series data capture in the "savedata" network works — confirm
        # before reusing this predictor in other contexts.
        #self.batch_size = batch_size
        self.batch_size = 1
        self.input_transform = input_transform
        self.forecast_generator = forecast_generator
        self.output_transform = output_transform
        self.ctx = ctx
        self.dtype = dtype
    def hybridize(self, batch: DataBatch) -> None:
        """
        Hybridizes the underlying prediction network.
        Parameters
        ----------
        batch
            A batch of data to use for the required forward pass after the
            `hybridize()` call.
        """
        self.prediction_net.hybridize(active=True)
        self.prediction_net(*[batch[k] for k in self.input_names])
    def as_symbol_block_predictor(
        self, batch: DataBatch
    ) -> "SymbolBlockPredictor":
        """
        Returns a variant of the current :class:`GluonPredictor` backed
        by a Gluon `SymbolBlock`. If the current predictor is already a
        :class:`SymbolBlockPredictor`, it just returns itself.
        Parameters
        ----------
        batch
            A batch of data to use for the required forward pass after the
            `hybridize()` call of the underlying network.
        Returns
        -------
        SymbolBlockPredictor
            A predictor derived from the current one backed by a `SymbolBlock`.
        """
        raise NotImplementedError
    def predict(
        self, dataset: Dataset, num_samples: Optional[int] = None
    ) -> Iterator[Forecast]:
        """Run the input transform over ``dataset`` and yield forecasts
        produced by the forecast generator from the network outputs."""
        # NOTE(review): batch size is hard-coded to 1 here as well (the
        # commented-out line carried the configured value) — see __init__.
        inference_data_loader = InferenceDataLoader(
            dataset,
            self.input_transform,
            #self.batch_size,
            1,
            ctx=self.ctx,
            dtype=self.dtype,
        )
        yield from self.forecast_generator(
            inference_data_loader=inference_data_loader,
            prediction_net=self.prediction_net,
            input_names=self.input_names,
            freq=self.freq,
            output_transform=self.output_transform,
            num_samples=num_samples,
        )
    def __eq__(self, that):
        # Equality is based on network parameters only, not on the pipelines.
        if type(self) != type(that):
            return False
        # TODO: also consider equality of the pipelines
        # if not equals(self.input_transform, that.input_transform):
        #     return False
        return equals(
            self.prediction_net.collect_params(),
            that.prediction_net.collect_params(),
        )
    def serialize(self, path: Path) -> None:
        """Serialize network, input transform, and constructor parameters
        into ``path``."""
        # call Predictor.serialize() in order to serialize the class name
        super().serialize(path)
        # serialize every GluonPredictor-specific parameters
        # serialize the prediction network
        self.serialize_prediction_net(path)
        # serialize transformation chain
        with (path / "input_transform.json").open("w") as fp:
            print(dump_json(self.input_transform), file=fp)
        # FIXME: also needs to serialize the output_transform
        # serialize all remaining constructor parameters
        with (path / "parameters.json").open("w") as fp:
            parameters = dict(
                batch_size=self.batch_size,
                prediction_length=self.prediction_length,
                freq=self.freq,
                ctx=self.ctx,
                dtype=self.dtype,
                forecast_generator=self.forecast_generator,
                input_names=self.input_names,
            )
            print(dump_json(parameters), file=fp)
    def serialize_prediction_net(self, path: Path) -> None:
        """Serialize the prediction network; implemented by subclasses."""
        raise NotImplementedError()
class SymbolBlockPredictor(GluonPredictor):
    """
    A predictor which serializes the network structure as an MXNet symbolic
    graph. Should be used for models deployed in production in order to
    ensure forward-compatibility as GluonTS models evolve.
    Used by the training shell if training is invoked with a hyperparameter
    `use_symbol_block_predictor = True`.
    """
    BlockType = mx.gluon.SymbolBlock
    def as_symbol_block_predictor(
        self, batch: DataBatch
    ) -> "SymbolBlockPredictor":
        # Already symbol-block backed; nothing to convert.
        return self
    def serialize_prediction_net(self, path: Path) -> None:
        """Export the symbolic graph + parameters under ``path``."""
        export_symb_block(self.prediction_net, path, "prediction_net")
    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "SymbolBlockPredictor":
        """Rebuild a predictor from files written by serialize()."""
        ctx = ctx if ctx is not None else get_mxnet_context()
        with mx.Context(ctx):
            # deserialize constructor parameters
            with (path / "parameters.json").open("r") as fp:
                parameters = load_json(fp.read())
                parameters["ctx"] = ctx
            # deserialize transformation chain
            with (path / "input_transform.json").open("r") as fp:
                transform = load_json(fp.read())
            # deserialize prediction network
            num_inputs = len(parameters["input_names"])
            prediction_net = import_symb_block(
                num_inputs, path, "prediction_net"
            )
            return SymbolBlockPredictor(
                input_transform=transform,
                prediction_net=prediction_net,
                **parameters,
            )
class RepresentableBlockPredictor(GluonPredictor):
    """
    A predictor which serializes the network structure using the
    JSON-serialization methods located in `gluonts.core.serde`. Use the following
    logic to create a `RepresentableBlockPredictor` from a trained prediction
    network.
    >>> def create_representable_block_predictor(
    ...     prediction_network: mx.gluon.HybridBlock,
    ...     **kwargs
    ... ) -> RepresentableBlockPredictor:
    ...     return RepresentableBlockPredictor(
    ...         prediction_net=prediction_network,
    ...         **kwargs
    ...     )
    """
    BlockType = mx.gluon.HybridBlock
    def __init__(
        self,
        prediction_net: BlockType,
        batch_size: int,
        prediction_length: int,
        freq: str,
        ctx: mx.Context,
        input_transform: Transformation,
        forecast_generator: ForecastGenerator = SampleForecastGenerator(),
        output_transform: Optional[
            Callable[[DataEntry, np.ndarray], np.ndarray]
        ] = None,
        dtype: DType = np.float32,
    ) -> None:
        # Unlike the base class, input_names is derived from the network's
        # hybrid_forward signature rather than passed in.
        super().__init__(
            input_names=get_hybrid_forward_input_names(prediction_net),
            prediction_net=prediction_net,
            batch_size=batch_size,
            prediction_length=prediction_length,
            freq=freq,
            ctx=ctx,
            input_transform=input_transform,
            forecast_generator=forecast_generator,
            output_transform=output_transform,
            dtype=dtype,
        )
    def as_symbol_block_predictor(
        self, batch: DataBatch
    ) -> SymbolBlockPredictor:
        """Trace the hybrid block with ``batch`` and wrap the resulting
        SymbolBlock in a :class:`SymbolBlockPredictor`."""
        symbol_block_net = hybrid_block_to_symbol_block(
            hb=self.prediction_net,
            data_batch=[batch[k] for k in self.input_names],
        )
        return SymbolBlockPredictor(
            input_names=self.input_names,
            prediction_net=symbol_block_net,
            batch_size=self.batch_size,
            prediction_length=self.prediction_length,
            freq=self.freq,
            ctx=self.ctx,
            input_transform=self.input_transform,
            forecast_generator=self.forecast_generator,
            output_transform=self.output_transform,
            dtype=self.dtype,
        )
    def serialize(self, path: Path) -> None:
        # Warn because repr-based serialization ties the artifact to the
        # current code version.
        logging.warning(
            "Serializing RepresentableBlockPredictor instances does not save "
            "the prediction network structure in a backwards-compatible "
            "manner. Be careful not to use this method in production."
        )
        super().serialize(path)
    def serialize_prediction_net(self, path: Path) -> None:
        """Export the network via its JSON representation + parameters."""
        export_repr_block(self.prediction_net, path, "prediction_net")
    @classmethod
    def deserialize(
        cls, path: Path, ctx: Optional[mx.Context] = None
    ) -> "RepresentableBlockPredictor":
        """Rebuild a predictor from files written by serialize()."""
        ctx = ctx if ctx is not None else get_mxnet_context()
        with mx.Context(ctx):
            # deserialize constructor parameters
            with (path / "parameters.json").open("r") as fp:
                parameters = load_json(fp.read())
            # deserialize transformation chain
            with (path / "input_transform.json").open("r") as fp:
                transform = load_json(fp.read())
            # deserialize prediction network
            prediction_net = import_repr_block(path, "prediction_net")
            # input_names is derived from the prediction_net
            if "input_names" in parameters:
                del parameters["input_names"]
            parameters["ctx"] = ctx
            return RepresentableBlockPredictor(
                input_transform=transform,
                prediction_net=prediction_net,
                **parameters,
            )
class WorkerError:
    """Carries a formatted-traceback message from a worker process back to
    the parent, where it is detected and re-raised as an exception."""

    def __init__(self, msg):
        # Keep the message accessible as a plain attribute for the parent.
        self.msg = msg
def _worker_loop(
    predictor_path: Path,
    input_queue: mp.Queue,
    output_queue: mp.Queue,
    worker_id,
    **kwargs,
):
    """
    Worker loop for multiprocessing Predictor.
    Loads the predictor serialized in predictor_path
    reads inputs from input_queue and writes forecasts to output_queue.
    Protocol: input messages are (idx, data_chunk); (None, _) means shut down.
    Output messages are (idx, worker_id, forecasts); (None, None, None)
    acknowledges shutdown, and (WorkerError, None, None) reports a failure.
    """
    predictor = Predictor.deserialize(predictor_path)
    while True:
        idx, data_chunk = input_queue.get()
        if idx is None:
            # Shutdown sentinel received — acknowledge and exit.
            output_queue.put((None, None, None))
            break
        try:
            # Materialize the forecasts so exceptions surface here.
            result = list(predictor.predict(data_chunk, **kwargs))
        except Exception:
            # Ship the full traceback text back to the parent, then stop.
            we = WorkerError(
                "".join(traceback.format_exception(*sys.exc_info()))
            )
            output_queue.put((we, None, None))
            break
        output_queue.put((idx, worker_id, result))
class ParallelizedPredictor(Predictor):
    """
    Runs multiple instances (workers) of a predictor in parallel.
    Exceptions are propagated from the workers.
    Note: That there is currently an issue with tqdm that will cause things
    to hang if the ParallelizedPredictor is used with tqdm and an exception
    occurs during prediction.
    https://github.com/tqdm/tqdm/issues/548
    Parameters
    ----------
    base_predictor
        A representable predictor that will be used
    num_workers
        Number of workers (processes) to use. If set to
        None, one worker per CPU will be used.
    chunk_size
        Number of items to pass per call
    """
    def __init__(
        self,
        base_predictor: Predictor,
        num_workers: Optional[int] = None,
        chunk_size=1,
    ) -> None:
        super().__init__(base_predictor.prediction_length, base_predictor.freq)
        self._base_predictor = base_predictor
        self._num_workers = (
            num_workers if num_workers is not None else mp.cpu_count()
        )
        self._chunk_size = chunk_size
        self._num_running_workers = 0
        self._input_queues = []
        self._output_queue = None
    def _grouper(self, iterable, n):
        # Yield successive tuples of up to n items from the iterable.
        iterator = iter(iterable)
        group = tuple(itertools.islice(iterator, n))
        while group:
            yield group
            group = tuple(itertools.islice(iterator, n))
    def terminate(self):
        """Signal, terminate, and join all worker processes."""
        for q in self._input_queues:
            q.put((None, None))
        for w in self._workers:
            w.terminate()
        for i, w in enumerate(self._workers):
            w.join()
    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        """Fan dataset chunks out to worker processes and yield forecasts
        back in the original dataset order."""
        with TemporaryDirectory() as tempdir:
            predictor_path = Path(tempdir)
            # Workers load the base predictor from this temp serialization.
            self._base_predictor.serialize(predictor_path)
            # TODO: Consider using shared memory for the data transfer.
            self._input_queues = [mp.Queue() for _ in range(self._num_workers)]
            self._output_queue = mp.Queue()
            workers = []
            for worker_id, in_q in enumerate(self._input_queues):
                worker = mp.Process(
                    target=_worker_loop,
                    args=(predictor_path, in_q, self._output_queue, worker_id),
                    kwargs=kwargs,
                )
                worker.daemon = True
                worker.start()
                workers.append(worker)
                self._num_running_workers += 1
            self._workers = workers
            chunked_data = self._grouper(dataset, self._chunk_size)
            self._send_idx = 0
            self._next_idx = 0
            # Buffer of out-of-order results, keyed by chunk index.
            self._data_buffer = {}
            worker_ids = list(range(self._num_workers))
            def receive():
                # Get one result message; a WorkerError aborts everything.
                idx, worker_id, result = self._output_queue.get()
                if isinstance(idx, WorkerError):
                    self._num_running_workers -= 1
                    self.terminate()
                    raise Exception(idx.msg)
                if idx is not None:
                    self._data_buffer[idx] = result
                return idx, worker_id, result
            def get_next_from_buffer():
                # Drain buffered chunks that are next in dataset order.
                while self._next_idx in self._data_buffer:
                    result_batch = self._data_buffer.pop(self._next_idx)
                    self._next_idx += 1
                    for result in result_batch:
                        yield result
            def send(worker_id, chunk):
                q = self._input_queues[worker_id]
                q.put((self._send_idx, chunk))
                self._send_idx += 1
            try:
                # prime the queues
                for wid in worker_ids:
                    chunk = next(chunked_data)
                    send(wid, chunk)
                while True:
                    idx, wid, result = receive()
                    for res in get_next_from_buffer():
                        yield res
                    # Keep the worker that just finished busy with new data.
                    chunk = next(chunked_data)
                    send(wid, chunk)
            except StopIteration:
                # signal workers end of data
                for q in self._input_queues:
                    q.put((None, None))
                # collect any outstanding results
                while self._num_running_workers > 0:
                    idx, worker_id, result = receive()
                    if idx is None:
                        self._num_running_workers -= 1
                        continue
                    for res in get_next_from_buffer():
                        yield res
                # All chunks must have been delivered, in order.
                assert len(self._data_buffer) == 0
                assert self._send_idx == self._next_idx
class Localizer(Predictor):
    """
    A Predictor that uses an estimator to train a local model per time series and
    immediatly calls this to predict.
    Parameters
    ----------
    estimator
        The estimator object to train on each dataset entry at prediction time.
    """
    def __init__(self, estimator: "Estimator"):
        super().__init__(estimator.prediction_length, estimator.freq)
        self.estimator = estimator
    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        """Train a fresh model on each series, then forecast with it."""
        logger = logging.getLogger(__name__)
        for i, ts in enumerate(dataset, start=1):
            logger.info(f"training for time series {i} / {len(dataset)}")
            # Wrap the single series so the estimator sees a dataset of one.
            local_ds = ListDataset([ts], freq=self.freq)
            trained_pred = self.estimator.train(local_ds)
            logger.info(f"predicting for time series {i} / {len(dataset)}")
            predictions = trained_pred.predict(local_ds, **kwargs)
            for pred in predictions:
                yield pred
class FallbackPredictor(Predictor):
    """Mixin for predictors that can be instantiated on the fly as a
    fallback when a base predictor fails (see the ``fallback`` decorator)."""
    @classmethod
    def from_predictor(
        cls, base: RepresentablePredictor, **overrides
    ) -> Predictor:
        # Create predictor based on an existing predictor.
        # This let's us create a MeanPredictor as a fallback on the fly.
        return cls.from_hyperparameters(
            **getattr(base, "__init_args__"), **overrides
        )
def fallback(fallback_cls: Type[FallbackPredictor]):
    """Decorator factory for ``predict_item`` methods: on unexpected failure,
    log the traceback and retry the item with a freshly-built ``fallback_cls``
    predictor. GluonTSException is considered deliberate and is re-raised."""
    def decorator(predict_item):
        @functools.wraps(predict_item)
        def fallback_predict(self, item: DataEntry) -> Forecast:
            try:
                return predict_item(self, item)
            except GluonTSException:
                # Domain errors are intentional — propagate them unchanged.
                raise
            except Exception:
                logging.warning(
                    f"Base predictor failed with: {traceback.format_exc()}"
                )
                # Build the fallback from this predictor's own init args.
                fallback_predictor = fallback_cls.from_predictor(self)
                return fallback_predictor.predict_item(item)
        return fallback_predict
    return decorator
| 24,040 | 30.969415 | 81 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepar-savedata/deepar-savedata/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional, Tuple
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.block.scaler import MeanScaler, NOPScaler
from gluonts.core.component import DType, validated
from gluonts.distribution import DistributionOutput, Distribution
from gluonts.distribution.distribution import getF
from gluonts.model.common import Tensor
from gluonts.support.util import weighted_average
def prod(xs):
    """Return the product of all elements of ``xs`` (1 for an empty iterable)."""
    result = 1
    for factor in xs:
        result *= factor
    return result
class DeepARNetwork(mx.gluon.HybridBlock):
    @validated()
    def __init__(
        self,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: List[int],
        lags_seq: List[int],
        scaling: bool = True,
        dtype: DType = np.float32,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling
        self.dtype = dtype
        assert len(cardinality) == len(
            embedding_dimension
        ), "embedding_dimension should be a list with the same size as cardinality"
        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        # NOTE(review): sorts the caller's list in place — side effect on the
        # argument passed by the estimator.
        lags_seq.sort()
        self.lags_seq = lags_seq
        self.distr_output = distr_output
        RnnCell = {"lstm": mx.gluon.rnn.LSTMCell, "gru": mx.gluon.rnn.GRUCell}[
            self.cell_type
        ]
        self.target_shape = distr_output.event_shape
        # TODO: is the following restriction needed?
        assert (
            len(self.target_shape) <= 1
        ), "Argument `target_shape` should be a tuple with 1 element at most"
        with self.name_scope():
            self.proj_distr_args = distr_output.get_args_proj()
            # Stack of RNN cells; layers after the first are wrapped as
            # residual cells, and zoneout is applied when dropout_rate > 0.
            self.rnn = mx.gluon.rnn.HybridSequentialRNNCell()
            for k in range(num_layers):
                cell = RnnCell(hidden_size=num_cells)
                cell = mx.gluon.rnn.ResidualCell(cell) if k > 0 else cell
                cell = (
                    mx.gluon.rnn.ZoneoutCell(cell, zoneout_states=dropout_rate)
                    if dropout_rate > 0.0
                    else cell
                )
                self.rnn.add(cell)
            self.rnn.cast(dtype=dtype)
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=embedding_dimension,
                dtype=self.dtype,
            )
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)
            # Initialize the buffers used to record intermediate tensors
            # (this fork's "savedata" instrumentation).
            self.reset_savedata()
def reset_savedata(self):
self.savedata = {}
self.savedata['input'] = []
self.savedata['target'] = []
self.savedata['lags'] = []
self.savedata['theta'] = []
self.savedata['hstate'] = []
self.savedata['rnnoutput'] = []
    @staticmethod
    def get_lagged_subsequences(
        F,
        sequence: Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> Tensor:
        """
        Returns lagged subsequences of a given sequence.
        Parameters
        ----------
        F
            MXNet function namespace (``mx.nd`` or ``mx.sym``).
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted.
            Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.
        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and
            I = len(indices), containing lagged subsequences. Specifically,
            lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        # we must have: sequence_length - lag_index - subsequences_length >= 0
        # for all lag_index, hence the following assert
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, "
            f"found lag {max(indices)} while history length is only "
            f"{sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)
        lagged_values = []
        for lag_index in indices:
            # Slice the window ending lag_index steps before the sequence end.
            begin_index = -lag_index - subsequences_length
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(
                F.slice_axis(
                    sequence, axis=1, begin=begin_index, end=end_index
                )
            )
        # Stack per-lag windows along a new trailing axis -> (N, S, C, I).
        return F.stack(*lagged_values, axis=-1)
    def unroll_encoder(
        self,
        F,
        feat_static_cat: Tensor, # (batch_size, num_features)
        feat_static_real: Tensor, # (batch_size, num_features)
        past_time_feat: Tensor, # (batch_size, history_length, num_features)
        past_target: Tensor, # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor, # (batch_size, history_length, *target_shape)
        future_time_feat: Optional[
            Tensor
        ], # (batch_size, prediction_length, num_features)
        future_target: Optional[
            Tensor
        ], # (batch_size, prediction_length, *target_shape)
    ) -> Tuple[Tensor, List, Tensor, Tensor]:
        """
        Unrolls the LSTM encoder over past and, if present, future data.

        Returns outputs and state of the encoder, plus the scale of past_target
        and a vector of static features that was constructed and fed as input
        to the encoder.
        All tensor arguments should have NTC layout.

        Side effect: appends numpy copies of the assembled RNN input and of
        the raw lagged subsequences to ``self.savedata['input']`` and
        ``self.savedata['lags']`` (instrumentation for offline analysis).
        """
        # Prediction mode (no future data): condition on the last
        # `context_length` steps of the past only.  Training mode: unroll
        # jointly over context + prediction range using the true future.
        if future_time_feat is None or future_target is None:
            time_feat = past_time_feat.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            )
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        else:
            time_feat = F.concat(
                past_time_feat.slice_axis(
                    axis=1,
                    begin=self.history_length - self.context_length,
                    end=None,
                ),
                future_time_feat,
                dim=1,
            )
            sequence = F.concat(past_target, future_target, dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags = self.get_lagged_subsequences(
            F=F,
            sequence=sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length,
        )
        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
            past_observed_values.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
        )
        # (batch_size, num_features)
        embedded_cat = self.embedder(feat_static_cat)
        # in addition to embedding features, use the log scale as it can help
        # prediction too
        # (batch_size, num_features + prod(target_shape))
        static_feat = F.concat(
            embedded_cat,
            feat_static_real,
            F.log(scale)
            if len(self.target_shape) == 0
            else F.log(scale.squeeze(axis=1)),
            dim=1,
        )
        # broadcast the static features along the time axis
        # (batch_size, subsequences_length, num_features + 1)
        repeated_static_feat = static_feat.expand_dims(axis=1).repeat(
            axis=1, repeats=subsequences_length
        )
        # normalize lags by the per-series scale
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))
        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = F.reshape(
            data=lags_scaled,
            shape=(
                -1,
                subsequences_length,
                len(self.lags_seq) * prod(self.target_shape),
            ),
        )
        # (batch_size, sub_seq_len, input_dim)
        inputs = F.concat(input_lags, time_feat, repeated_static_feat, dim=-1)
        # Snapshot the exact encoder input and the raw lagged values.
        # NOTE(review): .asnumpy() only works in NDArray (imperative) mode;
        # this would fail if the network were hybridized — confirm it never is.
        self.savedata['input'].append(inputs.asnumpy().copy())
        self.savedata['lags'].append(lags.asnumpy().copy())
        # unroll encoder
        outputs, state = self.rnn.unroll(
            inputs=inputs,
            length=subsequences_length,
            layout="NTC",
            merge_outputs=True,
            begin_state=self.rnn.begin_state(
                func=F.zeros,
                dtype=self.dtype,
                # batch size can only be read off an NDArray; 0 is the
                # placeholder used in the symbolic case
                batch_size=inputs.shape[0]
                if isinstance(inputs, mx.nd.NDArray)
                else 0,
            ),
        )
        # outputs: (batch_size, seq_len, num_cells)
        # state: list of (batch_size, num_cells) tensors
        # scale: (batch_size, 1, *target_shape)
        # static_feat: (batch_size, num_features + prod(target_shape))
        return outputs, state, scale, static_feat
class DeepARTrainingNetwork(DeepARNetwork):
    """
    Training-mode DeepAR network.

    Unrolls the encoder over the context and prediction ranges with the true
    targets (teacher forcing) and computes the observation-masked, weighted
    negative log-likelihood loss.

    Side effect: every forward pass appends a numpy copy of the concatenated
    context-plus-prediction target to ``self.savedata['target']``
    (instrumentation for offline analysis).
    """

    def distribution(
        self,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Distribution:
        """
        Returns the distribution predicted by the model on the range of
        past_target and future_target.
        The distribution is obtained by unrolling the network with the true
        target, this is also the distribution that is being minimized during
        training. This can be used in anomaly detection, see for instance
        examples/anomaly_detection.py.
        Input arguments are the same as for the hybrid_forward method.
        Returns
        -------
        Distribution
            a distribution object whose mean has shape:
            (batch_size, context_length + prediction_length).
        """
        # unroll the decoder in "training mode"
        # i.e. by providing future data as well
        F = getF(feat_static_cat)
        rnn_outputs, _, scale, _ = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
        )
        # project RNN outputs onto the distribution's parameter space
        distr_args = self.proj_distr_args(rnn_outputs)
        return self.distr_output.distribution(distr_args, scale=scale)

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Tensor:
        """
        Computes the loss for training DeepAR, all inputs tensors representing
        time series have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape, seq_len)
        future_time_feat : (batch_size, prediction_length, num_features)
        future_target : (batch_size, prediction_length, *target_shape)
        future_observed_values : (batch_size, prediction_length, *target_shape)
        Returns loss with shape (batch_size, context + prediction_length, 1)
        -------
        """
        distr = self.distribution(
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
            future_observed_values=future_observed_values,
        )
        # put together target sequence
        # (batch_size, seq_len, *target_shape)
        target = F.concat(
            past_target.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            ),
            future_target,
            dim=1,
        )
        # per-step negative log-likelihood
        # (batch_size, seq_len)
        loss = distr.loss(target)
        # save target in training
        # NOTE(review): .asnumpy() requires imperative (non-hybridized) mode
        self.savedata['target'].append(target.asnumpy().copy())
        # (batch_size, seq_len, *target_shape)
        observed_values = F.concat(
            past_observed_values.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=self.history_length,
            ),
            future_observed_values,
            dim=1,
        )
        # mask the loss at one time step iff one or more observations is missing in the target dimensions
        # (batch_size, seq_len)
        loss_weights = (
            observed_values
            if (len(self.target_shape) == 0)
            else observed_values.min(axis=-1, keepdims=False)
        )
        weighted_loss = weighted_average(
            F=F, x=loss, weights=loss_weights, axis=1
        )
        # return both the per-series weighted loss (optimization objective)
        # and the raw per-step loss
        return weighted_loss, loss
class DeepARPredictionNetwork(DeepARNetwork):
    """
    Prediction-mode DeepAR network: encodes the past, then draws sample
    paths autoregressively over the prediction range.

    NOTE(review): this fork hardwires ``num_parallel_samples`` to 1 (the
    constructor argument is accepted but ignored, see __init__), so a single
    sample path is drawn per series.  At the final decoding step the hidden
    state, RNN output, distribution parameters and drawn sample are appended
    to ``self.savedata`` for offline analysis.
    """

    @validated()
    #def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
    def __init__(self, num_parallel_samples: int = 1, **kwargs) -> None:
        super().__init__(**kwargs)
        #self.num_parallel_samples = num_parallel_samples
        # NOTE(review): constructor argument deliberately overridden to 1
        self.num_parallel_samples = 1
        # for decoding the lags are shifted by one, at the first time-step
        # of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]

    def sampling_decoder(
        self,
        F,
        static_feat: Tensor,
        past_target: Tensor,
        time_feat: Tensor,
        scale: Tensor,
        begin_states: List,
    ) -> Tensor:
        """
        Computes sample paths by unrolling the LSTM starting with a initial
        input and state.
        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, 1, 1).
        begin_states : List
            list of initial states for the LSTM layers.
            the shape of each tensor of the list should be (batch_size, num_cells)
        Returns
        --------
        Tensor
            A tensor containing sampled paths.
            Shape: (batch_size, num_sample_paths, prediction_length).
        """
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_time_feat = time_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_static_feat = static_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_scale = scale.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_states = [
            s.repeat(repeats=self.num_parallel_samples, axis=0)
            for s in begin_states
        ]
        future_samples = []
        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags = self.get_lagged_subsequences(
                F=F,
                sequence=repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags_scaled = F.broadcast_div(
                lags, repeated_scale.expand_dims(axis=-1)
            )
            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = F.reshape(
                data=lags_scaled,
                shape=(-1, 1, prod(self.target_shape) * len(self.lags_seq)),
            )
            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            decoder_input = F.concat(
                input_lags,
                repeated_time_feat.slice_axis(axis=1, begin=k, end=k + 1),
                repeated_static_feat,
                dim=-1,
            )
            # output shape: (batch_size * num_samples, 1, num_cells)
            # state shape: (batch_size * num_samples, num_cells)
            rnn_outputs, repeated_states = self.rnn.unroll(
                inputs=decoder_input,
                length=1,
                begin_state=repeated_states,
                layout="NTC",
                merge_outputs=True,
            )
            distr_args = self.proj_distr_args(rnn_outputs)
            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(
                distr_args, scale=repeated_scale
            )
            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample(dtype=self.dtype)
            # feed the sample back in as the next step's conditioning history
            # (batch_size * num_samples, seq_len, *target_shape)
            repeated_past_target = F.concat(
                repeated_past_target, new_samples, dim=1
            )
            future_samples.append(new_samples)
            # save only the last output
            # NOTE(review): .asnumpy() requires imperative mode; hstate and
            # theta are stored as live MXNet objects, not numpy copies
            if k == self.prediction_length -1:
                self.savedata['hstate'].append(repeated_states)
                self.savedata['rnnoutput'].append(rnn_outputs.asnumpy().copy())
                self.savedata['theta'].append(distr_args)
                self.savedata['target'].append(new_samples.asnumpy().copy())
        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = F.concat(*future_samples, dim=1)
        # (batch_size, num_samples, prediction_length, *target_shape)
        return samples.reshape(
            shape=(
                (-1, self.num_parallel_samples)
                + (self.prediction_length,)
                + self.target_shape
            )
        )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor, # (batch_size, num_features)
        feat_static_real: Tensor, # (batch_size, num_features)
        past_time_feat: Tensor, # (batch_size, history_length, num_features)
        past_target: Tensor, # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor, # (batch_size, history_length, *target_shape)
        future_time_feat: Tensor, # (batch_size, prediction_length, num_features)
    ) -> Tensor:
        """
        Predicts samples, all tensors should have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        Returns
        -------
        Tensor
            Predicted samples
        """
        # unroll the decoder in "prediction mode", i.e. with past data only
        _, state, scale, static_feat = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=None,
            future_target=None,
        )
        # draw sample paths starting from the encoder's final state
        return self.sampling_decoder(
            F=F,
            past_target=past_target,
            time_feat=future_time_feat,
            static_feat=static_feat,
            scale=scale,
            begin_states=state,
        )
| 22,891 | 34.601866 | 116 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepar-savedata/deepar-savedata/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import DType, validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import DistributionOutput, StudentTOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import (
TimeFeature,
time_features_from_frequency_str,
get_lags_for_frequency,
)
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SetField,
Transformation,
VstackFeatures,
)
# Relative imports
from ._network import DeepARPredictionNetwork, DeepARTrainingNetwork
class DeepAREstimator(GluonEstimator):
    """
    Construct a DeepAR estimator.
    This implements an RNN-based model, close to the one described in
    [SFG17]_.
    *Note:* the code of this model is unrelated to the implementation behind
    `SageMaker's DeepAR Forecasting Algorithm
    <https://docs.aws.amazon.com/sagemaker/latest/dg/deepar.html>`_.
    Parameters
    ----------
    freq
        Frequency of the data to train on and predict
    prediction_length
        Length of the prediction horizon
    trainer
        Trainer object to be used (default: Trainer())
    context_length
        Number of steps to unroll the RNN for before computing predictions
        (default: None, in which case context_length = prediction_length)
    num_layers
        Number of RNN layers (default: 2)
    num_cells
        Number of RNN cells for each layer (default: 40)
    cell_type
        Type of recurrent cells to use (available: 'lstm' or 'gru';
        default: 'lstm')
    dropout_rate
        Dropout regularization parameter (default: 0.1)
    use_feat_dynamic_real
        Whether to use the ``feat_dynamic_real`` field from the data
        (default: False)
    use_feat_static_cat
        Whether to use the ``feat_static_cat`` field from the data
        (default: False)
    use_feat_static_real
        Whether to use the ``feat_static_real`` field from the data
        (default: False)
    cardinality
        Number of values of each categorical feature.
        This must be set if ``use_feat_static_cat == True`` (default: None)
    embedding_dimension
        Dimension of the embeddings for categorical features
        (default: [min(50, (cat+1)//2) for cat in cardinality])
    distr_output
        Distribution to use to evaluate observations and sample predictions
        (default: StudentTOutput())
    scaling
        Whether to automatically scale the target values (default: true)
    lags_seq
        Indices of the lagged target values to use as inputs of the RNN
        (default: None, in which case these are automatically determined
        based on freq)
    time_features
        Time features to use as inputs of the RNN (default: None, in which
        case these are automatically determined based on freq)
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 100)
    """

    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        trainer: Trainer = Trainer(),
        context_length: Optional[int] = None,
        num_layers: int = 2,
        num_cells: int = 40,
        cell_type: str = "lstm",
        dropout_rate: float = 0.1,
        use_feat_dynamic_real: bool = False,
        use_feat_static_cat: bool = False,
        use_feat_static_real: bool = False,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        distr_output: DistributionOutput = StudentTOutput(),
        scaling: bool = True,
        lags_seq: Optional[List[int]] = None,
        time_features: Optional[List[TimeFeature]] = None,
        num_parallel_samples: int = 100,
        dtype: DType = np.float32,
    ) -> None:
        super().__init__(trainer=trainer, dtype=dtype)
        # validate hyper-parameters up front so failures are loud and early
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        assert num_layers > 0, "The value of `num_layers` should be > 0"
        assert num_cells > 0, "The value of `num_cells` should be > 0"
        assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
        assert (cardinality is not None and use_feat_static_cat) or (
            cardinality is None and not use_feat_static_cat
        ), "You should set `cardinality` if and only if `use_feat_static_cat=True`"
        assert cardinality is None or all(
            [c > 0 for c in cardinality]
        ), "Elements of `cardinality` should be > 0"
        assert embedding_dimension is None or all(
            [e > 0 for e in embedding_dimension]
        ), "Elements of `embedding_dimension` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"
        self.freq = freq
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.prediction_length = prediction_length
        self.distr_output = distr_output
        self.distr_output.dtype = dtype
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.dropout_rate = dropout_rate
        self.use_feat_dynamic_real = use_feat_dynamic_real
        self.use_feat_static_cat = use_feat_static_cat
        self.use_feat_static_real = use_feat_static_real
        # when static categorical features are disabled, a dummy feature
        # with a single category is used instead (see create_transformation)
        self.cardinality = (
            cardinality if cardinality and use_feat_static_cat else [1]
        )
        self.embedding_dimension = (
            embedding_dimension
            if embedding_dimension is not None
            else [min(50, (cat + 1) // 2) for cat in self.cardinality]
        )
        self.scaling = scaling
        self.lags_seq = (
            lags_seq
            if lags_seq is not None
            else get_lags_for_frequency(freq_str=freq)
        )
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )
        # the network must see enough history to compute the largest lag
        self.history_length = self.context_length + max(self.lags_seq)
        self.num_parallel_samples = num_parallel_samples

    def create_transformation(self) -> Transformation:
        """Build the chain of data transformations applied to each entry
        before it reaches the network (field cleanup, dummy static features,
        observed-value indicators, time/age features, instance splitting)."""
        remove_field_names = [FieldName.FEAT_DYNAMIC_CAT]
        if not self.use_feat_static_real:
            remove_field_names.append(FieldName.FEAT_STATIC_REAL)
        if not self.use_feat_dynamic_real:
            remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
        return Chain(
            [RemoveFields(field_names=remove_field_names)]
            # fill in dummy static features when they are not used
            + (
                [SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
                if not self.use_feat_static_cat
                else []
            )
            + (
                [
                    SetField(
                        output_field=FieldName.FEAT_STATIC_REAL, value=[0.0]
                    )
                ]
                if not self.use_feat_static_real
                else []
            )
            + [
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_CAT,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_REAL,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.TARGET,
                    # in the following line, we add 1 for the time dimension
                    expected_ndim=1 + len(self.distr_output.event_shape),
                    dtype=self.dtype,
                ),
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                    dtype=self.dtype,
                ),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=self.time_features,
                    pred_length=self.prediction_length,
                ),
                AddAgeFeature(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_AGE,
                    pred_length=self.prediction_length,
                    log_scale=True,
                    dtype=self.dtype,
                ),
                # stack all dynamic features into a single FEAT_TIME array
                VstackFeatures(
                    output_field=FieldName.FEAT_TIME,
                    input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
                    + (
                        [FieldName.FEAT_DYNAMIC_REAL]
                        if self.use_feat_dynamic_real
                        else []
                    ),
                ),
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                    past_length=self.history_length,
                    future_length=self.prediction_length,
                    time_series_fields=[
                        FieldName.FEAT_TIME,
                        FieldName.OBSERVED_VALUES,
                    ],
                ),
            ]
        )

    def create_training_network(self) -> DeepARTrainingNetwork:
        """Instantiate the (untrained) training network."""
        return DeepARTrainingNetwork(
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )

    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """Build a prediction network with the trained parameters copied in
        and wrap it in a Predictor together with the data transformation."""
        prediction_network = DeepARPredictionNetwork(
            num_parallel_samples=self.num_parallel_samples,
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )
        # transfer the learned parameters into the prediction network
        copy_parameters(trained_network, prediction_network)
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
            dtype=self.dtype,
        )
| 12,645 | 37.090361 | 94 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deeparsavedata-v0/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional, Tuple
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.block.scaler import MeanScaler, NOPScaler
from gluonts.core.component import DType, validated
from gluonts.distribution import DistributionOutput, Distribution
from gluonts.distribution.distribution import getF
from gluonts.model.common import Tensor
from gluonts.support.util import weighted_average
def prod(xs):
    """Return the product of the elements of *xs* (1 for an empty iterable)."""
    result = 1
    for factor in xs:
        result = result * factor
    return result
class DeepARSaveDataNetwork(mx.gluon.HybridBlock):
    """
    DeepAR network variant instrumented to capture intermediate tensors.

    Numpy snapshots of the assembled RNN inputs are collected in
    ``self.savedata``, targets in ``self.savetarget`` and raw lagged values
    in ``self.saveother`` (see unroll_encoder and the training subclass).

    NOTE(review): unlike the upstream default (kept as a commented-out
    parameter below), ``scaling`` defaults to False here, i.e. a NOPScaler
    is used unless scaling is explicitly enabled.
    """

    @validated()
    def __init__(
        self,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: List[int],
        lags_seq: List[int],
        #scaling: bool = True,
        scaling: bool = False,
        dtype: DType = np.float32,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling
        self.dtype = dtype
        assert len(cardinality) == len(
            embedding_dimension
        ), "embedding_dimension should be a list with the same size as cardinality"
        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        # lags are kept sorted ascending
        lags_seq.sort()
        self.lags_seq = lags_seq
        self.distr_output = distr_output
        RnnCell = {"lstm": mx.gluon.rnn.LSTMCell, "gru": mx.gluon.rnn.GRUCell}[
            self.cell_type
        ]
        self.target_shape = distr_output.event_shape
        # TODO: is the following restriction needed?
        assert (
            len(self.target_shape) <= 1
        ), "Argument `target_shape` should be a tuple with 1 element at most"
        with self.name_scope():
            # projection from RNN outputs to distribution parameters
            self.proj_distr_args = distr_output.get_args_proj()
            # stacked RNN: residual connections from the 2nd layer on,
            # zoneout regularization when dropout_rate > 0
            self.rnn = mx.gluon.rnn.HybridSequentialRNNCell()
            for k in range(num_layers):
                cell = RnnCell(hidden_size=num_cells)
                cell = mx.gluon.rnn.ResidualCell(cell) if k > 0 else cell
                cell = (
                    mx.gluon.rnn.ZoneoutCell(cell, zoneout_states=dropout_rate)
                    if dropout_rate > 0.0
                    else cell
                )
                self.rnn.add(cell)
            self.rnn.cast(dtype=dtype)
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=embedding_dimension,
                dtype=self.dtype,
            )
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)
        # capture buffers for offline analysis
        self.savedata = []
        self.savetarget = []
        self.saveother = []

    def reset_savedata(self):
        # NOTE(review): only savedata is cleared; savetarget and saveother
        # keep accumulating — confirm whether that is intentional
        self.savedata = []

    @staticmethod
    def get_lagged_subsequences(
        F,
        sequence: Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> Tensor:
        """
        Returns lagged subsequences of a given sequence.
        Parameters
        ----------
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted.
            Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.
        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and
            I = len(indices), containing lagged subsequences. Specifically,
            lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        # we must have: sequence_length - lag_index - subsequences_length >= 0
        # for all lag_index, hence the following assert
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, "
            f"found lag {max(indices)} while history length is only "
            f"{sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)
        lagged_values = []
        for lag_index in indices:
            begin_index = -lag_index - subsequences_length
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(
                F.slice_axis(
                    sequence, axis=1, begin=begin_index, end=end_index
                )
            )
        # stack one window per lag along a trailing axis -> (N, S, C, I)
        return F.stack(*lagged_values, axis=-1)

    def unroll_encoder(
        self,
        F,
        feat_static_cat: Tensor, # (batch_size, num_features)
        feat_static_real: Tensor, # (batch_size, num_features)
        past_time_feat: Tensor, # (batch_size, history_length, num_features)
        past_target: Tensor, # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor, # (batch_size, history_length, *target_shape)
        future_time_feat: Optional[
            Tensor
        ], # (batch_size, prediction_length, num_features)
        future_target: Optional[
            Tensor
        ], # (batch_size, prediction_length, *target_shape)
    ) -> Tuple[Tensor, List, Tensor, Tensor]:
        """
        Unrolls the LSTM encoder over past and, if present, future data.
        Returns outputs and state of the encoder, plus the scale of past_target
        and a vector of static features that was constructed and fed as input
        to the encoder.
        All tensor arguments should have NTC layout.

        Side effect: appends numpy copies of the assembled RNN input to
        ``self.savedata`` and of the raw lagged values to ``self.saveother``.
        """
        # Prediction mode (no future data): condition on the last
        # `context_length` steps only; training mode: unroll jointly over
        # context + prediction range with the true future target.
        if future_time_feat is None or future_target is None:
            time_feat = past_time_feat.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            )
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        else:
            time_feat = F.concat(
                past_time_feat.slice_axis(
                    axis=1,
                    begin=self.history_length - self.context_length,
                    end=None,
                ),
                future_time_feat,
                dim=1,
            )
            sequence = F.concat(past_target, future_target, dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags = self.get_lagged_subsequences(
            F=F,
            sequence=sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length,
        )
        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
            past_observed_values.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
        )
        # (batch_size, num_features)
        embedded_cat = self.embedder(feat_static_cat)
        # in addition to embedding features, use the log scale as it can help
        # prediction too
        # (batch_size, num_features + prod(target_shape))
        static_feat = F.concat(
            embedded_cat,
            feat_static_real,
            F.log(scale)
            if len(self.target_shape) == 0
            else F.log(scale.squeeze(axis=1)),
            dim=1,
        )
        # (batch_size, subsequences_length, num_features + 1)
        repeated_static_feat = static_feat.expand_dims(axis=1).repeat(
            axis=1, repeats=subsequences_length
        )
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))
        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = F.reshape(
            data=lags_scaled,
            shape=(
                -1,
                subsequences_length,
                len(self.lags_seq) * prod(self.target_shape),
            ),
        )
        # (batch_size, sub_seq_len, input_dim)
        inputs = F.concat(input_lags, time_feat, repeated_static_feat, dim=-1)
        # Snapshot the exact encoder input and the raw lagged values.
        # NOTE(review): .asnumpy() requires imperative (non-hybridized) mode.
        self.savedata.append(inputs.asnumpy().copy())
        #self.savedata.append(inputs)
        self.saveother.append(lags.asnumpy().copy())
        # NOTE(review): debug print left in; consider removing or using logging
        print(self.lags_seq)
        # unroll encoder
        outputs, state = self.rnn.unroll(
            inputs=inputs,
            length=subsequences_length,
            layout="NTC",
            merge_outputs=True,
            begin_state=self.rnn.begin_state(
                func=F.zeros,
                dtype=self.dtype,
                # batch size can only be read off an NDArray; 0 is the
                # placeholder used in the symbolic case
                batch_size=inputs.shape[0]
                if isinstance(inputs, mx.nd.NDArray)
                else 0,
            ),
        )
        # outputs: (batch_size, seq_len, num_cells)
        # state: list of (batch_size, num_cells) tensors
        # scale: (batch_size, 1, *target_shape)
        # static_feat: (batch_size, num_features + prod(target_shape))
        return outputs, state, scale, static_feat
class DeepARSaveDataTrainingNetwork(DeepARSaveDataNetwork):
    """Training-mode DeepAR network that, in addition to computing the
    training loss, records every target sequence it sees by appending a
    NumPy copy to ``self.savetarget``.
    NOTE(review): ``self.savetarget`` is presumably initialized in
    ``DeepARSaveDataNetwork`` (not visible here) -- confirm against the base
    class.
    """
    def distribution(
        self,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Distribution:
        """
        Returns the distribution predicted by the model on the range of
        past_target and future_target.
        The distribution is obtained by unrolling the network with the true
        target, this is also the distribution that is being minimized during
        training. This can be used in anomaly detection, see for instance
        examples/anomaly_detection.py.
        Input arguments are the same as for the hybrid_forward method.
        Returns
        -------
        Distribution
            a distribution object whose mean has shape:
            (batch_size, context_length + prediction_length).
        """
        # unroll the decoder in "training mode"
        # i.e. by providing future data as well
        F = getF(feat_static_cat)
        rnn_outputs, _, scale, _ = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
        )
        # project the RNN outputs onto the parameters of the output
        # distribution, then attach the per-series scale
        distr_args = self.proj_distr_args(rnn_outputs)
        return self.distr_output.distribution(distr_args, scale=scale)
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Tensor:
        """
        Computes the loss for training DeepARSaveData, all inputs tensors representing
        time series have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        future_target : (batch_size, prediction_length, *target_shape)
        future_observed_values : (batch_size, prediction_length, *target_shape)
        Returns
        -------
        Tensor
            Despite the ``-> Tensor`` annotation this returns a pair: the
            observation-weighted loss aggregated over the time axis, and the
            raw per-time-step loss of shape (batch_size, seq_len).
        """
        distr = self.distribution(
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
            future_observed_values=future_observed_values,
        )
        # put together target sequence
        # (batch_size, seq_len, *target_shape)
        target = F.concat(
            past_target.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            ),
            future_target,
            dim=1,
        )
        # (batch_size, seq_len)
        loss = distr.loss(target)
        # save the target seen during training for later inspection.
        # NOTE(review): .asnumpy() forces evaluation and prevents this block
        # from being hybridized -- presumably intentional for this
        # "SaveData" variant.
        self.savetarget.append(target.asnumpy().copy())
        # (batch_size, seq_len, *target_shape)
        observed_values = F.concat(
            past_observed_values.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=self.history_length,
            ),
            future_observed_values,
            dim=1,
        )
        # mask the loss at one time step iff one or more observations is missing in the target dimensions
        # (batch_size, seq_len)
        loss_weights = (
            observed_values
            if (len(self.target_shape) == 0)
            else observed_values.min(axis=-1, keepdims=False)
        )
        weighted_loss = weighted_average(
            F=F, x=loss, weights=loss_weights, axis=1
        )
        return weighted_loss, loss
class DeepARSaveDataPredictionNetwork(DeepARSaveDataNetwork):
    """Prediction-mode DeepAR network: unrolls the encoder on the observed
    history only, then draws ``num_parallel_samples`` sample paths per series
    by feeding each sampled value back into the RNN one step at a time.
    """
    @validated()
    def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
        super().__init__(**kwargs)
        # number of sample paths drawn per time series during inference
        self.num_parallel_samples = num_parallel_samples
        # for decoding the lags are shifted by one, at the first time-step
        # of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]
    def sampling_decoder(
        self,
        F,
        static_feat: Tensor,
        past_target: Tensor,
        time_feat: Tensor,
        scale: Tensor,
        begin_states: List,
    ) -> Tensor:
        """
        Computes sample paths by unrolling the LSTM starting with a initial
        input and state.
        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, 1, 1).
        begin_states : List
            list of initial states for the LSTM layers.
            the shape of each tensor of the list should be (batch_size, num_cells)
        Returns
        --------
        Tensor
            A tensor containing sampled paths.
            Shape: (batch_size, num_sample_paths, prediction_length).
        """
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_time_feat = time_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_static_feat = static_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_scale = scale.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_states = [
            s.repeat(repeats=self.num_parallel_samples, axis=0)
            for s in begin_states
        ]
        future_samples = []
        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags = self.get_lagged_subsequences(
                F=F,
                sequence=repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )
            # rescale lags by the per-series scale so the RNN sees
            # normalized inputs
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags_scaled = F.broadcast_div(
                lags, repeated_scale.expand_dims(axis=-1)
            )
            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = F.reshape(
                data=lags_scaled,
                shape=(-1, 1, prod(self.target_shape) * len(self.lags_seq)),
            )
            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            decoder_input = F.concat(
                input_lags,
                repeated_time_feat.slice_axis(axis=1, begin=k, end=k + 1),
                repeated_static_feat,
                dim=-1,
            )
            # advance the RNN by a single step, keeping the updated state
            # output shape: (batch_size * num_samples, 1, num_cells)
            # state shape: (batch_size * num_samples, num_cells)
            rnn_outputs, repeated_states = self.rnn.unroll(
                inputs=decoder_input,
                length=1,
                begin_state=repeated_states,
                layout="NTC",
                merge_outputs=True,
            )
            distr_args = self.proj_distr_args(rnn_outputs)
            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(
                distr_args, scale=repeated_scale
            )
            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample(dtype=self.dtype)
            # append the sampled value to the history so the next step can
            # look it up as a lag
            # (batch_size * num_samples, seq_len, *target_shape)
            repeated_past_target = F.concat(
                repeated_past_target, new_samples, dim=1
            )
            future_samples.append(new_samples)
        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = F.concat(*future_samples, dim=1)
        # unfold the sample dimension back out of the batch dimension
        # (batch_size, num_samples, prediction_length, *target_shape)
        return samples.reshape(
            shape=(
                (-1, self.num_parallel_samples)
                + (self.prediction_length,)
                + self.target_shape
            )
        )
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Tensor,  # (batch_size, prediction_length, num_features)
    ) -> Tensor:
        """
        Predicts samples, all tensors should have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        Returns
        -------
        Tensor
            Predicted samples
        """
        # unroll the decoder in "prediction mode", i.e. with past data only
        _, state, scale, static_feat = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=None,
            future_target=None,
        )
        # autoregressively sample forward from the encoder's final state
        return self.sampling_decoder(
            F=F,
            past_target=past_target,
            time_feat=future_time_feat,
            static_feat=static_feat,
            scale=scale,
            begin_states=state,
        )
| 22,293 | 34.613419 | 116 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deeparsavedata-v0/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import DType, validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import DistributionOutput, StudentTOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import (
TimeFeature,
time_features_from_frequency_str,
get_lags_for_frequency,
)
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SetField,
Transformation,
VstackFeatures,
)
# Relative imports
from ._network import DeepARSaveDataPredictionNetwork, DeepARSaveDataTrainingNetwork
class DeepARSaveDataEstimator(GluonEstimator):
    """
    Construct a DeepARSaveData estimator.
    This implements an RNN-based model, close to the one described in
    [SFG17]_.
    *Note:* the code of this model is unrelated to the implementation behind
    `SageMaker's DeepARSaveData Forecasting Algorithm
    <https://docs.aws.amazon.com/sagemaker/latest/dg/deepar.html>`_.
    Parameters
    ----------
    freq
        Frequency of the data to train on and predict
    prediction_length
        Length of the prediction horizon
    trainer
        Trainer object to be used (default: Trainer())
    context_length
        Number of steps to unroll the RNN for before computing predictions
        (default: None, in which case context_length = prediction_length)
    num_layers
        Number of RNN layers (default: 2)
    num_cells
        Number of RNN cells for each layer (default: 40)
    cell_type
        Type of recurrent cells to use (available: 'lstm' or 'gru';
        default: 'lstm')
    dropout_rate
        Dropout regularization parameter (default: 0.1)
    use_feat_dynamic_real
        Whether to use the ``feat_dynamic_real`` field from the data
        (default: False)
    use_feat_static_cat
        Whether to use the ``feat_static_cat`` field from the data
        (default: False)
    use_feat_static_real
        Whether to use the ``feat_static_real`` field from the data
        (default: False)
    cardinality
        Number of values of each categorical feature.
        This must be set if ``use_feat_static_cat == True`` (default: None)
    embedding_dimension
        Dimension of the embeddings for categorical features
        (default: [min(50, (cat+1)//2) for cat in cardinality])
    distr_output
        Distribution to use to evaluate observations and sample predictions
        (default: StudentTOutput())
    scaling
        Whether to automatically scale the target values (default: true)
    lags_seq
        Indices of the lagged target values to use as inputs of the RNN
        (default: None, in which case these are automatically determined
        based on freq)
    time_features
        Time features to use as inputs of the RNN (default: None, in which
        case these are automatically determined based on freq)
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 100)
    """
    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        trainer: Trainer = Trainer(),
        context_length: Optional[int] = None,
        num_layers: int = 2,
        num_cells: int = 40,
        cell_type: str = "lstm",
        dropout_rate: float = 0.1,
        use_feat_dynamic_real: bool = False,
        use_feat_static_cat: bool = False,
        use_feat_static_real: bool = False,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        distr_output: DistributionOutput = StudentTOutput(),
        scaling: bool = True,
        lags_seq: Optional[List[int]] = None,
        time_features: Optional[List[TimeFeature]] = None,
        num_parallel_samples: int = 100,
        dtype: DType = np.float32,
    ) -> None:
        super().__init__(trainer=trainer, dtype=dtype)
        # validate hyper-parameters up front so misconfiguration fails fast
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        assert num_layers > 0, "The value of `num_layers` should be > 0"
        assert num_cells > 0, "The value of `num_cells` should be > 0"
        assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
        assert (cardinality is not None and use_feat_static_cat) or (
            cardinality is None and not use_feat_static_cat
        ), "You should set `cardinality` if and only if `use_feat_static_cat=True`"
        assert cardinality is None or all(
            [c > 0 for c in cardinality]
        ), "Elements of `cardinality` should be > 0"
        assert embedding_dimension is None or all(
            [e > 0 for e in embedding_dimension]
        ), "Elements of `embedding_dimension` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"
        self.freq = freq
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.prediction_length = prediction_length
        self.distr_output = distr_output
        self.distr_output.dtype = dtype
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.dropout_rate = dropout_rate
        self.use_feat_dynamic_real = use_feat_dynamic_real
        self.use_feat_static_cat = use_feat_static_cat
        self.use_feat_static_real = use_feat_static_real
        # fall back to a single dummy category when static cat features
        # are disabled
        self.cardinality = (
            cardinality if cardinality and use_feat_static_cat else [1]
        )
        self.embedding_dimension = (
            embedding_dimension
            if embedding_dimension is not None
            else [min(50, (cat + 1) // 2) for cat in self.cardinality]
        )
        self.scaling = scaling
        self.lags_seq = (
            lags_seq
            if lags_seq is not None
            else get_lags_for_frequency(freq_str=freq)
        )
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )
        # the encoder needs enough history to look up the largest lag
        self.history_length = self.context_length + max(self.lags_seq)
        self.num_parallel_samples = num_parallel_samples
        # handle on the training network so its saved data (savedata /
        # savetarget buffers) can be inspected after training
        self.network = None
    def create_transformation(self) -> Transformation:
        """Build the feature-engineering pipeline applied to each dataset
        entry: field cleanup, observed-value indicators, time/age features,
        and the instance splitter that cuts training windows."""
        remove_field_names = [FieldName.FEAT_DYNAMIC_CAT]
        if not self.use_feat_static_real:
            remove_field_names.append(FieldName.FEAT_STATIC_REAL)
        if not self.use_feat_dynamic_real:
            remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
        return Chain(
            [RemoveFields(field_names=remove_field_names)]
            + (
                [SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
                if not self.use_feat_static_cat
                else []
            )
            + (
                [
                    SetField(
                        output_field=FieldName.FEAT_STATIC_REAL, value=[0.0]
                    )
                ]
                if not self.use_feat_static_real
                else []
            )
            + [
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_CAT,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_REAL,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.TARGET,
                    # in the following line, we add 1 for the time dimension
                    expected_ndim=1 + len(self.distr_output.event_shape),
                    dtype=self.dtype,
                ),
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                    dtype=self.dtype,
                ),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=self.time_features,
                    pred_length=self.prediction_length,
                ),
                AddAgeFeature(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_AGE,
                    pred_length=self.prediction_length,
                    log_scale=True,
                    dtype=self.dtype,
                ),
                VstackFeatures(
                    output_field=FieldName.FEAT_TIME,
                    input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
                    + (
                        [FieldName.FEAT_DYNAMIC_REAL]
                        if self.use_feat_dynamic_real
                        else []
                    ),
                ),
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                    past_length=self.history_length,
                    future_length=self.prediction_length,
                    time_series_fields=[
                        FieldName.FEAT_TIME,
                        FieldName.OBSERVED_VALUES,
                    ],
                ),
            ]
        )
    def create_training_network(self) -> DeepARSaveDataTrainingNetwork:
        """Instantiate the training network and keep a reference on
        ``self.network`` so the data it saves can be read back later."""
        net = DeepARSaveDataTrainingNetwork(
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )
        self.network = net
        return net
    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """Build a prediction network with the same hyper-parameters, copy
        the trained weights into it, and wrap it in a predictor."""
        prediction_network = DeepARSaveDataPredictionNetwork(
            num_parallel_samples=self.num_parallel_samples,
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )
        copy_parameters(trained_network, prediction_network)
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
            dtype=self.dtype,
        )
| 12,805 | 36.775811 | 94 | py |
rankpredictor | rankpredictor-master/src/indycar/model/mlp-determin/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List
# Third-party imports
import mxnet as mx
from mxnet.gluon import loss as gloss
# First-party imports
from gluonts.block.scaler import MeanScaler, NOPScaler
from gluonts.core.component import validated
from gluonts.distribution import Distribution, DistributionOutput
from gluonts.model.common import Tensor
class DMLPNetworkBase(mx.gluon.HybridBlock):
    """
    Abstract base class to implement feed-forward networks for deterministic
    time series prediction.
    This class does not implement hybrid_forward: this is delegated
    to the two subclasses DMLPTrainingNetwork and
    DMLPPredictionNetwork, that define respectively how to
    compute the loss and how to generate predictions.
    Unlike the probabilistic GluonTS feed-forward model this variant was
    derived from, it emits point predictions and trains with an L2 loss
    instead of projecting onto distribution parameters.
    Parameters
    ----------
    num_hidden_dimensions
        Number of hidden nodes in each layer; the last entry is the output
        dimension per predicted time step.
    prediction_length
        Number of time units to predict.
    context_length
        Number of time units that condition the predictions.
    batch_normalization
        Whether to use batch normalization.
    mean_scaling
        Scale the network input by the data mean and the network output by
        its inverse.
    dropout
        Dropout rate applied after every hidden layer.
    kwargs
    """
    # Needs the validated decorator so that arguments types are checked and
    # the block can be serialized.
    @validated()
    def __init__(
        self,
        num_hidden_dimensions: List[int],
        prediction_length: int,
        context_length: int,
        batch_normalization: bool,
        mean_scaling: bool,
        dropout: float,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_hidden_dimensions = num_hidden_dimensions
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.batch_normalization = batch_normalization
        self.mean_scaling = mean_scaling
        # deterministic squared-error loss, replacing the distribution
        # output of the probabilistic variant
        self.loss = gloss.L2Loss()
        with self.name_scope():
            self.mlp = mx.gluon.nn.HybridSequential()
            dims = self.num_hidden_dimensions
            # hidden layers: Dense -> (optional BatchNorm) -> Dropout
            for units in dims[:-1]:
                self.mlp.add(mx.gluon.nn.Dense(units=units, activation="relu"))
                if self.batch_normalization:
                    self.mlp.add(mx.gluon.nn.BatchNorm())
                self.mlp.add(mx.gluon.nn.Dropout(dropout))
            # output layer, reshaped to (batch, prediction_length, dims[-1])
            self.mlp.add(mx.gluon.nn.Dense(units=prediction_length * dims[-1]))
            self.mlp.add(
                mx.gluon.nn.HybridLambda(
                    lambda F, o: F.reshape(
                        o, (-1, prediction_length, dims[-1])
                    )
                )
            )
            # kept for interface compatibility; scaling is applied by
            # subclasses if at all (mean_scaling defaults to False upstream)
            self.scaler = MeanScaler() if mean_scaling else NOPScaler()
    def get_output(self, F, feat: Tensor) -> Tensor:
        """
        Applies the feed-forward network to the input features.
        Parameters
        ----------
        F
        feat
            Input feature tensor. Shape: (batch_size, num_features).
        Returns
        -------
        Tensor
            Network output. Shape: (batch_size, prediction_length, dims[-1]).
        """
        # NOTE: the original also computed an unused
        # `F.ones_like(feat).mean(axis=1)` scale here; it had no effect on
        # the output and has been removed.
        return self.mlp(feat)
class DMLPTrainingNetwork(DMLPNetworkBase):
    """Training head of the deterministic MLP: forwards the features through
    the base network and scores the result against the true target with the
    L2 loss configured in :class:`DMLPNetworkBase`."""
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self, F, target: Tensor, feat: Tensor
    ) -> Tensor:
        """
        Runs the network on `feat` and returns the L2 loss against `target`.
        Parameters
        ----------
        F
        target
            Ground-truth future values.
            Shape: (batch_size, prediction_length, target_dim).
        feat
            Conditioning features fed to the MLP.
        Returns
        -------
        Tensor
            Per-example loss. Shape: (batch_size, ).
        """
        # point prediction: (batch_size, prediction_length, target_dim)
        prediction = self.get_output(F, feat)
        # L2Loss is symmetric in its two arguments, so the (target,
        # prediction) order used here matches the original behavior
        return self.loss(target, prediction)
class DMLPPredictionNetwork(DMLPNetworkBase):
    """Prediction head of the deterministic MLP: returns the raw network
    output as the forecast (no sampling, despite the retained
    `num_parallel_samples` knob kept for interface compatibility)."""
    @validated()
    def __init__(
        self, num_parallel_samples: int = 100, *args, **kwargs
    ) -> None:
        super().__init__(*args, **kwargs)
        # retained from the probabilistic variant; the deterministic
        # forward pass below does not draw samples
        self.num_parallel_samples = num_parallel_samples
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, feat: Tensor) -> Tensor:
        """
        Produces the deterministic forecast for the given features.
        Parameters
        ----------
        F
        feat
            Conditioning features fed to the MLP.
        Returns
        -------
        Tensor
            Point forecast. Shape: (batch_size, prediction_length, target_dim).
        """
        return self.get_output(F, feat)
| 7,371 | 31.475771 | 82 | py |
rankpredictor | rankpredictor-master/src/indycar/model/mlp-determin/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import DistributionOutput, StudentTOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.trainer import Trainer
#from gluonts.transform import Identity, RemoveFields
from gluonts.transform import RemoveFields
from gluonts.transform import (
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
Transformation,
)
# Relative imports
from ._network import (
DMLPPredictionNetwork,
DMLPTrainingNetwork,
)
class DMLPEstimator(GluonEstimator):
    """
    DMLPEstimator shows how to build a simple DMLP model predicting
    the next target time-steps given the previous ones.
    Given that we want to define a gluon model trainable by SGD, we inherit the
    parent class `GluonEstimator` that handles most of the logic for fitting a
    neural-network.
    We thus only have to define:
    1. How the data is transformed before being fed to our model::
        def create_transformation(self) -> Transformation
    2. How the training happens::
        def create_training_network(self) -> HybridBlock
    3. how the predictions can be made for a batch given a trained network::
        def create_predictor(
            self,
            transformation: Transformation,
            trained_net: HybridBlock,
        ) -> Predictor
    Parameters
    ----------
    freq
        Time granularity of the data
    prediction_length
        Length of the prediction horizon
    trainer
        Trainer object to be used (default: Trainer())
    num_hidden_dimensions
        Number of hidden nodes in each layer (default: [40, 40])
    context_length
        Number of time units that condition the predictions
        (default: None, in which case context_length = prediction_length)
    batch_normalization
        Whether to use batch normalization (default: False)
    mean_scaling
        Scale the network input by the data mean and the network output by
        its inverse (default: False)
    dropout
        Dropout rate applied in the network's hidden layers (default: 0.5)
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 100)
    """
    # The validated() decorator makes sure that parameters are checked by
    # Pydantic and allows to serialize/print models. Note that all parameters
    # have defaults except for `freq` and `prediction_length`. which is
    # recommended in GluonTS to allow to compare models easily.
    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        trainer: Trainer = Trainer(),
        num_hidden_dimensions: Optional[List[int]] = None,
        context_length: Optional[int] = None,
        batch_normalization: bool = False,
        mean_scaling: bool = False,
        dropout: float = 0.5,
        num_parallel_samples: int = 100,
    ) -> None:
        """
        Defines an estimator. All parameters should be serializable.
        """
        super().__init__(trainer=trainer)
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        # BUGFIX: the original asserted `... or ([d > 0 for d in ...])`,
        # which tests the truthiness of the list itself -- a non-empty list
        # is always truthy, so the check could never fail.  `all(...)`
        # performs the intended element-wise validation (same pattern as
        # the DeepAR estimator's cardinality checks).
        assert num_hidden_dimensions is None or all(
            d > 0 for d in num_hidden_dimensions
        ), "Elements of `num_hidden_dimensions` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"
        self.num_hidden_dimensions = (
            num_hidden_dimensions
            if num_hidden_dimensions is not None
            else [40, 40]
        )
        self.prediction_length = prediction_length
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.freq = freq
        self.batch_normalization = batch_normalization
        self.mean_scaling = mean_scaling
        self.num_parallel_samples = num_parallel_samples
        self.dropout = dropout
    # here we do only a simple operation to convert the input data to a form
    # that can be digested by our model by only splitting the target in two, a
    # conditioning part and a to-predict part, for each training example.
    # For a more complex transformation example, see the `gluonts.model.deepar`
    # transformation that includes time features, age feature, observed values
    # indicator, ...
    def create_transformation(self) -> Transformation:
        # Removing the (normally absent) 'del' field is effectively a no-op;
        # it presumably stands in for an identity transformation (the
        # original import of `Identity` is commented out) -- TODO confirm.
        return Chain(
            [RemoveFields(field_names=['del'])]
        )
    # defines the network, we get to see one batch to initialize it.
    # the network should return at least one tensor that is used as a loss to minimize in the training loop.
    # several tensors can be returned for instance for analysis, see DeepARTrainingNetwork for an example.
    def create_training_network(self) -> HybridBlock:
        return DMLPTrainingNetwork(
            num_hidden_dimensions=self.num_hidden_dimensions,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            batch_normalization=self.batch_normalization,
            mean_scaling=self.mean_scaling,
            dropout=self.dropout,
        )
    # we now define how the prediction happens given that we are provided a
    # training network.
    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        prediction_network = DMLPPredictionNetwork(
            num_hidden_dimensions=self.num_hidden_dimensions,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            batch_normalization=self.batch_normalization,
            mean_scaling=self.mean_scaling,
            dropout=self.dropout,
            # share the trained weights with the prediction network
            params=trained_network.collect_params(),
            num_parallel_samples=self.num_parallel_samples,
        )
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
        )
| 7,529 | 36.839196 | 108 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-weighted/trans_encoder.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
from gluonts.model.transformer.layers import (
TransformerProcessBlock,
TransformerFeedForward,
MultiHeadSelfAttention,
InputLayer,
)
class TransformerEncoder(HybridBlock):
    """Single transformer encoder block: input projection, multi-head
    self-attention, and a position-wise feed-forward layer, each wrapped in
    configurable pre/post process blocks (dropout / residual / norm as
    encoded by the ``pre_seq`` / ``post_seq`` config strings).
    Parameters
    ----------
    encoder_length
        Length of the encoded sequence (stored; not used in this block's
        forward pass).
    config
        Dict of transformer hyper-parameters: ``model_dim``, ``num_heads``,
        ``inner_ff_dim_scale``, ``act_type``, ``dropout_rate``, ``pre_seq``,
        ``post_seq``.
    """
    @validated()
    def __init__(self, encoder_length: int, config: Dict, **kwargs) -> None:
        super().__init__(**kwargs)
        self.encoder_length = encoder_length
        with self.name_scope():
            # projects raw inputs to the model dimension
            self.enc_input_layer = InputLayer(model_size=config["model_dim"])
            self.enc_pre_self_att = TransformerProcessBlock(
                sequence=config["pre_seq"],
                dropout=config["dropout_rate"],
                prefix="pretransformerprocessblock_",
            )
            self.enc_self_att = MultiHeadSelfAttention(
                att_dim_in=config["model_dim"],
                heads=config["num_heads"],
                att_dim_out=config["model_dim"],
                dropout=config["dropout_rate"],
                prefix="multiheadselfattention_",
            )
            self.enc_post_self_att = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postselfatttransformerprocessblock_",
            )
            # position-wise feed-forward with widened inner dimension
            self.enc_ff = TransformerFeedForward(
                inner_dim=config["model_dim"] * config["inner_ff_dim_scale"],
                out_dim=config["model_dim"],
                act_type=config["act_type"],
                dropout=config["dropout_rate"],
                prefix="transformerfeedforward_",
            )
            self.enc_post_ff = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postfftransformerprocessblock_",
            )
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, data: Tensor) -> Tensor:
        """
        A transformer encoder block consists of a self-attention and a feed-forward layer with pre/post process blocks
        in between.
        """
        # input layer
        inputs = self.enc_input_layer(data)
        # self-attention (second attention output -- the weights -- is
        # discarded); the post block combines it with the residual `inputs`
        data_self_att, _ = self.enc_self_att(
            self.enc_pre_self_att(inputs, None)
        )
        data = self.enc_post_self_att(data_self_att, inputs)
        # feed-forward, again combined with its residual by the post block
        data_ff = self.enc_ff(data)
        data = self.enc_post_ff(data_ff, data)
        return data
| 3,242 | 33.5 | 118 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-weighted/layers.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict, Optional, Tuple
# Third-party imports
import mxnet as mx
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.model.common import Tensor
def split_heads(F, x: Tensor, dim_per_head: int, heads: int) -> Tensor:
    r"""
    Folds the head dimension into the batch dimension.

    Parameters
    ----------
    x
        Tensor of shape (batch_size, time_length, dim).
    dim_per_head
        Dimension per head
    heads
        Number of heads

    Returns
    -------
    Tensor of shape (batch_size * heads, time_length, dim_per_head).
    """
    # (batch_size, time_length, heads, dim_per_head): carve the feature
    # axis into one slice per head
    per_head = F.reshape(data=x, shape=(0, -1, heads, dim_per_head))
    # (batch_size, heads, time_length, dim_per_head): bring heads forward
    per_head = F.transpose(data=per_head, axes=(0, 2, 1, 3))
    # (batch_size * heads, time_length, dim_per_head): merge heads into batch
    return F.reshape(data=per_head, shape=(-3, -1, dim_per_head))
def dot_attention(
    F,
    queries: Tensor,
    keys: Tensor,
    values: Tensor,
    mask: Optional[Tensor] = None,
    dropout: float = 0.0,
) -> Tensor:
    r"""
    Plain dot-product attention.

    Parameters
    ----------
    queries
        Attention queries of shape (n, lq, d)
    keys
        Attention keys of shape (n, lk, d)
    values
        Attention values of shape (n, lk, dv)
    mask
        Optional additive mask tensor applied to the logits
    dropout
        Dropout rate applied to the attention weights

    Returns
    -------
    'Context' vectors for each query of shape (n, lq, dv)
    """
    # attention logits: (n, lq, lk)
    scores = F.batch_dot(lhs=queries, rhs=keys, transpose_b=True)
    if mask is not None:
        scores = F.broadcast_add(scores, mask)

    # normalize over the key axis
    weights = F.softmax(scores, axis=-1)
    if dropout > 0.0:
        weights = F.Dropout(weights, p=dropout)

    # (n, lq, lk) x (n, lk, dv) -> (n, lq, dv)
    return F.batch_dot(lhs=weights, rhs=values)
def combine_heads(F, x: Tensor, dim_per_head: int, heads: int) -> Tensor:
    r"""
    Inverse of :func:`split_heads`: moves the head dimension out of the
    batch dimension and concatenates the heads along the feature axis.

    Parameters
    ----------
    x
        Tensor of shape (batch_size * heads, time_length, dim_per_head)
    dim_per_head
        Dimension per head
    heads
        Number of heads

    Returns
    -------
    Tensor of shape (batch_size, time_length, dim)
    """
    # (batch_size, heads, time_length, dim_per_head): peel heads off batch
    unfolded = F.reshape(data=x, shape=(-4, -1, heads, 0, dim_per_head))
    # (batch_size, time_length, heads, dim_per_head)
    unfolded = F.transpose(unfolded, axes=(0, 2, 1, 3))
    # (batch_size, time_length, dim): concatenate heads along features
    return F.reshape(unfolded, shape=(-1, 0, dim_per_head * heads))
class LayerNormalization(HybridBlock):
    """
    Layer normalization as proposed in [BKH16]_, thinly wrapping
    ``mx.gluon.nn.LayerNorm`` applied over the last axis.
    """

    def __init__(
        self,
        scale_init: str = "ones",
        shift_init: str = "zeros",
        eps: float = 1e-06,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # keep the initializer names around as attributes
        self.scale_init = scale_init
        self.shift_init = shift_init
        with self.name_scope():
            self.lnorm = mx.gluon.nn.LayerNorm(
                axis=-1,
                gamma_initializer=scale_init,
                beta_initializer=shift_init,
                epsilon=eps,
            )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, data: Tensor) -> Tensor:
        r"""
        Normalizes hidden units of data as follows:

        data = scale * (data - mean) / sqrt(var + eps) + shift

        Normalization is performed over the last dimension of the input data.

        Parameters
        ----------
        data
            Data to normalize of shape (d0, ..., dn, num_hidden)

        Returns
        -------
        Normalized inputs of shape: (d0, ..., dn, num_hidden)
        """
        return self.lnorm(data)
class InputLayer(HybridBlock):
    r"""
    One-layer MLP projecting the last input dimension to ``model_size``:

    (batch_size, time_length, input_dim) -> (batch_size, time_length, model_size)
    """

    def __init__(self, model_size: int = 64, **kwargs) -> None:
        super().__init__(**kwargs)
        self.model_size = model_size
        with self.name_scope():
            self.net = mx.gluon.nn.Dense(units=model_size, flatten=False)

    def hybrid_forward(self, F, data: Tensor, *args):
        # F is unused: the dense layer handles ndarray and symbol inputs alike
        return self.net(data)
class MultiHeadAttentionBase(HybridBlock):
    """
    Shared machinery for multi-head attention variants: head bookkeeping,
    the scaled dot attention over split heads, and the output projection.

    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """

    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        assert (
            att_dim_in % heads == 0
        ), "Number of heads {} must divide attention att_dim_in {}".format(
            heads, att_dim_in
        )

        self.att_dim_in = att_dim_in
        self.heads = heads
        self.att_dim_out = att_dim_out
        self.dropout = dropout
        self.dim_per_head = att_dim_in // heads

        with self.name_scope():
            # projects the re-combined heads to att_dim_out
            self.dense_att = mx.gluon.nn.Dense(
                units=att_dim_out, flatten=False
            )

    def _attend(
        self,
        F,
        queries: Tensor,
        keys: Tensor,
        values: Tensor,
        mask: Optional[Tensor] = None,
    ) -> Tensor:
        r"""
        Returns context vectors of multi-head dot attention.

        Parameters
        ----------
        queries
            Queries tensor of shape (batch_size, query_max_length, dim)
        keys
            Keys tensor of shape (batch_size, memory_max_length, dim)
        values
            Values tensor of shape (batch_size, memory_max_length, dim)
        mask
            Optional additive mask applied to the attention logits

        Returns
        -------
        Context vectors of shape (batch_size, query_max_length, att_dim_out)
        """
        # scale queries by 1/sqrt(dim_per_head); scaling is elementwise so
        # it commutes with the head split
        # each of shape (batch_size * heads, length, dim_per_head)
        q = split_heads(
            F,
            queries * (self.dim_per_head ** -0.5),
            self.dim_per_head,
            self.heads,
        )
        k = split_heads(F, keys, self.dim_per_head, self.heads)
        v = split_heads(F, values, self.dim_per_head, self.heads)

        # (batch_size * heads, query_max_length, dim_per_head)
        per_head_ctx = dot_attention(
            F, q, k, v, mask=mask, dropout=self.dropout
        )

        # (batch_size, query_max_length, att_dim_out)
        return self.dense_att(
            combine_heads(F, per_head_ctx, self.dim_per_head, self.heads)
        )

    def hybrid_forward(self, F, *args, **kwargs):
        # concrete subclasses define how queries/keys/values are produced
        raise NotImplementedError
class MultiHeadSelfAttention(MultiHeadAttentionBase):
    r"""
    Multi-head self-attention: one joint linear layer produces the query,
    key, and value projections of the inputs.

    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """

    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(att_dim_in, heads, att_dim_out, dropout, **kwargs)

        with self.name_scope():
            # single projection producing Q, K and V stacked on the last axis
            self.dense_pre_satt = mx.gluon.nn.Dense(
                units=self.att_dim_in * 3, flatten=False
            )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        inputs: Tensor,
        mask: Optional[Tensor] = None,
        cache: Optional[Dict[str, Optional[Tensor]]] = None,
    ) -> Tuple[Tensor, Optional[Dict]]:
        r"""
        Computes multi-head self-attention on ``inputs``, optionally masking
        the attention scores and reusing/updating a cache of previously
        computed keys and values.

        Parameters
        ----------
        inputs
            Input data of shape (batch_size, max_length, att_dim_in)
        mask
            Optional tensor to mask attention scores
        cache
            Optional dictionary of previously computed keys and values

        Returns
        -------
        Tuple of the context tensor of shape
        (batch_size, max_length, att_dim_out) and the (possibly updated)
        cache.
        """
        # Q = K = V -> Q * W_q, K * W_k, V * W_v, computed jointly
        # (batch_size, max_length, att_dim_in * 3)
        qkv = self.dense_pre_satt(inputs)

        # each of shape (batch_size, max_length, att_dim_in)
        queries, keys, values = F.split(data=qkv, num_outputs=3, axis=2)

        if cache is not None:
            # concatenate onto any cached keys/values, then refresh the cache
            if "k" in cache.keys():
                keys = F.concat(cache["k"], keys, dim=1)
            cache["k"] = keys
            if "v" in cache.keys():
                values = F.concat(cache["v"], values, dim=1)
            cache["v"] = values

        return self._attend(F, queries, keys, values, mask), cache
class MultiHeadAttention(MultiHeadAttentionBase):
    r"""
    Multi-head attention where the queries come from a different tensor
    than the keys and values (e.g. encoder-decoder attention).

    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """

    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(att_dim_in, heads, att_dim_out, dropout, **kwargs)

        with self.name_scope():
            # separate projections: queries from one source, keys/values
            # both derived from the memory tensor
            self.dense_pre_att_q = mx.gluon.nn.Dense(
                units=self.att_dim_in, flatten=False
            )
            self.dense_pre_att_k = mx.gluon.nn.Dense(
                units=self.att_dim_in, flatten=False
            )
            self.dense_pre_att_v = mx.gluon.nn.Dense(
                units=self.att_dim_in, flatten=False
            )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self, F, queries: Tensor, memory: Tensor, mask: Optional[Tensor] = None
    ) -> Tensor:
        r"""
        Attends from ``queries`` to ``memory``, optionally masking the
        attention scores.

        Parameters
        ----------
        queries
            Queries tensor of shape (batch_size, query_max_length, att_dim_in)
        memory
            Memory tensor to attend to of shape (batch_size, memory_max_length, att_dim_in)
        mask
            Optional tensor to mask attention scores

        Returns
        -------
        Tensor of shape (batch_size, query_seq_len, att_dim_out)
        """
        # Q -> Q * W_q;  K = V -> K * W_k, V * W_v
        projected_q = self.dense_pre_att_q(queries)   # (batch, query_max_length, att_dim_in)
        projected_k = self.dense_pre_att_k(memory)    # (batch, memory_max_length, att_dim_in)
        projected_v = self.dense_pre_att_v(memory)    # (batch, memory_max_length, att_dim_in)

        return self._attend(F, projected_q, projected_k, projected_v, mask=mask)
class TransformerFeedForward(HybridBlock):
    r"""
    Position-wise feed-forward network with activation:

    .. math::
        activation(XW_1 + b_1)W_2 + b_2

    :math:`W_1`: (batch_size, d, inner_dim)
    :math:`W_2`: (batch_size, inner_dim, out_dim)
    """

    def __init__(
        self,
        inner_dim: int = 32,  # W1: (batch_size, d, inner_dim)
        out_dim: int = 32,  # W2: (batch_size, inner_dim, out_dim)
        act_type: str = "softrelu",
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.inner_dim = inner_dim
        self.out_dim = out_dim
        self.dropout = dropout
        self.act_type = act_type

        with self.name_scope():
            self.mlp = mx.gluon.nn.HybridSequential()
            # expansion layer with activation
            self.mlp.add(
                mx.gluon.nn.Dense(
                    units=inner_dim,
                    use_bias=True,
                    activation=act_type,
                    flatten=False,
                )
            )
            if dropout > 0.0:
                self.mlp.add(mx.gluon.nn.Dropout(dropout))
            # linear projection back to out_dim (no activation)
            self.mlp.add(
                mx.gluon.nn.Dense(units=out_dim, use_bias=True, flatten=False)
            )

    def hybrid_forward(self, F, x: Tensor, *args) -> Tensor:
        r"""
        Applies the two-layer MLP position-wise.

        Parameters
        ----------
        x
            Tensor of shape (batch_size, d, in_dim)

        Returns
        -------
        Tensor of shape (batch_size, d1, out_dim)
        """
        return self.mlp(x)
class TransformerProcessBlock(HybridBlock):
    r"""
    Applies a configurable pre/post processing pipeline to layer inputs.

    The pipeline is described by ``sequence``, read left to right, where
    each character selects one operation:

        'n': layer normalization
        'r': residual connection (requires a previous value)
        'd': dropout
    """

    def __init__(self, sequence: str, dropout: float, **kwargs) -> None:
        super().__init__(**kwargs)
        self.sequence = sequence
        self.dropout = dropout
        # only instantiate the LayerNorm child when the sequence needs it
        self.layer_norm = None
        if "n" in sequence:
            self.layer_norm = LayerNormalization()

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self, F, data: Tensor, prev: Optional[Tensor] = None
    ) -> Tensor:
        r"""
        Applies the configured processing sequence to ``data``.

        Parameters
        ----------
        data
            Input data of shape: (batch_size, length, num_hidden)
        prev
            Previous data of shape (batch_size, length, num_hidden)

        Returns
        -------
        Processed data of shape (batch_size, length, num_hidden).
        """
        if not self.sequence:
            return data

        if prev is None:
            assert (
                "r" not in self.sequence
            ), "Residual connection not allowed if no previous value given."

        for op in self.sequence:
            if op == "n":
                data = self.layer_norm(data)
            elif op == "r":
                data = F.broadcast_add(data, prev)
            elif op == "d":
                # dropout is a no-op when the rate is zero
                if self.dropout > 0.0:
                    data = F.Dropout(data, p=self.dropout)
            else:
                raise ValueError("Unknown step in sequence: %s" % op)

        return data
| 15,991 | 27.506239 | 112 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-weighted/trans_decoder.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
from gluonts.model.transformer.layers import (
TransformerProcessBlock,
TransformerFeedForward,
MultiHeadSelfAttention,
MultiHeadAttention,
InputLayer,
)
class TransformerDecoder(HybridBlock):
    r"""
    A single transformer decoder block: input embedding, self-attention
    over the decoder inputs, attention over the encoder output, and a
    position-wise feed-forward network, each wrapped in the configured
    pre/post process blocks. Keys/values of the self-attention layer are
    cached across calls when ``is_train`` is False.
    """

    @validated()
    def __init__(self, decoder_length: int, config: Dict, **kwargs) -> None:
        super().__init__(**kwargs)

        self.decoder_length = decoder_length
        # inference-time key/value cache for the self-attention layer
        self.cache = {}

        # hoist the frequently used config entries
        model_dim = config["model_dim"]
        heads = config["num_heads"]
        dropout = config["dropout_rate"]
        post_seq = config["post_seq"]

        with self.name_scope():
            # projects raw inputs to the model dimension
            self.enc_input_layer = InputLayer(model_size=model_dim)

            self.dec_pre_self_att = TransformerProcessBlock(
                sequence=config["pre_seq"],
                dropout=dropout,
                prefix="pretransformerprocessblock_",
            )
            self.dec_self_att = MultiHeadSelfAttention(
                att_dim_in=model_dim,
                heads=heads,
                att_dim_out=model_dim,
                dropout=dropout,
                prefix="multiheadselfattention_",
            )
            self.dec_post_self_att = TransformerProcessBlock(
                sequence=post_seq,
                dropout=dropout,
                prefix="postselfatttransformerprocessblock_",
            )
            self.dec_enc_att = MultiHeadAttention(
                att_dim_in=model_dim,
                heads=heads,
                att_dim_out=model_dim,
                dropout=dropout,
                prefix="multiheadattention_",
            )
            self.dec_post_att = TransformerProcessBlock(
                sequence=post_seq,
                dropout=dropout,
                prefix="postatttransformerprocessblock_",
            )
            self.dec_ff = TransformerFeedForward(
                inner_dim=model_dim * config["inner_ff_dim_scale"],
                out_dim=model_dim,
                act_type=config["act_type"],
                dropout=dropout,
                prefix="transformerfeedforward_",
            )
            self.dec_post_ff = TransformerProcessBlock(
                sequence=post_seq,
                dropout=dropout,
                prefix="postffransformerprocessblock_",
            )

    def cache_reset(self):
        # drop any keys/values accumulated during a previous decode
        self.cache = {}

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        data: Tensor,
        enc_out: Tensor,
        mask: Optional[Tensor] = None,
        is_train: bool = True,
    ) -> Tensor:
        """
        Runs one decoder block: masked self-attention, encoder attention,
        and a feed-forward layer, with pre/post process blocks in between.
        When ``is_train`` is False the self-attention key/value cache is
        consulted and updated.

        NOTE(review): mutating ``self.cache`` here relies on imperative
        execution — presumably this block is never hybridized; confirm
        before calling ``hybridize()``.
        """
        # embed decoder inputs into the model dimension
        embedded = self.enc_input_layer(data)

        # self-attention (with key/value cache at inference time)
        self_att_out, cache = self.dec_self_att(
            self.dec_pre_self_att(embedded, None),
            mask,
            None if is_train else self.cache.copy(),
        )
        data = self.dec_post_self_att(self_att_out, embedded)

        # attention over the encoder output
        enc_att_out = self.dec_enc_att(data, enc_out)
        data = self.dec_post_att(enc_att_out, data)

        # position-wise feed-forward
        data = self.dec_post_ff(self.dec_ff(data), data)

        if not is_train:
            self.cache = cache.copy()
        return data
| 4,259 | 32.543307 | 118 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.