repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
rankpredictor | rankpredictor-master/src/indycar/model/transformer-weighted/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Tuple, List, Optional
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.scaler import NOPScaler, MeanScaler
from gluonts.block.feature import FeatureEmbedder
from gluonts.core.component import validated
from gluonts.distribution import DistributionOutput
from gluonts.model.common import Tensor
from gluonts.model.transformer.trans_encoder import TransformerEncoder
from gluonts.model.transformer.trans_decoder import TransformerDecoder
from gluonts.support.util import weighted_average
LARGE_NEGATIVE_VALUE = -99999999
def prod(xs):
    """Multiply together all elements of ``xs``; returns 1 for an empty iterable."""
    result = 1
    for factor in xs:
        result = result * factor
    return result
class TransformerWeightedNetwork(mx.gluon.HybridBlock):
    """
    Shared base for the weighted-Transformer training and prediction
    networks: owns the encoder/decoder blocks, the static-feature
    embedder, the target scaler, and the projection from decoder outputs
    to the parameters of ``distr_output``.

    Parameters
    ----------
    encoder
        Transformer encoder applied to the conditioning (context) range.
    decoder
        Transformer decoder applied to the prediction range.
    history_length
        Total number of past time steps available as input.
    context_length
        Number of steps the encoder conditions on.
    prediction_length
        Number of future steps to predict.
    distr_output
        Output distribution whose parameters are projected from the
        decoder output.
    cardinality
        Number of categories of each static categorical feature.
    embedding_dimension
        Embedding width used for every static categorical feature.
    lags_seq
        Lag indices of the target used as extra inputs; duplicates are
        rejected and the list is sorted in place.
    scaling
        If True use ``MeanScaler`` on the target, otherwise ``NOPScaler``
        (no scaling).
    """

    @validated()
    def __init__(
        self,
        encoder: TransformerEncoder,
        decoder: TransformerDecoder,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        cardinality: List[int],
        embedding_dimension: int,
        lags_seq: List[int],
        scaling: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.scaling = scaling
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.distr_output = distr_output
        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        lags_seq.sort()  # NOTE: mutates the caller's list in place
        self.lags_seq = lags_seq
        self.target_shape = distr_output.event_shape
        with self.name_scope():
            # projects decoder outputs to the distribution parameters
            self.proj_dist_args = distr_output.get_args_proj()
            self.encoder = encoder
            self.decoder = decoder
            # one embedding per categorical feature, all the same width
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=[embedding_dimension for _ in cardinality],
            )
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)

    @staticmethod
    def get_lagged_subsequences(
        F,
        sequence: Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> Tensor:
        """
        Returns lagged subsequences of a given sequence.
        Parameters
        ----------
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted. Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.
        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and I = len(indices), containing lagged
            subsequences. Specifically, lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        # we must have: sequence_length - lag_index - subsequences_length >= 0
        # for all lag_index, hence the following assert
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, found lag {max(indices)} "
            f"while history length is only {sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)
        lagged_values = []
        for lag_index in indices:
            # take the S steps that end lag_index steps before the sequence end
            begin_index = -lag_index - subsequences_length
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(
                F.slice_axis(
                    sequence, axis=1, begin=begin_index, end=end_index
                )
            )
        # stack along a new trailing lag axis -> (N, S, C, I)
        return F.stack(*lagged_values, axis=-1)

    def create_network_input(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, num_features, history_length)
        past_target: Tensor,  # (batch_size, history_length, 1)
        past_observed_values: Tensor,  # (batch_size, history_length)
        future_time_feat: Optional[
            Tensor
        ],  # (batch_size, num_features, prediction_length)
        future_target: Optional[Tensor],  # (batch_size, prediction_length)
    ) -> Tuple[Tensor, Tensor, Tensor]:
        """
        Creates inputs for the transformer network.
        All tensor arguments should have NTC layout.

        Returns the concatenated (lags, time features, static features)
        input tensor, the target scale, and the static feature vector.
        """
        if future_time_feat is None or future_target is None:
            # inference mode: condition only on the last context_length steps
            time_feat = past_time_feat.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            )
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        else:
            # training mode: concatenate the context and prediction ranges
            time_feat = F.concat(
                past_time_feat.slice_axis(
                    axis=1,
                    begin=self.history_length - self.context_length,
                    end=None,
                ),
                future_time_feat,
                dim=1,
            )
            sequence = F.concat(past_target, future_target, dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags = self.get_lagged_subsequences(
            F=F,
            sequence=sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length,
        )
        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
            past_observed_values.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
        )
        embedded_cat = self.embedder(feat_static_cat)
        # in addition to embedding features, use the log scale as it can help prediction too
        # (batch_size, num_features + prod(target_shape))
        static_feat = F.concat(
            embedded_cat,
            F.log(scale)
            if len(self.target_shape) == 0
            else F.log(scale.squeeze(axis=1)),
            dim=1,
        )
        # broadcast the static features along the time axis
        repeated_static_feat = static_feat.expand_dims(axis=1).repeat(
            axis=1, repeats=subsequences_length
        )
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))
        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = F.reshape(
            data=lags_scaled,
            shape=(
                -1,
                subsequences_length,
                len(self.lags_seq) * prod(self.target_shape),
            ),
        )
        # (batch_size, sub_seq_len, input_dim)
        inputs = F.concat(input_lags, time_feat, repeated_static_feat, dim=-1)
        return inputs, scale, static_feat

    @staticmethod
    def upper_triangular_mask(F, d):
        """
        Build a (d, d) matrix whose strictly upper triangle holds
        ``LARGE_NEGATIVE_VALUE`` and whose remainder is zero; callers pass
        it to the decoder as an additive attention mask so later positions
        are suppressed.
        """
        mask = F.zeros_like(F.eye(d))
        for k in range(d - 1):
            # add ones on the (k+1)-th superdiagonal
            mask = mask + F.eye(d, d, k + 1)
        return mask * LARGE_NEGATIVE_VALUE

    def hybrid_forward(self, F, x, *args, **kwargs):
        # implemented by the training / prediction subclasses below
        raise NotImplementedError
class TransformerWeightedTrainingNetwork(TransformerWeightedNetwork):
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
    ) -> Tensor:
        """
        Computes the loss for training Transformer, all inputs tensors representing time series have NTC layout.

        The per-step loss on the prediction range is re-weighted: steps
        where the target value changed (relative to two steps earlier) get
        weight 9, unchanged steps get weight 1, to counter imbalance.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape, seq_len)
        future_time_feat : (batch_size, prediction_length, num_features)
        future_target : (batch_size, prediction_length, *target_shape)
        Returns
        -------
        Scalar tensor: mean of the weighted per-series loss over the batch.
        """
        # create the inputs for the encoder
        inputs, scale, _ = self.create_network_input(
            F=F,
            feat_static_cat=feat_static_cat,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
        )
        # split the joint input into encoder (context) and decoder
        # (prediction) ranges
        enc_input = F.slice_axis(
            inputs, axis=1, begin=0, end=self.context_length
        )
        dec_input = F.slice_axis(
            inputs, axis=1, begin=self.context_length, end=None
        )
        # pass through encoder
        enc_out = self.encoder(enc_input)
        # input to decoder, with a causal (upper-triangular) attention mask
        dec_output = self.decoder(
            dec_input,
            enc_out,
            self.upper_triangular_mask(F, self.prediction_length),
        )
        # compute loss
        distr_args = self.proj_dist_args(dec_output)
        distr = self.distr_output.distribution(distr_args, scale=scale)
        # original loss
        #loss = distr.loss(future_target)
        # put together target sequence
        # (batch_size, seq_len, *target_shape)
        target = F.concat(
            past_target.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            ),
            future_target,
            dim=1,
        )
        # (batch_size, seq_len)
        #loss = distr.loss(target)
        loss = distr.loss(future_target)
        ## (batch_size, seq_len, *target_shape)
        #observed_values = F.concat(
        #    past_observed_values.slice_axis(
        #        axis=1,
        #        begin=self.history_length - self.context_length,
        #        end=self.history_length,
        #    ),
        #    future_observed_values,
        #    dim=1,
        #)
        ## mask the loss at one time step iff one or more observations is missing in the target dimensions
        ## (batch_size, seq_len)
        #loss_weights1 = (
        #    observed_values
        #    if (len(self.target_shape) == 0)
        #    else observed_values.min(axis=-1, keepdims=False)
        #)
        # deal with imbalance problem
        # set higher weight for loss at time step when target changes
        #loss_weights = (observed_values>0)*1./35 + (observed_values==0)*1.
        #print('observed shape:', observed_values.shape)
        #import pdb; pdb.set_trace()
        #if _hybridized_:
        if True:
            # compare the last 2 steps of the target with the 2 steps just
            # before them; weight 9 where the value changed, 1 otherwise.
            # NOTE(review): this 2-step weight tensor only lines up with the
            # per-step loss on future_target when prediction_length == 2 —
            # confirm against the estimator configuration.
            r = F.slice_axis(target, axis=1, begin=-2, end=None)
            l = F.slice_axis(target, axis=1, begin=-4, end=-2)
            w1 = F.ones_like(r)
            w9 = F.ones_like(r)*9
            w = F.where(r==l, w1, w9)
            loss_weights2 = w
        else:
            # dead branch kept for reference: same change-detection weights
            # computed over the whole sequence, padded with ones at the start
            r = F.slice_axis(target, axis=1, begin=2, end=None)
            l = F.slice_axis(target, axis=1, begin=0, end=-2)
            w1 = F.ones_like(r)
            w9 = F.ones_like(r)*9
            w = F.where(r==l, w1, w9)
            s = F.slice_axis(target, axis=1, begin=0, end=2)
            z = F.ones_like(s)
            loss_weights2 = F.concat(z, w)
        #loss_weights = F.where(loss_weights1==0, loss_weights1, loss_weights2)
        #
        # weighted mean of the per-step loss over the time axis
        weighted_loss = weighted_average(
            F=F, x=loss, weights=loss_weights2, axis=1
        )
        # need to mask possible nans and -inf
        #loss = F.where(condition=loss_weights, x=loss, y=F.zeros_like(loss))
        #return weighted loss of future
        #loss = F.slice_axis(weighted_loss, axis=1, begin=-2, end=None)
        loss = weighted_loss
        return loss.mean()
class TransformerWeightedPredictionNetwork(TransformerWeightedNetwork):
    """
    Prediction-time network: encodes the context once, then draws
    ``num_parallel_samples`` sample paths by unrolling the decoder one
    step at a time, feeding each drawn sample back as input.
    """

    @validated()
    def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
        super().__init__(**kwargs)
        # number of sample paths drawn per time series at inference time
        self.num_parallel_samples = num_parallel_samples
        # for decoding the lags are shifted by one,
        # at the first time-step of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]

    def sampling_decoder(
        self,
        F,
        static_feat: Tensor,
        past_target: Tensor,
        time_feat: Tensor,
        scale: Tensor,
        enc_out: Tensor,
    ) -> Tensor:
        """
        Computes sample paths by autoregressively unrolling the decoder,
        one prediction step at a time.
        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length, 1).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, ).
        enc_out: Tensor
            output of the encoder. Shape: (batch_size, num_cells)
        Returns
        --------
        sample_paths : Tensor
            a tensor containing sampled paths. Shape: (batch_size, num_sample_paths, prediction_length).
        """
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_time_feat = time_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_static_feat = static_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_enc_out = enc_out.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_scale = scale.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        future_samples = []
        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            # lags are taken from the (growing) history including samples
            # already drawn for steps < k
            lags = self.get_lagged_subsequences(
                F=F,
                sequence=repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags_scaled = F.broadcast_div(
                lags, repeated_scale.expand_dims(axis=-1)
            )
            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = F.reshape(
                data=lags_scaled,
                shape=(-1, 1, prod(self.target_shape) * len(self.lags_seq)),
            )
            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            dec_input = F.concat(
                input_lags,
                repeated_time_feat.slice_axis(axis=1, begin=k, end=k + 1),
                repeated_static_feat,
                dim=-1,
            )
            # no attention mask is needed for a single decoding step
            dec_output = self.decoder(dec_input, repeated_enc_out, None, False)
            distr_args = self.proj_dist_args(dec_output)
            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(
                distr_args, scale=repeated_scale
            )
            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample()
            # append the drawn sample so later steps can use it via lags
            # (batch_size * num_samples, seq_len, *target_shape)
            repeated_past_target = F.concat(
                repeated_past_target, new_samples, dim=1
            )
            future_samples.append(new_samples)
        # reset cache of the decoder
        self.decoder.cache_reset()
        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = F.concat(*future_samples, dim=1)
        # (batch_size, num_samples, *target_shape, prediction_length)
        return samples.reshape(
            shape=(
                (-1, self.num_parallel_samples)
                + self.target_shape
                + (self.prediction_length,)
            )
        )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
    ) -> Tensor:
        """
        Predicts samples, all tensors should have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        Returns predicted samples
        -------
        """
        # create the inputs for the encoder (no future target at inference)
        inputs, scale, static_feat = self.create_network_input(
            F=F,
            feat_static_cat=feat_static_cat,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=None,
            future_target=None,
        )
        # pass through encoder
        enc_out = self.encoder(inputs)
        return self.sampling_decoder(
            F=F,
            past_target=past_target,
            time_feat=future_time_feat,
            static_feat=static_feat,
            scale=scale,
            enc_out=enc_out,
        )
| 19,260 | 33.272242 | 116 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-weighted/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import StudentTOutput, DistributionOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import (
TimeFeature,
get_lags_for_frequency,
time_features_from_frequency_str,
)
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SetField,
Transformation,
VstackFeatures,
)
# Relative imports
from ._network import (
TransformerWeightedPredictionNetwork,
TransformerWeightedTrainingNetwork,
)
from gluonts.model.transformer.trans_encoder import TransformerEncoder
from gluonts.model.transformer.trans_decoder import TransformerDecoder
class TransformerWeightedEstimator(GluonEstimator):
    """
    Construct a Transformer estimator.
    This implements a Transformer model, close to the one described in
    [Vaswani2017]_.
    .. [Vaswani2017] Vaswani, Ashish, et al. "Attention is all you need."
        Advances in neural information processing systems. 2017.
    Parameters
    ----------
    freq
        Frequency of the data to train on and predict
    prediction_length
        Length of the prediction horizon
    context_length
        Number of steps to unroll the RNN for before computing predictions
        (default: None, in which case context_length = prediction_length)
    trainer
        Trainer object to be used (default: Trainer())
    dropout_rate
        Dropout regularization parameter (default: 0.1)
    cardinality
        Number of values of the each categorical feature (default: [1])
    embedding_dimension
        Dimension of the embeddings for categorical features (the same
        dimension is used for all embeddings, default: 5)
    distr_output
        Distribution to use to evaluate observations and sample predictions
        (default: StudentTOutput())
    model_dim
        Dimension of the transformer network, i.e., embedding dimension of the input
        (default: 32)
    inner_ff_dim_scale
        Dimension scale of the inner hidden layer of the transformer's
        feedforward network (default: 4)
    pre_seq
        Sequence that defines operations of the processing block before the main transformer
        network. Available operations: 'd' for dropout, 'r' for residual connections
        and 'n' for normalization (default: 'dn')
    post_seq
        Sequence that defines operations of the processing block in and after the main
        transformer network. Available operations: 'd' for dropout, 'r' for residual connections
        and 'n' for normalization (default: 'drn').
    act_type
        Activation type of the transformer network (default: 'softrelu')
    num_heads
        Number of heads in the multi-head attention (default: 8)
    scaling
        Whether to automatically scale the target values (default: true)
    lags_seq
        Indices of the lagged target values to use as inputs of the RNN
        (default: None, in which case these are automatically determined
        based on freq)
    time_features
        Time features to use as inputs of the RNN (default: None, in which
        case these are automatically determined based on freq)
    use_feat_dynamic_real
        Whether to use the ``feat_dynamic_real`` field from the data
        (default: False)
    use_feat_static_cat
        Whether to use the ``feat_static_cat`` field from the data
        (default: False)
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 100)
    """

    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        context_length: Optional[int] = None,
        trainer: Trainer = Trainer(),
        dropout_rate: float = 0.1,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: int = 20,
        distr_output: DistributionOutput = StudentTOutput(),
        model_dim: int = 32,
        inner_ff_dim_scale: int = 4,
        pre_seq: str = "dn",
        post_seq: str = "drn",
        act_type: str = "softrelu",
        num_heads: int = 8,
        scaling: bool = True,
        lags_seq: Optional[List[int]] = None,
        time_features: Optional[List[TimeFeature]] = None,
        use_feat_dynamic_real: bool = False,
        use_feat_static_cat: bool = False,
        num_parallel_samples: int = 100,
    ) -> None:
        super().__init__(trainer=trainer)
        # validate user-supplied hyperparameters early
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
        assert (
            cardinality is not None or not use_feat_static_cat
        ), "You must set `cardinality` if `use_feat_static_cat=True`"
        assert cardinality is None or all(
            [c > 0 for c in cardinality]
        ), "Elements of `cardinality` should be > 0"
        assert (
            embedding_dimension > 0
        ), "The value of `embedding_dimension` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"
        self.freq = freq
        self.prediction_length = prediction_length
        # fall back to prediction_length when no context_length is given
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.distr_output = distr_output
        self.dropout_rate = dropout_rate
        self.use_feat_dynamic_real = use_feat_dynamic_real
        self.use_feat_static_cat = use_feat_static_cat
        # a single dummy category is used when static cats are disabled
        self.cardinality = cardinality if use_feat_static_cat else [1]
        self.embedding_dimension = embedding_dimension
        self.num_parallel_samples = num_parallel_samples
        # derive lags / time features from the frequency unless given
        self.lags_seq = (
            lags_seq
            if lags_seq is not None
            else get_lags_for_frequency(freq_str=freq)
        )
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )
        # total past window: the context plus the largest lag needed
        self.history_length = self.context_length + max(self.lags_seq)
        self.scaling = scaling
        self.config = {
            "model_dim": model_dim,
            "pre_seq": pre_seq,
            "post_seq": post_seq,
            "dropout_rate": dropout_rate,
            "inner_ff_dim_scale": inner_ff_dim_scale,
            "act_type": act_type,
            "num_heads": num_heads,
        }
        # encoder/decoder instances are shared between the training and
        # prediction networks created below
        self.encoder = TransformerEncoder(
            self.context_length, self.config, prefix="enc_"
        )
        self.decoder = TransformerDecoder(
            self.prediction_length, self.config, prefix="dec_"
        )

    def create_transformation(self) -> Transformation:
        """
        Build the chain of transformations applied to each dataset entry:
        drop unused fields, add observed-value indicator, time/age
        features, and split into past/future instance windows.
        """
        remove_field_names = [
            FieldName.FEAT_DYNAMIC_CAT,
            FieldName.FEAT_STATIC_REAL,
        ]
        if not self.use_feat_dynamic_real:
            remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
        return Chain(
            [RemoveFields(field_names=remove_field_names)]
            + (
                # inject a dummy static category when none are used
                [SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
                if not self.use_feat_static_cat
                else []
            )
            + [
                AsNumpyArray(field=FieldName.FEAT_STATIC_CAT, expected_ndim=1),
                AsNumpyArray(
                    field=FieldName.TARGET,
                    # in the following line, we add 1 for the time dimension
                    expected_ndim=1 + len(self.distr_output.event_shape),
                ),
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                ),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=self.time_features,
                    pred_length=self.prediction_length,
                ),
                AddAgeFeature(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_AGE,
                    pred_length=self.prediction_length,
                    log_scale=True,
                ),
                VstackFeatures(
                    output_field=FieldName.FEAT_TIME,
                    input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
                    + (
                        [FieldName.FEAT_DYNAMIC_REAL]
                        if self.use_feat_dynamic_real
                        else []
                    ),
                ),
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                    past_length=self.history_length,
                    future_length=self.prediction_length,
                    time_series_fields=[
                        FieldName.FEAT_TIME,
                        FieldName.OBSERVED_VALUES,
                    ],
                ),
            ]
        )

    def create_training_network(self) -> TransformerWeightedTrainingNetwork:
        """Instantiate the loss-computing network used during training."""
        training_network = TransformerWeightedTrainingNetwork(
            encoder=self.encoder,
            decoder=self.decoder,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=True,
        )
        return training_network

    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """
        Build the sampling prediction network, copy the trained weights
        into it, and wrap it in a predictor.
        """
        prediction_network = TransformerWeightedPredictionNetwork(
            encoder=self.encoder,
            decoder=self.decoder,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=True,
            num_parallel_samples=self.num_parallel_samples,
        )
        # transfer trained parameters into the prediction network
        copy_parameters(trained_network, prediction_network)
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
        )
| 12,253 | 37.656151 | 100 | py |
rankpredictor | rankpredictor-master/src/indycar/model/mlp-savedata/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.scaler import MeanScaler, NOPScaler
from gluonts.core.component import validated
from gluonts.distribution import Distribution, DistributionOutput
from gluonts.model.common import Tensor
class MLPNetworkBase(mx.gluon.HybridBlock):
    """
    Abstract base class to implement feed-forward networks for probabilistic
    time series prediction.
    This class does not implement hybrid_forward: this is delegated
    to the two subclasses MLPTrainingNetwork and
    MLPPredictionNetwork, that define respectively how to
    compute the loss and how to generate predictions.

    This variant additionally records intermediate tensors (inputs, MLP
    outputs, distribution parameters, samples) in ``self.savedata`` for
    later inspection.
    Parameters
    ----------
    num_hidden_dimensions
        Number of hidden nodes in each layer.
    prediction_length
        Number of time units to predict.
    context_length
        Number of time units that condition the predictions.
    batch_normalization
        Whether to use batch normalization.
    mean_scaling
        Scale the network input by the data mean and the network output by
        its inverse.
    dropout
        Dropout rate applied after each hidden layer.
    distr_output
        Distribution to fit.
    kwargs
    """

    # Needs the validated decorator so that arguments types are checked and
    # the block can be serialized.
    @validated()
    def __init__(
        self,
        num_hidden_dimensions: List[int],
        prediction_length: int,
        context_length: int,
        batch_normalization: bool,
        mean_scaling: bool,
        dropout: float,
        distr_output: DistributionOutput,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_hidden_dimensions = num_hidden_dimensions
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.batch_normalization = batch_normalization
        self.mean_scaling = mean_scaling
        self.distr_output = distr_output
        with self.name_scope():
            # projects the MLP output to the distribution parameters
            self.distr_args_proj = self.distr_output.get_args_proj()
            self.mlp = mx.gluon.nn.HybridSequential()
            dims = self.num_hidden_dimensions
            # all but the last entry of dims define hidden layers; the last
            # entry is the per-step output width of the final layer
            for layer_no, units in enumerate(dims[:-1]):
                self.mlp.add(mx.gluon.nn.Dense(units=units, activation="relu"))
                if self.batch_normalization:
                    self.mlp.add(mx.gluon.nn.BatchNorm())
                #dropout
                self.mlp.add(mx.gluon.nn.Dropout(dropout))
            self.mlp.add(mx.gluon.nn.Dense(units=prediction_length * dims[-1]))
            # reshape flat output to (batch, prediction_length, dims[-1])
            self.mlp.add(
                mx.gluon.nn.HybridLambda(
                    lambda F, o: F.reshape(
                        o, (-1, prediction_length, dims[-1])
                    )
                )
            )
            self.scaler = MeanScaler() if mean_scaling else NOPScaler()
        #save data
        self.reset_savedata()

    def reset_savedata(self) -> None:
        """Clear the capture buffers; they otherwise grow on every call."""
        self.savedata = {}
        self.savedata['input'] = []       # raw MLP inputs
        self.savedata['target'] = []      # drawn prediction samples
        self.savedata['theta'] = []       # projected distribution parameters
        self.savedata['mlpoutput'] = []   # raw MLP outputs

    #def get_distr(self, F, feat: Tensor, target: Tensor) -> Distribution:
    def get_distr(self, F, feat: Tensor) -> Distribution:
        """
        Applies the feed-forward network to ``feat`` and
        maps the output to a probability distribution for future observations.
        Parameters
        ----------
        F
        feat
            Tensor containing the network input features.
        Returns
        -------
        Distribution
            The predicted probability distribution for future observations.
        """
        # (batch_size, seq_len, target_dim) and (batch_size, seq_len, target_dim)
        #scaled_target, target_scale = self.scaler(
        #    past_target,
        #    F.ones_like(past_target), # TODO: pass the actual observed here
        #)
        # scaling via self.scaler is disabled here; an all-ones scale is used
        target_scale = F.ones_like(feat).mean(axis=1)
        mlp_outputs = self.mlp(feat)
        # NOTE(review): `.asnumpy()` requires NDArray inputs, so this block
        # cannot run hybridized/symbolically, and these buffers grow without
        # bound until reset_savedata() is called — confirm intended.
        self.savedata['mlpoutput'].append(mlp_outputs.asnumpy().copy())
        self.savedata['input'].append(feat.asnumpy().copy())
        distr_args = self.distr_args_proj(mlp_outputs)
        self.savedata['theta'].append(distr_args)
        return self.distr_output.distribution(
            distr_args, scale=target_scale.expand_dims(axis=1)
        )
class MLPTrainingNetwork(MLPNetworkBase):
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self, F, target: Tensor, feat: Tensor
    ) -> Tensor:
        """
        Fit a probability distribution to ``feat`` via the MLP and score
        the observed ``target`` under it.

        Parameters
        ----------
        F
        target
            Observed values to score.
            Shape: (batch_size, prediction_length, target_dim).
        feat
            Input features fed to the MLP.

        Returns
        -------
        Tensor
            Per-series loss, averaged over the horizon.
            Shape: (batch_size, ).
        """
        predicted_distr = self.get_distr(F, feat)
        # per-time-step loss: (batch_size, prediction_length, target_dim)
        step_loss = predicted_distr.loss(target)
        # average over the time axis -> (batch_size, )
        return step_loss.mean(axis=1)
class MLPPredictionNetwork(MLPNetworkBase):
    @validated()
    def __init__(
        self, num_parallel_samples: int = 100, *args, **kwargs
    ) -> None:
        super().__init__(*args, **kwargs)
        # how many sample paths to draw per time series at inference time
        self.num_parallel_samples = num_parallel_samples

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, feat: Tensor) -> Tensor:
        """
        Fit a probability distribution to ``feat`` via the MLP and draw
        ``num_parallel_samples`` sample paths from it.

        Parameters
        ----------
        F
        feat
            Input features fed to the MLP.

        Returns
        -------
        Tensor
            Prediction samples. Shape: (batch_size, samples, prediction_length).
        """
        predicted_distr = self.get_distr(F, feat)
        # drawn paths come out as (num_samples, batch_size, prediction_length)
        drawn_paths = predicted_distr.sample(self.num_parallel_samples)
        # capture a copy of the samples for later inspection
        self.savedata['target'].append(drawn_paths.asnumpy().copy())
        # reorder to (batch_size, num_samples, prediction_length)
        return drawn_paths.swapaxes(0, 1)
| 7,086 | 31.213636 | 81 | py |
rankpredictor | rankpredictor-master/src/indycar/model/mlp-savedata/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import DistributionOutput, StudentTOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.trainer import Trainer
#from gluonts.transform import Identity, RemoveFields
from gluonts.transform import RemoveFields
from gluonts.transform import (
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
Transformation,
)
# Relative imports
from ._network import (
MLPPredictionNetwork,
MLPTrainingNetwork,
)
class MLPEstimator(GluonEstimator):
    """
    MLPEstimator shows how to build a simple MLP model predicting
    the next target time-steps given the previous ones.

    Given that we want to define a gluon model trainable by SGD, we inherit the
    parent class `GluonEstimator` that handles most of the logic for fitting a
    neural-network.

    We thus only have to define:

    1. How the data is transformed before being fed to our model::

        def create_transformation(self) -> Transformation

    2. How the training happens::

        def create_training_network(self) -> HybridBlock

    3. How the predictions can be made for a batch given a trained network::

        def create_predictor(
            self,
            transformation: Transformation,
            trained_net: HybridBlock,
        ) -> Predictor

    Parameters
    ----------
    freq
        Time granularity of the data
    prediction_length
        Length of the prediction horizon
    trainer
        Trainer object to be used (default: Trainer())
    num_hidden_dimensions
        Number of hidden nodes in each layer (default: [40, 40])
    context_length
        Number of time units that condition the predictions
        (default: None, in which case context_length = prediction_length)
    distr_output
        Distribution to fit (default: StudentTOutput())
    batch_normalization
        Whether to use batch normalization (default: False)
    mean_scaling
        Scale the network input by the data mean and the network output by
        its inverse (default: False)
    dropout
        Dropout rate passed to the training/prediction networks (default: 0.5)
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 100)
    """

    # The validated() decorator makes sure that parameters are checked by
    # Pydantic and allows to serialize/print models. Note that all parameters
    # have defaults except for `freq` and `prediction_length`, which is
    # recommended in GluonTS to allow to compare models easily.
    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        trainer: Trainer = Trainer(),
        num_hidden_dimensions: Optional[List[int]] = None,
        context_length: Optional[int] = None,
        distr_output: DistributionOutput = StudentTOutput(),
        batch_normalization: bool = False,
        mean_scaling: bool = False,
        dropout: float = 0.5,
        num_parallel_samples: int = 100,
    ) -> None:
        """
        Defines an estimator. All parameters should be serializable.
        """
        super().__init__(trainer=trainer)
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        # BUG FIX: the original used `[d > 0 for d in num_hidden_dimensions]`,
        # which is a list and therefore truthy whenever the sequence is
        # non-empty -- non-positive dimensions were never rejected.
        # `all(...)` performs the intended element-wise check.
        assert num_hidden_dimensions is None or all(
            d > 0 for d in num_hidden_dimensions
        ), "Elements of `num_hidden_dimensions` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"
        self.num_hidden_dimensions = (
            num_hidden_dimensions
            if num_hidden_dimensions is not None
            else [40, 40]
        )
        self.prediction_length = prediction_length
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.freq = freq
        self.distr_output = distr_output
        self.batch_normalization = batch_normalization
        self.mean_scaling = mean_scaling
        self.num_parallel_samples = num_parallel_samples
        self.dropout = dropout

    # Here we do only a simple operation to convert the input data to a form
    # that can be digested by our model by only splitting the target in two, a
    # conditioning part and a to-predict part, for each training example.
    # For a more complex transformation example, see the `gluonts.model.deepar`
    # transformation that includes time features, age feature, observed values
    # indicator, ...
    def create_transformation(self) -> Transformation:
        return Chain(
            [RemoveFields(field_names=['del'])]
        )

    # Defines the network; we get to see one batch to initialize it.
    # The network should return at least one tensor that is used as a loss
    # to minimize in the training loop.  Several tensors can be returned,
    # e.g. for analysis; see DeepARTrainingNetwork for an example.
    def create_training_network(self) -> HybridBlock:
        return MLPTrainingNetwork(
            num_hidden_dimensions=self.num_hidden_dimensions,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            distr_output=self.distr_output,
            batch_normalization=self.batch_normalization,
            mean_scaling=self.mean_scaling,
            dropout=self.dropout,
        )

    # We now define how the prediction happens given that we are provided a
    # trained network.
    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        prediction_network = MLPPredictionNetwork(
            num_hidden_dimensions=self.num_hidden_dimensions,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            distr_output=self.distr_output,
            batch_normalization=self.batch_normalization,
            mean_scaling=self.mean_scaling,
            dropout=self.dropout,
            params=trained_network.collect_params(),
            num_parallel_samples=self.num_parallel_samples,
        )
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
        )
| 7,518 | 36.78392 | 108 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartfve/model.v2/lstm_save.py | #from deepar.model import NNModel
#from deepar.model.layers import GaussianLayer
#from deepar.model.loss import gaussian_likelihood
from . import NNModel
from .layers import GaussianLayer, StudentTLayer
from .loss import gaussian_likelihood, gaussian_sampler
from .loss import studentt_likelihood, studentt_sampler
from keras.layers import Input, Dense, Input
from keras.models import Model
from keras.layers import LSTM
from keras import backend as K
import logging
import numpy as np
logger = logging.getLogger('deepar')
class DeepAR(NNModel):
    """
    Keras implementation of a DeepAR-style probabilistic forecaster.

    The network emits, per time step, the parameters ("theta") of an output
    likelihood (Gaussian or Student's t); training minimises the matching
    negative log-likelihood, and `get_sample` draws samples from the fitted
    distribution.

    :param ts_obj: dataset object implementing ``next_batch(batch_size, n_steps)``
    :param steps_per_epoch: generator batches consumed per training epoch
    :param epochs: number of training epochs
    :param distribution: 'Gaussian' or 'StudentT'
    :param optimizer: Keras optimizer identifier
    :param with_custom_nn_structure: optional builder callable returning
        ``(input_shape, inputs, theta)``; defaults to ``DeepAR.basic_structure``
    """

    def __init__(self, ts_obj, steps_per_epoch=50, epochs=100,
                 distribution='Gaussian',
                 optimizer='adam', with_custom_nn_structure=None):
        self.ts_obj = ts_obj
        self.inputs, self.z_sample = None, None
        self.steps_per_epoch = steps_per_epoch
        self.epochs = epochs
        self.optimizer = optimizer
        self.keras_model = None
        # Select the likelihood loss, the parameter-emitting output layer and
        # the matching sampler as one consistent triple.
        if distribution == 'Gaussian':
            self.loss = gaussian_likelihood
            self.distrib = GaussianLayer
            self.sampler = gaussian_sampler
        elif distribution == 'StudentT':
            self.loss = studentt_likelihood
            self.distrib = StudentTLayer
            self.sampler = studentt_sampler
        else:
            # BUG FIX: the original branch was `pass`, which left
            # self.loss/self.distrib/self.sampler undefined and surfaced
            # later as an obscure AttributeError; fail fast instead.
            raise ValueError(
                "unknown distribution %r; expected 'Gaussian' or 'StudentT'"
                % (distribution,))
        if with_custom_nn_structure:
            self.nn_structure = with_custom_nn_structure
        else:
            self.nn_structure = DeepAR.basic_structure
        self._output_layer_name = 'main_output'
        self.get_intermediate = None

    @staticmethod
    def basic_structure(**kwargs):
        """
        Default single-layer LSTM network builder.

        This is the method that needs to be patched when changing NN structure.
        Required kwargs: ``context_len``, ``prediction_len``, ``input_dim``
        and ``distrib`` (the parameter-emitting output layer class).

        :return: input_shape (tuple), inputs (Tensor), theta (a list of
            parameter tensors of the target likelihood)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        distrib = kwargs['distrib']
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = LSTM(4, return_sequences=True)(inputs)
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta

    def instantiate_and_fit(self, verbose=False):
        """Build the network and train it on batches drawn from ``self.ts_obj``.

        NOTE(review): ``self.nn_structure`` is invoked without arguments here,
        so the default ``basic_structure`` (which requires kwargs) would raise
        a KeyError -- this path presumably expects an argument-free custom
        structure; use ``fit`` otherwise.  TODO confirm with callers.
        """
        input_shape, inputs, theta = self.nn_structure()
        # theta[0] is the location parameter (the model's point output);
        # the remaining parameters are closed over by the likelihood loss.
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        # Expose the raw theta outputs of the trained network.
        self.get_intermediate = K.function(inputs=[self.model.input],
                                           outputs=self.model.get_layer(self._output_layer_name).output)

    @property
    def model(self):
        """The underlying Keras model, or None before training."""
        return self.keras_model

    def predict_theta_from_input(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)

    @staticmethod
    def encoder_decoder(**kwargs):
        """
        Stacked-LSTM builder following the DeepAR 2-layer encoder-decoder.

        This is the method that needs to be patched when changing NN structure.
        Required kwargs: ``context_len``, ``prediction_len``, ``input_dim``,
        ``num_cells``, ``num_layers``, ``dropout_rate`` and ``distrib``.

        :return: input_shape (tuple), inputs (Tensor), theta (a list of
            parameter tensors of the target likelihood)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        num_cells = kwargs['num_cells']
        num_layers = kwargs['num_layers']
        dropout_rate = kwargs['dropout_rate']
        distrib = kwargs['distrib']
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = inputs
        for _layer in range(num_layers):
            x = LSTM(num_cells, return_sequences=True, dropout=dropout_rate)(x)
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta

    def fit(self, verbose=False,
            context_len=40, prediction_len=2, input_dim=1,
            num_cells=40, num_layers=2, dropout_rate=0.1):
        """Build the network via ``self.nn_structure`` and train it."""
        input_shape, inputs, theta = self.nn_structure(
            distrib=self.distrib,
            context_len=context_len, prediction_len=prediction_len, input_dim=input_dim,
            num_cells=num_cells, num_layers=num_layers, dropout_rate=dropout_rate)
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        self.get_intermediate = K.function(inputs=[self.model.input],
                                           outputs=self.model.get_layer(self._output_layer_name).output)

    def predict(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)

    def get_sample(self, theta):
        """Draw a sample from the output distribution parameterised by theta."""
        return self.sampler(theta)
def ts_generator(ts_obj, n_steps):
    """Infinite batch generator for Keras' ``fit_generator``.

    :param ts_obj: a Dataset child class object that implements the
        ``next_batch`` method
    :param n_steps: length of the net's input tensor
    :return: yields ``(inputs, targets)`` pairs indefinitely
    """
    # Batch size is fixed at 32; Keras keeps pulling from this generator.
    while True:
        inputs, targets = ts_obj.next_batch(32, n_steps)[:2]
        yield inputs, targets
| 7,098 | 36.363158 | 104 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartfve/model.v2/lstm.py | #from deepar.model import NNModel
#from deepar.model.layers import GaussianLayer
#from deepar.model.loss import gaussian_likelihood
from . import NNModel
from .layers import GaussianLayer, StudentTLayer
from .loss import gaussian_likelihood, gaussian_sampler
from .loss import studentt_likelihood, studentt_sampler
from keras.layers import Input, Dense, Input
from keras.models import Model
from keras.layers import LSTM
from keras import backend as K
import logging
import numpy as np
logger = logging.getLogger('deepar')
class DeepAR(NNModel):
    """
    Keras implementation of a DeepAR-style probabilistic forecaster.

    The network emits, per time step, the parameters ("theta") of an output
    likelihood (Gaussian or Student's t); training minimises the matching
    negative log-likelihood, and `get_sample` draws samples from the fitted
    distribution.

    :param ts_obj: dataset object implementing ``next_batch(batch_size, n_steps)``
    :param steps_per_epoch: generator batches consumed per training epoch
    :param epochs: number of training epochs
    :param distribution: 'Gaussian' or 'StudentT'
    :param optimizer: Keras optimizer identifier
    :param with_custom_nn_structure: optional builder callable returning
        ``(input_shape, inputs, theta)``; defaults to ``DeepAR.basic_structure``
    """

    def __init__(self, ts_obj, steps_per_epoch=50, epochs=100,
                 distribution='Gaussian',
                 optimizer='adam', with_custom_nn_structure=None):
        self.ts_obj = ts_obj
        self.inputs, self.z_sample = None, None
        self.steps_per_epoch = steps_per_epoch
        self.epochs = epochs
        self.optimizer = optimizer
        self.keras_model = None
        # Select the likelihood loss, the parameter-emitting output layer and
        # the matching sampler as one consistent triple.
        if distribution == 'Gaussian':
            self.loss = gaussian_likelihood
            self.distrib = GaussianLayer
            self.sampler = gaussian_sampler
        elif distribution == 'StudentT':
            self.loss = studentt_likelihood
            self.distrib = StudentTLayer
            self.sampler = studentt_sampler
        else:
            # BUG FIX: the original branch was `pass`, which left
            # self.loss/self.distrib/self.sampler undefined and surfaced
            # later as an obscure AttributeError; fail fast instead.
            raise ValueError(
                "unknown distribution %r; expected 'Gaussian' or 'StudentT'"
                % (distribution,))
        if with_custom_nn_structure:
            self.nn_structure = with_custom_nn_structure
        else:
            self.nn_structure = DeepAR.basic_structure
        self._output_layer_name = 'main_output'
        self.get_intermediate = None

    @staticmethod
    def basic_structure(**kwargs):
        """
        Default single-layer LSTM network builder.

        This is the method that needs to be patched when changing NN structure.
        Required kwargs: ``context_len``, ``prediction_len``, ``input_dim``
        and ``distrib`` (the parameter-emitting output layer class).

        :return: input_shape (tuple), inputs (Tensor), theta (a list of
            parameter tensors of the target likelihood)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        distrib = kwargs['distrib']
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = LSTM(4, return_sequences=True)(inputs)
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta

    def instantiate_and_fit(self, verbose=False):
        """Build the network and train it on batches drawn from ``self.ts_obj``.

        NOTE(review): ``self.nn_structure`` is invoked without arguments here,
        so the default ``basic_structure`` (which requires kwargs) would raise
        a KeyError -- this path presumably expects an argument-free custom
        structure; use ``fit`` otherwise.  TODO confirm with callers.
        """
        input_shape, inputs, theta = self.nn_structure()
        # theta[0] is the location parameter (the model's point output);
        # the remaining parameters are closed over by the likelihood loss.
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        # Expose the raw theta outputs of the trained network.
        self.get_intermediate = K.function(inputs=[self.model.input],
                                           outputs=self.model.get_layer(self._output_layer_name).output)

    @property
    def model(self):
        """The underlying Keras model, or None before training."""
        return self.keras_model

    def predict_theta_from_input(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)

    @staticmethod
    def encoder_decoder(**kwargs):
        """
        Stacked-LSTM builder following the DeepAR 2-layer encoder-decoder.

        This is the method that needs to be patched when changing NN structure.
        Required kwargs: ``context_len``, ``prediction_len``, ``input_dim``,
        ``num_cells``, ``num_layers``, ``dropout_rate`` and ``distrib``.

        :return: input_shape (tuple), inputs (Tensor), theta (a list of
            parameter tensors of the target likelihood)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        num_cells = kwargs['num_cells']
        num_layers = kwargs['num_layers']
        dropout_rate = kwargs['dropout_rate']
        distrib = kwargs['distrib']
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = inputs
        for _layer in range(num_layers):
            x = LSTM(num_cells, return_sequences=True, dropout=dropout_rate)(x)
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta

    def fit(self, verbose=False,
            context_len=20, prediction_len=2, input_dim=1,
            num_cells=40, num_layers=2, dropout_rate=0.1):
        """Build the network via ``self.nn_structure`` and train it.

        Note this variant defaults to ``context_len=20`` and prints a model
        summary before training.
        """
        input_shape, inputs, theta = self.nn_structure(
            distrib=self.distrib,
            context_len=context_len, prediction_len=prediction_len, input_dim=input_dim,
            num_cells=num_cells, num_layers=num_layers, dropout_rate=dropout_rate)
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.summary()
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        self.get_intermediate = K.function(inputs=[self.model.input],
                                           outputs=self.model.get_layer(self._output_layer_name).output)

    def predict(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)

    def get_sample(self, theta):
        """Draw a sample from the output distribution parameterised by theta."""
        return self.sampler(theta)
def ts_generator(ts_obj, n_steps):
    """Infinite batch generator for Keras' ``fit_generator``.

    :param ts_obj: a Dataset child class object that implements the
        ``next_batch`` method
    :param n_steps: length of the net's input tensor
    :return: yields ``(inputs, targets)`` pairs indefinitely
    """
    # Batch size is fixed at 32; Keras keeps pulling from this generator.
    while True:
        inputs, targets = ts_obj.next_batch(32, n_steps)[:2]
        yield inputs, targets
| 7,229 | 36.076923 | 104 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartfve/model.v2/layers.py | import tensorflow as tf
from keras import backend as K
from keras.initializers import glorot_normal
from keras.layers import Layer
class GaussianLayer(Layer):
    """
    Keras layer emitting Gaussian parameters (mu, sigma) from its input
    via two independent dense projections over the last feature axis.
    """
    def __init__(self, output_dim, **kwargs):
        # output_dim: size of each emitted parameter vector per time step.
        self.output_dim = output_dim
        # Placeholders; the real weights are created in build().
        self.kernel_1, self.kernel_2, self.bias_1, self.bias_2 = [], [], [], []
        super(GaussianLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        """Create the (kernel, bias) pairs for the mu and sigma heads."""
        # Project from the last feature dimension of the incoming tensor.
        n_weight_rows = input_shape[-1]
        self.kernel_1 = self.add_weight(name='kernel_1',
                                        shape=(n_weight_rows, self.output_dim),
                                        initializer=glorot_normal(),
                                        trainable=True)
        self.kernel_2 = self.add_weight(name='kernel_2',
                                        shape=(n_weight_rows, self.output_dim),
                                        initializer=glorot_normal(),
                                        trainable=True)
        self.bias_1 = self.add_weight(name='bias_1',
                                      shape=(self.output_dim,),
                                      initializer=glorot_normal(),
                                      trainable=True)
        self.bias_2 = self.add_weight(name='bias_2',
                                      shape=(self.output_dim,),
                                      initializer=glorot_normal(),
                                      trainable=True)
        super(GaussianLayer, self).build(input_shape)
    def call(self, x):
        """Return ``[mu, sigma]`` computed from input tensor ``x``."""
        #output_mu = K.dot(x, self.kernel_1) + self.bias_1
        #output_sig = K.dot(x, self.kernel_2) + self.bias_2
        output_mu = tf.matmul(x, self.kernel_1) + self.bias_1
        output_sig = tf.matmul(x, self.kernel_2) + self.bias_2
        # Softplus (log(1 + e^x)) keeps sigma strictly positive; the 1e-6
        # epsilon guards against underflow to exactly zero.
        output_sig_pos = K.log(1 + K.exp(output_sig)) + 1e-06
        return [output_mu, output_sig_pos]
    def compute_output_shape(self, input_shape):
        """
        The assumption is the output ts is always one-dimensional
        """
        # Two outputs (mu, sigma), each shaped (batch, time, output_dim).
        return [(input_shape[0], input_shape[1],self.output_dim),
                (input_shape[0], input_shape[1],self.output_dim)]
        #return [(input_shape[0], self.output_dim),
        #        (input_shape[0], self.output_dim)]
# (input_shape[0], self.output_dim)]
#
# studentT
#
class StudentTLayer(Layer):
    """
    Keras layer emitting Student's t parameters (mu, sigma, nu) from its
    input via three independent dense projections.

    mu
        Tensor containing the means, of shape `(*batch_shape, *event_shape)`.
    sigma
        Tensor containing the standard deviations, of shape
        `(*batch_shape, *event_shape)`.
    nu
        Nonnegative tensor containing the degrees of freedom of the distribution,
        of shape `(*batch_shape, *event_shape)`.
    """
    def __init__(self, output_dim, **kwargs):
        # output_dim: size of each emitted parameter vector per time step.
        self.output_dim = output_dim
        # Three parameter heads: mu, sigma, nu.
        self.num_parameters = 3
        # Placeholders; the real weights are created in build().
        self.kernels = [[] for x in range(3)]
        self.biases = [[] for x in range(3)]
        super(StudentTLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        """
        input: shape (NTC)
        """
        # Project from the channel (feature) dimension of the NTC input.
        n_weight_rows = input_shape[2]
        for i in range(self.num_parameters):
            self.kernels[i] = self.add_weight(name='kernel_%d'%i,
                                              shape=(n_weight_rows, self.output_dim),
                                              initializer=glorot_normal(),
                                              trainable=True)
        for i in range(self.num_parameters):
            self.biases[i] = self.add_weight(name='bias_%d'%i,
                                             shape=(self.output_dim,),
                                             initializer=glorot_normal(),
                                             trainable=True)
        super(StudentTLayer, self).build(input_shape)
    def call(self, x):
        """
        return mu, sigma, nu
        """
        output_mu = tf.matmul(x, self.kernels[0]) + self.biases[0]
        output_sig = tf.matmul(x, self.kernels[1]) + self.biases[1]
        # Softplus keeps sigma strictly positive; 1e-6 guards underflow.
        output_sig_pos = K.log(1 + K.exp(output_sig)) + 1e-06
        output_nu = tf.matmul(x, self.kernels[2]) + self.biases[2]
        # NOTE(review): the +2.0 offset presumably keeps nu > 2 so the
        # distribution's variance stays finite -- confirm against the
        # studentt_likelihood/sampler implementations.
        output_nu_pos = K.log(1 + K.exp(output_nu)) + 1e-06 + 2.0
        return [output_mu, output_sig_pos, output_nu_pos]
    def compute_output_shape(self, input_shape):
        """
        The assumption is the output ts is always one-dimensional
        """
        # Three outputs (mu, sigma, nu), each (batch, time, output_dim).
        return [(input_shape[0], input_shape[1], self.output_dim),
                (input_shape[0], input_shape[1], self.output_dim),
                (input_shape[0], input_shape[1], self.output_dim)]
| 4,517 | 38.982301 | 81 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartfve/model.v0/lstm.py | #from deepar.model import NNModel
#from deepar.model.layers import GaussianLayer
#from deepar.model.loss import gaussian_likelihood
from . import NNModel
from .layers import GaussianLayer
from .loss import gaussian_likelihood
from keras.layers import Input, Dense, Input
from keras.models import Model
from keras.layers import LSTM
from keras import backend as K
import logging
import numpy as np
logger = logging.getLogger('deepar')
class DeepAR(NNModel):
    """
    Keras implementation of a DeepAR-style Gaussian forecaster (v0).

    The network emits, per time step, the (mu, sigma) parameters of a
    Gaussian likelihood; training minimises the negative log-likelihood.

    :param ts_obj: dataset object implementing ``next_batch(batch_size, n_steps)``
    :param steps_per_epoch: generator batches consumed per training epoch
    :param epochs: number of training epochs
    :param loss: likelihood-loss factory taking the scale tensor
    :param optimizer: Keras optimizer identifier
    :param with_custom_nn_structure: optional builder callable returning
        ``(input_shape, inputs, [loc, scale])``; defaults to
        ``DeepAR.basic_structure``
    """

    def __init__(self, ts_obj, steps_per_epoch=50, epochs=100, loss=gaussian_likelihood,
                 optimizer='adam', with_custom_nn_structure=None):
        self.ts_obj = ts_obj
        self.inputs, self.z_sample = None, None
        self.steps_per_epoch = steps_per_epoch
        self.epochs = epochs
        self.loss = loss
        self.optimizer = optimizer
        self.keras_model = None
        if with_custom_nn_structure:
            self.nn_structure = with_custom_nn_structure
        else:
            self.nn_structure = DeepAR.basic_structure
        self._output_layer_name = 'main_output'
        self.get_intermediate = None

    @staticmethod
    def basic_structure(**kwargs):
        """
        Default single-layer LSTM network builder.

        This is the method that needs to be patched when changing NN structure.

        BUG FIX: the original indexed ``kwargs[...]`` directly, so the
        no-argument call in ``instantiate_and_fit`` raised KeyError; the
        kwargs now default to the same values ``fit`` documents
        (context_len=40, prediction_len=2, input_dim=1), while explicit
        kwargs are still honoured.

        :return: inputs_shape (tuple), inputs (Tensor), [loc, scale] (a list
            of theta parameters of the target likelihood)
        """
        context_len = kwargs.get('context_len', 40)
        prediction_len = kwargs.get('prediction_len', 2)
        input_dim = kwargs.get('input_dim', 1)
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = LSTM(4, return_sequences=True)(inputs)
        loc, scale = GaussianLayer(1, name='main_output')(x)
        return input_shape, inputs, [loc, scale]

    def instantiate_and_fit(self, verbose=False):
        """Build the network and train it on batches drawn from ``self.ts_obj``."""
        input_shape, inputs, theta = self.nn_structure()
        # theta[0] is the location (point output); theta[1] (the scale) is
        # closed over by the likelihood loss.
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        # Expose the raw (mu, sigma) outputs of the trained network.
        self.get_intermediate = K.function(inputs=[self.model.input],
                                           outputs=self.model.get_layer(self._output_layer_name).output)

    @staticmethod
    def encoder_decoder(**kwargs):
        """
        Stacked-LSTM builder following the DeepAR 2-layer encoder-decoder.

        This is the method that needs to be patched when changing NN structure.
        Required kwargs: ``context_len``, ``prediction_len``, ``input_dim``,
        ``num_cells``, ``num_layers`` and ``dropout_rate`` (all supplied by
        ``fit``).

        :return: inputs_shape (tuple), inputs (Tensor), [loc, scale] (a list
            of theta parameters of the target likelihood)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        num_cells = kwargs['num_cells']
        num_layers = kwargs['num_layers']
        dropout_rate = kwargs['dropout_rate']
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = inputs
        for _layer in range(num_layers):
            x = LSTM(num_cells, return_sequences=True, dropout=dropout_rate)(x)
        loc, scale = GaussianLayer(1, name='main_output')(x)
        return input_shape, inputs, [loc, scale]

    def fit(self, verbose=False,
            context_len=40, prediction_len=2, input_dim=1,
            num_cells=40, num_layers=2, dropout_rate=0.1):
        """Build the network via ``self.nn_structure`` and train it."""
        input_shape, inputs, theta = self.nn_structure(
            context_len=context_len, prediction_len=prediction_len, input_dim=input_dim,
            num_cells=num_cells, num_layers=num_layers, dropout_rate=dropout_rate)
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        self.get_intermediate = K.function(inputs=[self.model.input],
                                           outputs=self.model.get_layer(self._output_layer_name).output)

    @property
    def model(self):
        """The underlying Keras model, or None before training."""
        return self.keras_model

    def predict_theta_from_input(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)
def ts_generator(ts_obj, n_steps):
    """Infinite batch generator for Keras' ``fit_generator``.

    :param ts_obj: a Dataset child class object that implements the
        ``next_batch`` method
    :param n_steps: length of the net's input tensor
    :return: yields ``(inputs, targets)`` pairs indefinitely
    """
    # Batch size is fixed at 32; Keras keeps pulling from this generator.
    while True:
        inputs, targets = ts_obj.next_batch(32, n_steps)[:2]
        yield inputs, targets
| 5,812 | 37.243421 | 104 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartfve/model.v0/layers.py | from keras import backend as K
from keras.initializers import glorot_normal
from keras.layers import Layer
class GaussianLayer(Layer):
    """
    Keras layer emitting Gaussian parameters (mu, sigma) from its input
    via two independent dense projections (v0: 2-D output shapes).
    """
    def __init__(self, output_dim, **kwargs):
        # output_dim: size of each emitted parameter vector.
        self.output_dim = output_dim
        # Placeholders; the real weights are created in build().
        self.kernel_1, self.kernel_2, self.bias_1, self.bias_2 = [], [], [], []
        super(GaussianLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        """Create the (kernel, bias) pairs for the mu and sigma heads."""
        # Project from the channel (feature) dimension of an NTC input.
        n_weight_rows = input_shape[2]
        self.kernel_1 = self.add_weight(name='kernel_1',
                                        shape=(n_weight_rows, self.output_dim),
                                        initializer=glorot_normal(),
                                        trainable=True)
        self.kernel_2 = self.add_weight(name='kernel_2',
                                        shape=(n_weight_rows, self.output_dim),
                                        initializer=glorot_normal(),
                                        trainable=True)
        self.bias_1 = self.add_weight(name='bias_1',
                                      shape=(self.output_dim,),
                                      initializer=glorot_normal(),
                                      trainable=True)
        self.bias_2 = self.add_weight(name='bias_2',
                                      shape=(self.output_dim,),
                                      initializer=glorot_normal(),
                                      trainable=True)
        super(GaussianLayer, self).build(input_shape)
    def call(self, x):
        """Return ``[mu, sigma]`` computed from input tensor ``x``."""
        output_mu = K.dot(x, self.kernel_1) + self.bias_1
        output_sig = K.dot(x, self.kernel_2) + self.bias_2
        # Softplus (log(1 + e^x)) keeps sigma strictly positive; the 1e-6
        # epsilon guards against underflow to exactly zero.
        output_sig_pos = K.log(1 + K.exp(output_sig)) + 1e-06
        return [output_mu, output_sig_pos]
    def compute_output_shape(self, input_shape):
        """
        The assumption is the output ts is always one-dimensional
        """
        # Two outputs (mu, sigma), each shaped (batch, output_dim).
        return [(input_shape[0], self.output_dim), (input_shape[0], self.output_dim)]
| 1,954 | 44.465116 | 85 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartfve/model.v1/lstm_save.py | #from deepar.model import NNModel
#from deepar.model.layers import GaussianLayer
#from deepar.model.loss import gaussian_likelihood
from . import NNModel
from .layers import GaussianLayer, StudentTLayer
from .loss import gaussian_likelihood, gaussian_sampler
from .loss import studentt_likelihood, studentt_sampler
from keras.layers import Input, Dense, Input
from keras.models import Model
from keras.layers import LSTM
from keras import backend as K
import logging
import numpy as np
logger = logging.getLogger('deepar')
class DeepAR(NNModel):
    """
    Keras implementation of a DeepAR-style probabilistic forecaster.

    The network emits, per time step, the parameters ("theta") of an output
    likelihood (Gaussian or Student's t); training minimises the matching
    negative log-likelihood, and `get_sample` draws samples from the fitted
    distribution.

    :param ts_obj: dataset object implementing ``next_batch(batch_size, n_steps)``
    :param steps_per_epoch: generator batches consumed per training epoch
    :param epochs: number of training epochs
    :param distribution: 'Gaussian' or 'StudentT'
    :param optimizer: Keras optimizer identifier
    :param with_custom_nn_structure: optional builder callable returning
        ``(input_shape, inputs, theta)``; defaults to ``DeepAR.basic_structure``
    """

    def __init__(self, ts_obj, steps_per_epoch=50, epochs=100,
                 distribution='Gaussian',
                 optimizer='adam', with_custom_nn_structure=None):
        self.ts_obj = ts_obj
        self.inputs, self.z_sample = None, None
        self.steps_per_epoch = steps_per_epoch
        self.epochs = epochs
        self.optimizer = optimizer
        self.keras_model = None
        # Select the likelihood loss, the parameter-emitting output layer and
        # the matching sampler as one consistent triple.
        if distribution == 'Gaussian':
            self.loss = gaussian_likelihood
            self.distrib = GaussianLayer
            self.sampler = gaussian_sampler
        elif distribution == 'StudentT':
            self.loss = studentt_likelihood
            self.distrib = StudentTLayer
            self.sampler = studentt_sampler
        else:
            # BUG FIX: the original branch was `pass`, which left
            # self.loss/self.distrib/self.sampler undefined and surfaced
            # later as an obscure AttributeError; fail fast instead.
            raise ValueError(
                "unknown distribution %r; expected 'Gaussian' or 'StudentT'"
                % (distribution,))
        if with_custom_nn_structure:
            self.nn_structure = with_custom_nn_structure
        else:
            self.nn_structure = DeepAR.basic_structure
        self._output_layer_name = 'main_output'
        self.get_intermediate = None

    @staticmethod
    def basic_structure(**kwargs):
        """
        Default single-layer LSTM network builder.

        This is the method that needs to be patched when changing NN structure.
        Required kwargs: ``context_len``, ``prediction_len``, ``input_dim``
        and ``distrib`` (the parameter-emitting output layer class).

        :return: input_shape (tuple), inputs (Tensor), theta (a list of
            parameter tensors of the target likelihood)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        distrib = kwargs['distrib']
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = LSTM(4, return_sequences=True)(inputs)
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta

    def instantiate_and_fit(self, verbose=False):
        """Build the network and train it on batches drawn from ``self.ts_obj``.

        NOTE(review): ``self.nn_structure`` is invoked without arguments here,
        so the default ``basic_structure`` (which requires kwargs) would raise
        a KeyError -- this path presumably expects an argument-free custom
        structure; use ``fit`` otherwise.  TODO confirm with callers.
        """
        input_shape, inputs, theta = self.nn_structure()
        # theta[0] is the location parameter (the model's point output);
        # the remaining parameters are closed over by the likelihood loss.
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        # Expose the raw theta outputs of the trained network.
        self.get_intermediate = K.function(inputs=[self.model.input],
                                           outputs=self.model.get_layer(self._output_layer_name).output)

    @property
    def model(self):
        """The underlying Keras model, or None before training."""
        return self.keras_model

    def predict_theta_from_input(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)

    @staticmethod
    def encoder_decoder(**kwargs):
        """
        Stacked-LSTM builder following the DeepAR 2-layer encoder-decoder.

        This is the method that needs to be patched when changing NN structure.
        Required kwargs: ``context_len``, ``prediction_len``, ``input_dim``,
        ``num_cells``, ``num_layers``, ``dropout_rate`` and ``distrib``.

        :return: input_shape (tuple), inputs (Tensor), theta (a list of
            parameter tensors of the target likelihood)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        num_cells = kwargs['num_cells']
        num_layers = kwargs['num_layers']
        dropout_rate = kwargs['dropout_rate']
        distrib = kwargs['distrib']
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = inputs
        for _layer in range(num_layers):
            x = LSTM(num_cells, return_sequences=True, dropout=dropout_rate)(x)
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta

    def fit(self, verbose=False,
            context_len=40, prediction_len=2, input_dim=1,
            num_cells=40, num_layers=2, dropout_rate=0.1):
        """Build the network via ``self.nn_structure`` and train it."""
        input_shape, inputs, theta = self.nn_structure(
            distrib=self.distrib,
            context_len=context_len, prediction_len=prediction_len, input_dim=input_dim,
            num_cells=num_cells, num_layers=num_layers, dropout_rate=dropout_rate)
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        self.get_intermediate = K.function(inputs=[self.model.input],
                                           outputs=self.model.get_layer(self._output_layer_name).output)

    def predict(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)

    def get_sample(self, theta):
        """Draw a sample from the output distribution parameterised by theta."""
        return self.sampler(theta)
def ts_generator(ts_obj, n_steps):
    """Infinite batch generator for Keras training loops.

    :param ts_obj: Dataset-like object exposing ``next_batch(batch_size, n_steps)``
    :param n_steps: length of the net's input tensor
    :return: yields ``(inputs, targets)`` pairs forever
    """
    while True:
        batch = ts_obj.next_batch(32, n_steps)  # fixed batch size of 32
        yield batch[0], batch[1]
| 7,098 | 36.363158 | 104 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartfve/model.v1/lstm.py | #from deepar.model import NNModel
#from deepar.model.layers import GaussianLayer
#from deepar.model.loss import gaussian_likelihood
from . import NNModel
from .layers import GaussianLayer, StudentTLayer
from .loss import gaussian_likelihood, gaussian_sampler
from .loss import studentt_likelihood, studentt_sampler
from keras.layers import Input, Dense, Input
from keras.models import Model
from keras.layers import LSTM
from keras import backend as K
import logging
import numpy as np
logger = logging.getLogger('deepar')
class DeepAR(NNModel):
    """DeepAR-style probabilistic forecaster built on Keras LSTMs.

    The network's final layer emits the parameter tensors (theta) of the
    likelihood selected by ``distribution``; the matching loss and sampler
    callables come from ``.loss`` and the output layer from ``.layers``.
    """
    def __init__(self, ts_obj, steps_per_epoch=50, epochs=100,
                 distribution = 'Gaussian',
                 optimizer='adam', with_custom_nn_structure=None):
        # ts_obj must implement next_batch(batch_size, n_steps); it is
        # consumed through the module-level ts_generator.
        self.ts_obj = ts_obj
        self.inputs, self.z_sample = None, None
        self.steps_per_epoch = steps_per_epoch
        self.epochs = epochs
        self.optimizer = optimizer
        self.keras_model = None
        # Select the likelihood triple: loss fn, Keras output layer, sampler.
        if distribution == 'Gaussian':
            self.loss = gaussian_likelihood
            self.distrib = GaussianLayer
            self.sampler = gaussian_sampler
        elif distribution == 'StudentT':
            self.loss = studentt_likelihood
            self.distrib = StudentTLayer
            self.sampler = studentt_sampler
        else:
            # NOTE(review): an unknown `distribution` silently leaves
            # self.loss/self.distrib/self.sampler unset; later use raises
            # AttributeError.
            pass
        if with_custom_nn_structure:
            self.nn_structure = with_custom_nn_structure
        else:
            self.nn_structure = DeepAR.basic_structure
        self._output_layer_name = 'main_output'
        self.get_intermediate = None
    @staticmethod
    def basic_structure(**kwargs):
        """
        This is the method that needs to be patched when changing NN structure
        :return: inputs_shape (tuple), inputs (Tensor), [loc, scale] (a list of theta parameters
        of the target likelihood)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        distrib = kwargs['distrib']
        # One sequence covers conditioning range plus prediction range.
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = LSTM(4, return_sequences=True)(inputs)
        #x = Dense(3, activation='relu')(x)
        #loc, scale = GaussianLayer(1, name='main_output')(x)
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta
    def instantiate_and_fit(self, verbose=False):
        """Build via self.nn_structure, train, and cache the theta extractor.

        NOTE(review): self.nn_structure() is called with no kwargs, but
        basic_structure reads kwargs['context_len'] etc., so this path
        raises KeyError; fit() below passes the required kwargs.
        """
        input_shape, inputs, theta = self.nn_structure()
        model = Model(inputs, theta[0])
        # Loss closes over the remaining theta tensors (e.g. sigma).
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        # Backend function mapping network input -> output-layer theta.
        self.get_intermediate = K.function(inputs=[self.model.input],
                                           outputs=self.model.get_layer(self._output_layer_name).output)
    @property
    def model(self):
        """The trained Keras Model (None until a fit method has run)."""
        return self.keras_model
    def predict_theta_from_input(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)
    @staticmethod
    def encoder_decoder(**kwargs):
        #context_len=40, prediction_len=2, input_dim=1,
        #    num_cells = 40, num_layers = 2, dropout_rate = 0.1):
        """
        follow the deepar 2-layers lstm encoder-decoder
        This is the method that needs to be patched when changing NN structure
        :return: inputs_shape (tuple), inputs (Tensor), [loc, scale] (a list of theta parameters
        of the target likelihood)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        num_cells = kwargs['num_cells']
        num_layers = kwargs['num_layers']
        dropout_rate = kwargs['dropout_rate']
        distrib = kwargs['distrib']
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = inputs
        # Stacked LSTMs, all returning full sequences for per-step theta.
        for l in range(num_layers):
            x = LSTM(num_cells, return_sequences=True, dropout=dropout_rate)(x)
        #x = Dense(3, activation='relu')(x)
        #loc, scale = GaussianLayer(1, name='main_output')(x)
        #return input_shape, inputs, [loc, scale]
        theta = distrib(1, name='main_output')(x)
        #theta = StudentTLayer(1, name='main_output')(x)
        return input_shape, inputs, theta
    def fit(self, verbose=False,
            context_len=20, prediction_len=2, input_dim=1,
            num_cells = 40, num_layers = 2, dropout_rate = 0.1):
        """Build the configured network, train it on generated batches, and
        cache a backend function that returns the output-layer theta."""
        input_shape, inputs, theta = self.nn_structure(
                distrib = self.distrib,
                context_len=context_len, prediction_len=prediction_len, input_dim=input_dim,
                num_cells = num_cells, num_layers = num_layers, dropout_rate = dropout_rate)
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.summary()
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        self.get_intermediate = K.function(inputs=[self.model.input],
                                           outputs=self.model.get_layer(self._output_layer_name).output)
    def predict(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)
    def get_sample(self, theta):
        """Draw a random sample from the likelihood parameterized by ``theta``."""
        return self.sampler(theta)
def ts_generator(ts_obj, n_steps):
    """Infinite batch generator for Keras training loops.

    :param ts_obj: Dataset-like object exposing ``next_batch(batch_size, n_steps)``
    :param n_steps: length of the net's input tensor
    :return: yields ``(inputs, targets)`` pairs forever
    """
    while True:
        batch = ts_obj.next_batch(32, n_steps)  # fixed batch size of 32
        yield batch[0], batch[1]
| 7,181 | 36.020619 | 104 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartfve/model.v1/layers.py | import tensorflow as tf
from keras import backend as K
from keras.initializers import glorot_normal
from keras.layers import Layer
class GaussianLayer(Layer):
    """Output layer mapping features to Gaussian parameters (mu, sigma).

    ``call`` returns ``[mu, sigma]`` where sigma is made strictly positive
    through a softplus transform.
    """
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        self.kernel_1, self.kernel_2, self.bias_1, self.bias_2 = [], [], [], []
        super(GaussianLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        feature_dim = input_shape[-1]
        def _new_weight(name, shape):
            # All four weights share the same initializer/trainability.
            return self.add_weight(name=name, shape=shape,
                                   initializer=glorot_normal(),
                                   trainable=True)
        # Creation order (kernel_1, kernel_2, bias_1, bias_2) is preserved
        # so Keras weight bookkeeping matches the original layer.
        self.kernel_1 = _new_weight('kernel_1', (feature_dim, self.output_dim))
        self.kernel_2 = _new_weight('kernel_2', (feature_dim, self.output_dim))
        self.bias_1 = _new_weight('bias_1', (self.output_dim,))
        self.bias_2 = _new_weight('bias_2', (self.output_dim,))
        super(GaussianLayer, self).build(input_shape)
    def call(self, x):
        output_mu = tf.matmul(x, self.kernel_1) + self.bias_1
        raw_sigma = tf.matmul(x, self.kernel_2) + self.bias_2
        # softplus + epsilon keeps sigma strictly positive
        output_sig_pos = K.log(1 + K.exp(raw_sigma)) + 1e-06
        return [output_mu, output_sig_pos]
    def compute_output_shape(self, input_shape):
        """Both outputs keep (batch, time) and end in ``output_dim``."""
        out_shape = (input_shape[0], input_shape[1], self.output_dim)
        return [out_shape, out_shape]
#
# studentT
#
class StudentTLayer(Layer):
    """Output layer producing per-step Student-t parameters.

    mu
        Tensor containing the means, of shape `(*batch_shape, *event_shape)`.
    sigma
        Tensor containing the standard deviations, of shape
        `(*batch_shape, *event_shape)`.
    nu
        Nonnegative tensor containing the degrees of freedom of the distribution,
        of shape `(*batch_shape, *event_shape)`.
    """
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        self.num_parameters = 3  # mu, sigma, nu
        # Consistency fix: size the weight lists from num_parameters instead
        # of duplicating the literal 3.
        self.kernels = [[] for _ in range(self.num_parameters)]
        self.biases = [[] for _ in range(self.num_parameters)]
        super(StudentTLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        """
        input: shape (NTC) - one kernel/bias pair per distribution parameter
        """
        n_weight_rows = input_shape[2]
        for i in range(self.num_parameters):
            self.kernels[i] = self.add_weight(name='kernel_%d'%i,
                                              shape=(n_weight_rows, self.output_dim),
                                              initializer=glorot_normal(),
                                              trainable=True)
        for i in range(self.num_parameters):
            self.biases[i] = self.add_weight(name='bias_%d'%i,
                                             shape=(self.output_dim,),
                                             initializer=glorot_normal(),
                                             trainable=True)
        super(StudentTLayer, self).build(input_shape)
    def call(self, x):
        """
        return mu, sigma, nu
        """
        output_mu = tf.matmul(x, self.kernels[0]) + self.biases[0]
        output_sig = tf.matmul(x, self.kernels[1]) + self.biases[1]
        # softplus + epsilon keeps sigma strictly positive
        output_sig_pos = K.log(1 + K.exp(output_sig)) + 1e-06
        output_nu = tf.matmul(x, self.kernels[2]) + self.biases[2]
        # shifted softplus keeps nu > 2 (finite variance)
        output_nu_pos = K.log(1 + K.exp(output_nu)) + 1e-06 + 2.0
        return [output_mu, output_sig_pos, output_nu_pos]
    def compute_output_shape(self, input_shape):
        """
        The assumption is the output ts is always one-dimensional
        """
        out_shape = (input_shape[0], input_shape[1], self.output_dim)
        return [out_shape, out_shape, out_shape]
| 4,517 | 38.982301 | 81 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartfve/model_eager/lstm_save.py | #from deepar.model import NNModel
#from deepar.model.layers import GaussianLayer
#from deepar.model.loss import gaussian_likelihood
from . import NNModel
from .layers import GaussianLayer, StudentTLayer
from .loss import gaussian_likelihood, gaussian_sampler
from .loss import studentt_likelihood, studentt_sampler
from tensorflow.keras.layers import Input, Dense, Input
from tensorflow.keras.models import Model
from tensorflow.keras.layers import LSTM
from tensorflow.keras import backend as K
import logging
import numpy as np
logger = logging.getLogger('deepar')
class DeepAR(NNModel):
    """DeepAR-style probabilistic forecaster built on tf.keras LSTMs.

    The network's final layer emits the parameter tensors (theta) of the
    likelihood selected by ``distribution``; the matching loss and sampler
    callables come from ``.loss`` and the output layer from ``.layers``.
    """
    def __init__(self, ts_obj, steps_per_epoch=50, epochs=100,
                 distribution = 'Gaussian',
                 optimizer='adam', with_custom_nn_structure=None):
        # ts_obj must implement next_batch(batch_size, n_steps); it is
        # consumed through the module-level ts_generator.
        self.ts_obj = ts_obj
        self.inputs, self.z_sample = None, None
        self.steps_per_epoch = steps_per_epoch
        self.epochs = epochs
        self.optimizer = optimizer
        self.keras_model = None
        # Select the likelihood triple: loss fn, Keras output layer, sampler.
        if distribution == 'Gaussian':
            self.loss = gaussian_likelihood
            self.distrib = GaussianLayer
            self.sampler = gaussian_sampler
        elif distribution == 'StudentT':
            self.loss = studentt_likelihood
            self.distrib = StudentTLayer
            self.sampler = studentt_sampler
        else:
            # NOTE(review): an unknown `distribution` silently leaves
            # self.loss/self.distrib/self.sampler unset; later use raises
            # AttributeError.
            pass
        if with_custom_nn_structure:
            self.nn_structure = with_custom_nn_structure
        else:
            self.nn_structure = DeepAR.basic_structure
        self._output_layer_name = 'main_output'
        self.get_intermediate = None
    @staticmethod
    def basic_structure(**kwargs):
        """
        This is the method that needs to be patched when changing NN structure
        :return: inputs_shape (tuple), inputs (Tensor), [loc, scale] (a list of theta parameters
        of the target likelihood)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        distrib = kwargs['distrib']
        # One sequence covers conditioning range plus prediction range.
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = LSTM(4, return_sequences=True)(inputs)
        #x = Dense(3, activation='relu')(x)
        #loc, scale = GaussianLayer(1, name='main_output')(x)
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta
    def instantiate_and_fit(self, verbose=False):
        """Build via self.nn_structure, train, and cache the theta extractor.

        NOTE(review): self.nn_structure() is called with no kwargs, but
        basic_structure reads kwargs['context_len'] etc., so this path
        raises KeyError; fit() below passes the required kwargs.
        """
        input_shape, inputs, theta = self.nn_structure()
        model = Model(inputs, theta[0])
        # Loss closes over the remaining theta tensors (e.g. sigma).
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        # Backend function mapping network input -> output-layer theta.
        self.get_intermediate = K.function(inputs=[self.model.input],
                                           outputs=self.model.get_layer(self._output_layer_name).output)
    @property
    def model(self):
        """The trained Keras Model (None until a fit method has run)."""
        return self.keras_model
    def predict_theta_from_input(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)
    @staticmethod
    def encoder_decoder(**kwargs):
        #context_len=40, prediction_len=2, input_dim=1,
        #    num_cells = 40, num_layers = 2, dropout_rate = 0.1):
        """
        follow the deepar 2-layers lstm encoder-decoder
        This is the method that needs to be patched when changing NN structure
        :return: inputs_shape (tuple), inputs (Tensor), [loc, scale] (a list of theta parameters
        of the target likelihood)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        num_cells = kwargs['num_cells']
        num_layers = kwargs['num_layers']
        dropout_rate = kwargs['dropout_rate']
        distrib = kwargs['distrib']
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = inputs
        # Stacked LSTMs, all returning full sequences for per-step theta.
        for l in range(num_layers):
            x = LSTM(num_cells, return_sequences=True, dropout=dropout_rate)(x)
        #x = Dense(3, activation='relu')(x)
        #loc, scale = GaussianLayer(1, name='main_output')(x)
        #return input_shape, inputs, [loc, scale]
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta
    def fit(self, verbose=False,
            context_len=40, prediction_len=2, input_dim=1,
            num_cells = 40, num_layers = 2, dropout_rate = 0.1):
        """Build the configured network, train it on generated batches, and
        cache a backend function that returns the output-layer theta."""
        input_shape, inputs, theta = self.nn_structure(
                distrib = self.distrib,
                context_len=context_len, prediction_len=prediction_len, input_dim=input_dim,
                num_cells = num_cells, num_layers = num_layers, dropout_rate = dropout_rate)
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        self.get_intermediate = K.function(inputs=[self.model.input],
                                           outputs=self.model.get_layer(self._output_layer_name).output)
    def predict(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)
    def get_sample(self, theta):
        """Draw a random sample from the likelihood parameterized by ``theta``."""
        return self.sampler(theta)
def ts_generator(ts_obj, n_steps):
    """Infinite batch generator for Keras training loops.

    :param ts_obj: Dataset-like object exposing ``next_batch(batch_size, n_steps)``
    :param n_steps: length of the net's input tensor
    :return: yields ``(inputs, targets)`` pairs forever
    """
    while True:
        batch = ts_obj.next_batch(32, n_steps)  # fixed batch size of 32
        yield batch[0], batch[1]
| 7,142 | 36.594737 | 104 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartfve/model_eager/lstm.py | #from deepar.model import NNModel
#from deepar.model.layers import GaussianLayer
#from deepar.model.loss import gaussian_likelihood
from . import NNModel
from .layers import GaussianLayer, StudentTLayer
from .loss import gaussian_likelihood, gaussian_sampler
from .loss import studentt_likelihood, studentt_sampler
from tensorflow.keras.layers import Input, Dense, Input
from tensorflow.keras.models import Model
from tensorflow.keras.layers import LSTM
from tensorflow.keras import backend as K
import logging
import numpy as np
import time
logger = logging.getLogger('deepar')
_debug_itercnt = 0
_debug_profile_start = 5
class DeepAR(NNModel):
    """DeepAR-style probabilistic forecaster built on tf.keras LSTMs.

    Variant instrumented for training-throughput measurement: ``fit`` can
    either stream batches from a generator (``use_generator=True``) or
    materialize the whole dataset up front and time ``model.fit``.
    """
    def __init__(self, ts_obj, steps_per_epoch=50, epochs=100,
                 distribution = 'Gaussian',
                 optimizer='adam', with_custom_nn_structure=None,
                 use_generator=False):
        # ts_obj must implement next_batch(batch_size, n_steps) and, for the
        # non-generator path, expose n_steps / n_features / n_batches.
        self.ts_obj = ts_obj
        self.inputs, self.z_sample = None, None
        self.steps_per_epoch = steps_per_epoch
        self.epochs = epochs
        self.optimizer = optimizer
        self.keras_model = None
        self.use_generator = use_generator
        # Select the likelihood triple: loss fn, Keras output layer, sampler.
        if distribution == 'Gaussian':
            self.loss = gaussian_likelihood
            self.distrib = GaussianLayer
            self.sampler = gaussian_sampler
        elif distribution == 'StudentT':
            self.loss = studentt_likelihood
            self.distrib = StudentTLayer
            self.sampler = studentt_sampler
        else:
            # NOTE(review): an unknown `distribution` silently leaves
            # self.loss/self.distrib/self.sampler unset; later use raises
            # AttributeError.
            pass
        if with_custom_nn_structure:
            self.nn_structure = with_custom_nn_structure
        else:
            self.nn_structure = DeepAR.basic_structure
        self._output_layer_name = 'main_output'
        self.get_intermediate = None
    @staticmethod
    def basic_structure(**kwargs):
        """
        This is the method that needs to be patched when changing NN structure
        :return: inputs_shape (tuple), inputs (Tensor), [loc, scale] (a list of theta parameters
        of the target likelihood)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        distrib = kwargs['distrib']
        # One sequence covers conditioning range plus prediction range.
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = LSTM(4, return_sequences=True)(inputs)
        #x = Dense(3, activation='relu')(x)
        #loc, scale = GaussianLayer(1, name='main_output')(x)
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta
    def instantiate_and_fit(self, verbose=False):
        """Build via self.nn_structure, train, and cache the theta extractor.

        NOTE(review): self.nn_structure() is called with no kwargs, but
        basic_structure reads kwargs['context_len'] etc., so this path
        raises KeyError; fit() below passes the required kwargs.
        """
        print('instantiate_and_fit(self, verbose=False)')
        input_shape, inputs, theta = self.nn_structure()
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer,
                      experimental_run_tf_function = False
                      )
        #model.fit_generator(ts_generator(self.ts_obj,
        model.fit(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        # Backend function mapping network input -> output-layer theta.
        self.get_intermediate = K.function(inputs=[self.model.input],
                                           outputs=self.model.get_layer(self._output_layer_name).output)
    @property
    def model(self):
        """The trained Keras Model (None until a fit method has run)."""
        return self.keras_model
    def predict_theta_from_input(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)
    @staticmethod
    def encoder_decoder(**kwargs):
        #context_len=40, prediction_len=2, input_dim=1,
        #    num_cells = 40, num_layers = 2, dropout_rate = 0.1):
        """
        follow the deepar 2-layers lstm encoder-decoder
        This is the method that needs to be patched when changing NN structure
        :return: inputs_shape (tuple), inputs (Tensor), [loc, scale] (a list of theta parameters
        of the target likelihood)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        num_cells = kwargs['num_cells']
        num_layers = kwargs['num_layers']
        dropout_rate = kwargs['dropout_rate']
        distrib = kwargs['distrib']
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = inputs
        # Stacked LSTMs; unroll=True trades memory for speed on short sequences.
        for l in range(num_layers):
            x = LSTM(num_cells, return_sequences=True, dropout=dropout_rate, unroll=True)(x)
            #x = LSTM(num_cells, return_sequences=True, dropout=dropout_rate, unroll=False)(x)
        #x = Dense(3, activation='relu')(x)
        #loc, scale = GaussianLayer(1, name='main_output')(x)
        #return input_shape, inputs, [loc, scale]
        theta = distrib(1, name='main_output')(x)
        #theta = StudentTLayer(1, name='main_output')(x)
        return input_shape, inputs, theta
    def fit(self, verbose=False,
            context_len=20, prediction_len=2, input_dim=1,
            num_cells = 40, num_layers = 2, dropout_rate = 0.1,
            batch_size = 32,
            callbacks=None):
        """Build the configured network, train it (generator or in-memory
        path), report timing/throughput, and cache the theta extractor."""
        input_shape, inputs, theta = self.nn_structure(
                distrib = self.distrib,
                context_len=context_len, prediction_len=prediction_len, input_dim=input_dim,
                num_cells = num_cells, num_layers = num_layers, dropout_rate = dropout_rate)
        #model = Model(inputs, theta[0])
        model = Model(inputs, theta)
        model.compile(loss=self.loss(), optimizer=self.optimizer)
                #experimental_run_tf_function = True)
                #experimental_run_tf_function = False)
        model.summary()
        # NOTE(review): this binds a *local* name that is never read; resetting
        # the module-level counter would require `global _debug_itercnt`.
        _debug_itercnt = 0
        #model.fit_generator(ts_generator(self.ts_obj,
        if self.use_generator:
            model.fit(ts_generator(self.ts_obj,
                                input_shape[0]),
                                steps_per_epoch=self.steps_per_epoch,
                                epochs=self.epochs,
                                callbacks=callbacks)
        else:
            # Materialize the whole dataset so model.fit runs without the
            # generator overhead (used for throughput measurements).
            datax = np.empty((0, self.ts_obj.n_steps, self.ts_obj.n_features))
            datay = np.empty((0, self.ts_obj.n_steps, 1))
            curbatch = 0
            # NOTE(review): np.append copies the full array each iteration
            # (quadratic); a list + one np.concatenate would be cheaper.
            while curbatch < self.ts_obj.n_batches:
                x,y = self.ts_obj.next_batch(32, input_shape[0])
                datax = np.append(datax, x, axis=0)
                datay = np.append(datay, y, axis=0)
                curbatch = curbatch + 1
            # start training
            # Marker file; presumably a trigger for an external VTune
            # profiler (matches the commented hooks in ts_generator).
            with open('./vtune-flag.txt','w') as flagf:
                flagf.write('hi')
            print('Start training...', flush=True)
            start_time = time.time()
            model.fit(datax, datay,
                    batch_size = batch_size,
                    steps_per_epoch=self.steps_per_epoch,
                    epochs=self.epochs,
                    callbacks=callbacks)
            execution_time = time.time() - start_time
            print('Execution time:', execution_time)
            print('Training Throughput:', batch_size * self.epochs * self.steps_per_epoch / execution_time, ' items/s')
            print('Training Speed:', execution_time / (batch_size * self.epochs * self.steps_per_epoch ), ' s/item')
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        # Backend function mapping network input -> output-layer theta.
        self.get_intermediate = K.function(inputs=[self.model.input],
                                           outputs=self.model.get_layer(self._output_layer_name).output)
    def predict(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)
    def get_sample(self, theta):
        """Draw a random sample from the likelihood parameterized by ``theta``."""
        return self.sampler(theta)
def ts_generator(ts_obj, n_steps):
    """Infinite batch generator for Keras; counts yielded batches in the
    module-level ``_debug_itercnt`` (profiling hook).

    :param ts_obj: Dataset-like object exposing ``next_batch(batch_size, n_steps)``
    :param n_steps: length of the net's input tensor
    :return: yields ``(inputs, targets)`` pairs forever
    """
    global _debug_itercnt, _debug_profile_start
    while True:
        batch = ts_obj.next_batch(32, n_steps)  # fixed batch size of 32
        _debug_itercnt += 1
        yield batch[0], batch[1]
| 9,488 | 36.654762 | 119 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartfve/model_eager/layers.py | import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.initializers import glorot_normal
from tensorflow.keras.layers import Layer
class GaussianLayer(Layer):
    """Output layer mapping features to Gaussian parameters (mu, sigma).

    ``call`` returns ``[mu, sigma]`` where sigma is made strictly positive
    through a softplus transform.
    """
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        self.kernel_1, self.kernel_2, self.bias_1, self.bias_2 = [], [], [], []
        super(GaussianLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        feature_dim = input_shape[-1]
        def _new_weight(name, shape):
            # All four weights share the same initializer/trainability.
            return self.add_weight(name=name, shape=shape,
                                   initializer=glorot_normal(),
                                   trainable=True)
        # Creation order (kernel_1, kernel_2, bias_1, bias_2) is preserved
        # so Keras weight bookkeeping matches the original layer.
        self.kernel_1 = _new_weight('kernel_1', (feature_dim, self.output_dim))
        self.kernel_2 = _new_weight('kernel_2', (feature_dim, self.output_dim))
        self.bias_1 = _new_weight('bias_1', (self.output_dim,))
        self.bias_2 = _new_weight('bias_2', (self.output_dim,))
        super(GaussianLayer, self).build(input_shape)
    def call(self, x):
        output_mu = tf.matmul(x, self.kernel_1) + self.bias_1
        raw_sigma = tf.matmul(x, self.kernel_2) + self.bias_2
        # softplus + epsilon keeps sigma strictly positive
        output_sig_pos = K.log(1 + K.exp(raw_sigma)) + 1e-06
        return [output_mu, output_sig_pos]
    def compute_output_shape(self, input_shape):
        """Both outputs keep (batch, time) and end in ``output_dim``."""
        out_shape = (input_shape[0], input_shape[1], self.output_dim)
        return [out_shape, out_shape]
#
# studentT
#
class StudentTLayer(Layer):
    """Output layer producing per-step Student-t parameters, concatenated
    as ``theta = [mu, sigma, nu]`` on the channel axis.

    mu
        Tensor containing the means, of shape `(*batch_shape, *event_shape)`.
    sigma
        Tensor containing the standard deviations, of shape
        `(*batch_shape, *event_shape)`.
    nu
        Nonnegative tensor containing the degrees of freedom of the distribution,
        of shape `(*batch_shape, *event_shape)`.
    """
    def __init__(self, output_dim, **kwargs):
        super(StudentTLayer, self).__init__(**kwargs)
        self.output_dim = output_dim
        self.num_parameters = 3  # mu, sigma, nu
        # Consistency fix: size the weight lists from num_parameters instead
        # of duplicating the literal 3.
        self.kernels = [[] for _ in range(self.num_parameters)]
        self.biases = [[] for _ in range(self.num_parameters)]
    def build(self, input_shape):
        """
        input: shape (NTC) - one kernel/bias pair per distribution parameter
        """
        n_weight_rows = input_shape[2]
        print('n_weight_rows:', n_weight_rows)
        for i in range(self.num_parameters):
            self.kernels[i] = self.add_weight(name='kernel_%d'%i,
                                              shape=(n_weight_rows, self.output_dim),
                                              initializer="random_normal",
                                              trainable=True)
        for i in range(self.num_parameters):
            self.biases[i] = self.add_weight(name='bias_%d'%i,
                                             shape=(self.output_dim,),
                                             initializer="random_normal",
                                             trainable=True)
        super(StudentTLayer, self).build(input_shape)
    def call(self, x):
        """
        return theta = concat([mu, sigma, nu]) along the channel axis
        """
        output_mu = tf.matmul(x, self.kernels[0]) + self.biases[0]
        output_sig = tf.matmul(x, self.kernels[1]) + self.biases[1]
        # softplus + epsilon keeps sigma strictly positive
        output_sig_pos = K.log(1 + K.exp(output_sig)) + 1e-06
        output_nu = tf.matmul(x, self.kernels[2]) + self.biases[2]
        # shifted softplus keeps nu > 2 (finite variance)
        output_nu_pos = K.log(1 + K.exp(output_nu)) + 1e-06 + 2.0
        theta = tf.concat([output_mu, output_sig_pos, output_nu_pos], axis=2)
        return theta
    def compute_output_shape(self, input_shape):
        """
        The three parameters are concatenated on the last axis.
        (Unreachable duplicate return removed.)
        """
        return (input_shape[0], input_shape[1], 3)
    def get_config(self):
        # BUG FIX: get_config must return the config dict; without the
        # return, Keras serialization received None and from_config failed.
        base_config = super(StudentTLayer, self).get_config()
        base_config['output_dim'] = self.output_dim
        return base_config
    @classmethod
    def from_config(cls, config):
        return cls(**config)
| 5,125 | 38.430769 | 81 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartfve/model_eager/lstm_ve.py | #from deepar.model import NNModel
#from deepar.model.layers import GaussianLayer
#from deepar.model.loss import gaussian_likelihood
from . import NNModel
from .layers import GaussianLayer, StudentTLayer
from .loss import gaussian_likelihood, gaussian_sampler
from .loss import studentt_likelihood, studentt_sampler
from tensorflow import keras
from tensorflow.keras.layers import Input, Dense, Input
from tensorflow.keras.models import Model
from tensorflow.keras.layers import LSTM
from tensorflow.keras import backend as K
import logging
import numpy as np
import os
from datetime import datetime
logger = logging.getLogger('deepar')
class DeepAR(NNModel):
    """DeepAR-style probabilistic forecaster built on tf.keras LSTMs.

    Variant targeting a vector-engine LSTM build (``use_veop=True``) with a
    fixed-size in-memory training path and TensorBoard profiling callbacks.
    """
    def __init__(self, ts_obj, steps_per_epoch=50, epochs=100,
                 distribution = 'Gaussian',
                 optimizer='adam', with_custom_nn_structure=None):
        # ts_obj must implement next_batch(batch_size, n_steps) and expose
        # n_steps / n_features / n_batches for the in-memory fit path.
        self.ts_obj = ts_obj
        self.inputs, self.z_sample = None, None
        self.steps_per_epoch = steps_per_epoch
        self.epochs = epochs
        self.optimizer = optimizer
        self.keras_model = None
        # Select the likelihood triple: loss fn, Keras output layer, sampler.
        if distribution == 'Gaussian':
            self.loss = gaussian_likelihood
            self.distrib = GaussianLayer
            self.sampler = gaussian_sampler
        elif distribution == 'StudentT':
            self.loss = studentt_likelihood
            self.distrib = StudentTLayer
            self.sampler = studentt_sampler
        else:
            # NOTE(review): an unknown `distribution` silently leaves
            # self.loss/self.distrib/self.sampler unset; later use raises
            # AttributeError.
            pass
        if with_custom_nn_structure:
            self.nn_structure = with_custom_nn_structure
        else:
            self.nn_structure = DeepAR.basic_structure
        self._output_layer_name = 'main_output'
        self.get_intermediate = None
    @staticmethod
    def basic_structure(**kwargs):
        """
        This is the method that needs to be patched when changing NN structure
        :return: inputs_shape (tuple), inputs (Tensor), [loc, scale] (a list of theta parameters
        of the target likelihood)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        distrib = kwargs['distrib']
        # One sequence covers conditioning range plus prediction range.
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = LSTM(4, return_sequences=True)(inputs)
        #x = Dense(3, activation='relu')(x)
        #loc, scale = GaussianLayer(1, name='main_output')(x)
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta
    def instantiate_and_fit(self, verbose=False):
        """Build via self.nn_structure, train, and cache the theta extractor.

        NOTE(review): self.nn_structure() is called with no kwargs, but
        basic_structure reads kwargs['context_len'] etc., so this path
        raises KeyError; fit() below passes the required kwargs.
        """
        print('instantiate_and_fit(self, verbose=False)')
        input_shape, inputs, theta = self.nn_structure()
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer,
                      experimental_run_tf_function = False
                      )
        #model.fit_generator(ts_generator(self.ts_obj,
        model.fit(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        # Backend function mapping network input -> output-layer theta.
        self.get_intermediate = K.function(inputs=[self.model.input],
                                           outputs=self.model.get_layer(self._output_layer_name).output)
    @property
    def model(self):
        """The trained Keras Model (None until a fit method has run)."""
        return self.keras_model
    def predict_theta_from_input(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)
    @staticmethod
    def encoder_decoder(**kwargs):
        #context_len=40, prediction_len=2, input_dim=1,
        #    num_cells = 40, num_layers = 2, dropout_rate = 0.1):
        """
        follow the deepar 2-layers lstm encoder-decoder
        This is the method that needs to be patched when changing NN structure
        :return: inputs_shape (tuple), inputs (Tensor), [loc, scale] (a list of theta parameters
        of the target likelihood)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        num_cells = kwargs['num_cells']
        num_layers = kwargs['num_layers']
        dropout_rate = kwargs['dropout_rate']
        distrib = kwargs['distrib']
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = inputs
        # NOTE(review): use_veop is not a stock Keras LSTM argument -
        # presumably a patched build targeting a vector engine; confirm
        # against the TF build in use.
        for l in range(num_layers):
            x = LSTM(num_cells, return_sequences=True, dropout=dropout_rate, unroll=True, use_veop=True)(x)
        #x = Dense(3, activation='relu')(x)
        #loc, scale = GaussianLayer(1, name='main_output')(x)
        #return input_shape, inputs, [loc, scale]
        theta = distrib(1, name='main_output')(x)
        #theta = StudentTLayer(1, name='main_output')(x)
        return input_shape, inputs, theta
    def fit(self, verbose=False,
            context_len=20, prediction_len=2, input_dim=1,
            num_cells = 40, num_layers = 2, dropout_rate = 0.1):
        """Build the configured network, materialize the full dataset, train
        with a TensorBoard profiling callback, and cache the theta extractor."""
        input_shape, inputs, theta = self.nn_structure(
                distrib = self.distrib,
                context_len=context_len, prediction_len=prediction_len, input_dim=input_dim,
                num_cells = num_cells, num_layers = num_layers, dropout_rate = dropout_rate)
        #model = Model(inputs, theta[0])
        model = Model(inputs, theta)
        model.compile(loss=self.loss(), optimizer=self.optimizer)
                #experimental_run_tf_function = True)
                #experimental_run_tf_function = False)
        model.summary()
        # Materialize the whole dataset up front.
        # NOTE(review): np.append copies the full array each iteration
        # (quadratic); a list + one np.concatenate would be cheaper.
        datax = np.empty((0, self.ts_obj.n_steps, self.ts_obj.n_features))
        datay = np.empty((0, self.ts_obj.n_steps, 1))
        curbatch = 0
        while curbatch < self.ts_obj.n_batches:
            #print (f"Preparing data. Batch {curbatch}")
            x,y = self.ts_obj.next_batch(32, input_shape[0])
            datax = np.append(datax, x, axis=0)
            datay = np.append(datay, y, axis=0)
            curbatch += 1
        host = os.uname()[1]
        host = "%s" % host
        # NOTE(review): hard-coded experiment batch size; overrides nothing
        # else in the class and ignores steps_per_epoch below.
        batsize = 3200
        # Per-host, timestamped TensorBoard log directory.
        log_dir = "./logsdbBatSizeTF23/%s/unrollVEOP/%s/%s" % (
                batsize, host, datetime.now().strftime("%Y%m%d-%H%M%S"))
        tensorboard_callback = keras.callbacks.TensorBoard(log_dir=log_dir,
                                                       histogram_freq=1,
                                                       profile_batch=3)
        #model.fit_generator(ts_generator(self.ts_obj,
        #model.fit(ts_generator(self.ts_obj,
        #                                 input_shape[0]),
        #                    steps_per_epoch=self.steps_per_epoch,
        model.fit(datax, datay, batch_size=batsize,
                            epochs=self.epochs,
                            #steps_per_epoch=self.steps_per_epoch,
                            callbacks=[tensorboard_callback]
                            )
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        # Backend function mapping network input -> output-layer theta.
        self.get_intermediate = K.function(inputs=[self.model.input],
                                           outputs=self.model.get_layer(self._output_layer_name).output)
    def predict(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)
    def get_sample(self, theta):
        """Draw a random sample from the likelihood parameterized by ``theta``."""
        return self.sampler(theta)
def ts_generator(ts_obj, n_steps):
    """
    Infinite (inputs, targets) generator adapter for Keras.

    :param ts_obj: a Dataset child class object that implements the
        'next_batch' method
    :param n_steps: length of the net's input tensor
    :return: yields (batch_inputs, batch_targets) forever, 32 series at a time
    """
    while True:
        sample = ts_obj.next_batch(32, n_steps)
        yield sample[0], sample[1]
| 8,827 | 37.21645 | 107 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartfve/model/lstm_save.py | #from deepar.model import NNModel
#from deepar.model.layers import GaussianLayer
#from deepar.model.loss import gaussian_likelihood
from . import NNModel
from .layers import GaussianLayer, StudentTLayer
from .loss import gaussian_likelihood, gaussian_sampler
from .loss import studentt_likelihood, studentt_sampler
from tensorflow.keras.layers import Input, Dense, Input
from tensorflow.keras.models import Model
from tensorflow.keras.layers import LSTM
from tensorflow.keras import backend as K
import logging
import numpy as np
logger = logging.getLogger('deepar')
class DeepAR(NNModel):
    """
    Keras implementation of a DeepAR-style probabilistic forecaster.

    Wraps a configurable network-building function (``nn_structure``) that
    returns distribution parameters (theta) for a chosen likelihood
    (Gaussian or StudentT), plus training and prediction helpers.
    """
    def __init__(self, ts_obj, steps_per_epoch=50, epochs=100,
                 distribution = 'Gaussian',
                 optimizer='adam', with_custom_nn_structure=None):
        # ts_obj: dataset object exposing next_batch(batch_size, n_steps)
        self.ts_obj = ts_obj
        self.inputs, self.z_sample = None, None
        self.steps_per_epoch = steps_per_epoch
        self.epochs = epochs
        self.optimizer = optimizer
        self.keras_model = None
        # Select loss / output layer / sampler triple by likelihood name.
        if distribution == 'Gaussian':
            self.loss = gaussian_likelihood
            self.distrib = GaussianLayer
            self.sampler = gaussian_sampler
        elif distribution == 'StudentT':
            self.loss = studentt_likelihood
            self.distrib = StudentTLayer
            self.sampler = studentt_sampler
        else:
            # NOTE(review): unknown distribution names are silently ignored,
            # leaving self.loss/distrib/sampler undefined.
            pass
        if with_custom_nn_structure:
            self.nn_structure = with_custom_nn_structure
        else:
            self.nn_structure = DeepAR.basic_structure
        # Name of the layer whose output carries the theta parameters.
        self._output_layer_name = 'main_output'
        # Keras backend function set after training; None until then.
        self.get_intermediate = None
    @staticmethod
    def basic_structure(**kwargs):
        """
        This is the method that needs to be patched when changing NN structure
        :return: inputs_shape (tuple), inputs (Tensor), [loc, scale] (a list of theta parameters
        of the target likelihood)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        distrib = kwargs['distrib']
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        # Single small LSTM followed directly by the distribution layer.
        x = LSTM(4, return_sequences=True)(inputs)
        #x = Dense(3, activation='relu')(x)
        #loc, scale = GaussianLayer(1, name='main_output')(x)
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta
    def instantiate_and_fit(self, verbose=False):
        # Older training path: model predicts theta[0] only, and the loss is
        # a closure over the remaining theta tensors (theta[1:]).
        input_shape, inputs, theta = self.nn_structure()
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        # Expose raw output-layer tensors for parameter prediction.
        self.get_intermediate = K.function(inputs=[self.model.input],
                                           outputs=self.model.get_layer(self._output_layer_name).output)
    @property
    def model(self):
        # Trained Keras model (None before fitting).
        return self.keras_model
    def predict_theta_from_input(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)
    @staticmethod
    def encoder_decoder(**kwargs):
        #context_len=40, prediction_len=2, input_dim=1,
        #        num_cells = 40, num_layers = 2, dropout_rate = 0.1):
        """
        follow the deepar 2-layers lstm encoder-decoder
        This is the method that needs to be patched when changing NN structure
        :return: inputs_shape (tuple), inputs (Tensor), [loc, scale] (a list of theta parameters
        of the target likelihood)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        num_cells = kwargs['num_cells']
        num_layers = kwargs['num_layers']
        dropout_rate = kwargs['dropout_rate']
        distrib = kwargs['distrib']
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = inputs
        # Stack num_layers LSTM layers, each returning the full sequence.
        for l in range(num_layers):
            x = LSTM(num_cells, return_sequences=True, dropout=dropout_rate)(x)
        #x = Dense(3, activation='relu')(x)
        #loc, scale = GaussianLayer(1, name='main_output')(x)
        #return input_shape, inputs, [loc, scale]
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta
    def fit(self, verbose=False,
            context_len=40, prediction_len=2, input_dim=1,
            num_cells = 40, num_layers = 2, dropout_rate = 0.1):
        """
        Build the network via ``self.nn_structure`` and train it with a
        generator over ``self.ts_obj`` batches (fit_generator path).
        """
        input_shape, inputs, theta = self.nn_structure(
                distrib = self.distrib,
                context_len=context_len, prediction_len=prediction_len, input_dim=input_dim,
                num_cells = num_cells, num_layers = num_layers, dropout_rate = dropout_rate)
        # Model predicts theta[0]; loss is parameterized by theta[1:].
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        self.get_intermediate = K.function(inputs=[self.model.input],
                                           outputs=self.model.get_layer(self._output_layer_name).output)
    def predict(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)
    def get_sample(self, theta):
        # Draw samples from the distribution parameterized by theta.
        return self.sampler(theta)
def ts_generator(ts_obj, n_steps):
    """
    Infinite (inputs, targets) generator adapter for Keras.

    :param ts_obj: a Dataset child class object that implements the
        'next_batch' method
    :param n_steps: length of the net's input tensor
    :return: yields (batch_inputs, batch_targets) forever, 32 series at a time
    """
    while True:
        sample = ts_obj.next_batch(32, n_steps)
        yield sample[0], sample[1]
| 7,142 | 36.594737 | 104 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartfve/model/lstm.py | #from deepar.model import NNModel
#from deepar.model.layers import GaussianLayer
#from deepar.model.loss import gaussian_likelihood
from . import NNModel
from .layers import GaussianLayer, StudentTLayer
from .loss import gaussian_likelihood, gaussian_sampler
from .loss import studentt_likelihood, studentt_sampler
from tensorflow.keras.layers import Input, Dense, Input
from tensorflow.keras.models import Model
from tensorflow.keras.layers import LSTM
from tensorflow.keras import backend as K
import logging
import numpy as np
logger = logging.getLogger('deepar')
class DeepAR(NNModel):
    """
    Keras implementation of a DeepAR-style probabilistic forecaster
    (TF2 variant: uses model.fit over a generator and disables
    experimental_run_tf_function when compiling).
    """
    def __init__(self, ts_obj, steps_per_epoch=50, epochs=100,
                 distribution = 'Gaussian',
                 optimizer='adam', with_custom_nn_structure=None):
        # ts_obj: dataset object exposing next_batch(batch_size, n_steps)
        self.ts_obj = ts_obj
        self.inputs, self.z_sample = None, None
        self.steps_per_epoch = steps_per_epoch
        self.epochs = epochs
        self.optimizer = optimizer
        self.keras_model = None
        # Select loss / output layer / sampler triple by likelihood name.
        if distribution == 'Gaussian':
            self.loss = gaussian_likelihood
            self.distrib = GaussianLayer
            self.sampler = gaussian_sampler
        elif distribution == 'StudentT':
            self.loss = studentt_likelihood
            self.distrib = StudentTLayer
            self.sampler = studentt_sampler
        else:
            # NOTE(review): unknown distribution names are silently ignored,
            # leaving self.loss/distrib/sampler undefined.
            pass
        if with_custom_nn_structure:
            self.nn_structure = with_custom_nn_structure
        else:
            self.nn_structure = DeepAR.basic_structure
        # Name of the layer whose output carries the theta parameters.
        self._output_layer_name = 'main_output'
        # Keras backend function set after training; None until then.
        self.get_intermediate = None
    @staticmethod
    def basic_structure(**kwargs):
        """
        This is the method that needs to be patched when changing NN structure
        :return: inputs_shape (tuple), inputs (Tensor), [loc, scale] (a list of theta parameters
        of the target likelihood)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        distrib = kwargs['distrib']
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        # Single small LSTM followed directly by the distribution layer.
        x = LSTM(4, return_sequences=True)(inputs)
        #x = Dense(3, activation='relu')(x)
        #loc, scale = GaussianLayer(1, name='main_output')(x)
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta
    def instantiate_and_fit(self, verbose=False):
        # Older training path: model predicts theta[0] only, and the loss is
        # a closure over the remaining theta tensors (theta[1:]).
        input_shape, inputs, theta = self.nn_structure()
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        # Expose raw output-layer tensors for parameter prediction.
        self.get_intermediate = K.function(inputs=[self.model.input],
                                           outputs=self.model.get_layer(self._output_layer_name).output)
    @property
    def model(self):
        # Trained Keras model (None before fitting).
        return self.keras_model
    def predict_theta_from_input(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)
    @staticmethod
    def encoder_decoder(**kwargs):
        #context_len=40, prediction_len=2, input_dim=1,
        #        num_cells = 40, num_layers = 2, dropout_rate = 0.1):
        """
        follow the deepar 2-layers lstm encoder-decoder
        This is the method that needs to be patched when changing NN structure
        :return: inputs_shape (tuple), inputs (Tensor), [loc, scale] (a list of theta parameters
        of the target likelihood)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        num_cells = kwargs['num_cells']
        num_layers = kwargs['num_layers']
        dropout_rate = kwargs['dropout_rate']
        distrib = kwargs['distrib']
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = inputs
        # Stack num_layers LSTM layers, each returning the full sequence.
        for l in range(num_layers):
            x = LSTM(num_cells, return_sequences=True, dropout=dropout_rate)(x)
        #x = Dense(3, activation='relu')(x)
        #loc, scale = GaussianLayer(1, name='main_output')(x)
        #return input_shape, inputs, [loc, scale]
        theta = distrib(1, name='main_output')(x)
        #theta = StudentTLayer(1, name='main_output')(x)
        return input_shape, inputs, theta
    def fit(self, verbose=False,
            context_len=20, prediction_len=2, input_dim=1,
            num_cells = 40, num_layers = 2, dropout_rate = 0.1):
        """
        Build the network via ``self.nn_structure`` and train it with a
        generator over ``self.ts_obj`` batches.
        """
        input_shape, inputs, theta = self.nn_structure(
                distrib = self.distrib,
                context_len=context_len, prediction_len=prediction_len, input_dim=input_dim,
                num_cells = num_cells, num_layers = num_layers, dropout_rate = dropout_rate)
        # Model predicts theta[0]; loss is parameterized by theta[1:].
        model = Model(inputs, theta[0])
        # experimental_run_tf_function=False forces the legacy execution path
        # (needed for the closure-style loss under TF2).
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer,
                experimental_run_tf_function = False)
        model.summary()
        #model.fit_generator(ts_generator(self.ts_obj,
        model.fit(ts_generator(self.ts_obj,
                            input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        self.get_intermediate = K.function(inputs=[self.model.input],
                                           outputs=self.model.get_layer(self._output_layer_name).output)
    def predict(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)
    def get_sample(self, theta):
        # Draw samples from the distribution parameterized by theta.
        return self.sampler(theta)
def ts_generator(ts_obj, n_steps):
    """
    Infinite (inputs, targets) generator adapter for Keras.

    :param ts_obj: a Dataset child class object that implements the
        'next_batch' method
    :param n_steps: length of the net's input tensor
    :return: yields (batch_inputs, batch_targets) forever, 32 series at a time
    """
    while True:
        sample = ts_obj.next_batch(32, n_steps)
        yield sample[0], sample[1]
| 7,372 | 36.426396 | 104 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartfve/model/layers.py | import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.initializers import glorot_normal
from tensorflow.keras.layers import Layer
class GaussianLayer(Layer):
    """
    Dense output layer producing Gaussian likelihood parameters.

    For an NTC input it emits [mu, sigma], each of shape
    (batch, time, output_dim); sigma is made strictly positive via a
    softplus transform.
    """

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        self.kernel_1, self.kernel_2, self.bias_1, self.bias_2 = [], [], [], []
        super(GaussianLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        feature_dim = input_shape[-1]

        def _make(name, shape):
            # Small helper so all four weights share identical options.
            return self.add_weight(name=name,
                                   shape=shape,
                                   initializer=glorot_normal(),
                                   trainable=True)

        self.kernel_1 = _make('kernel_1', (feature_dim, self.output_dim))
        self.kernel_2 = _make('kernel_2', (feature_dim, self.output_dim))
        self.bias_1 = _make('bias_1', (self.output_dim,))
        self.bias_2 = _make('bias_2', (self.output_dim,))
        super(GaussianLayer, self).build(input_shape)

    def call(self, x):
        mu = tf.matmul(x, self.kernel_1) + self.bias_1
        raw_sigma = tf.matmul(x, self.kernel_2) + self.bias_2
        # softplus + epsilon keeps sigma strictly positive
        sigma = K.log(1 + K.exp(raw_sigma)) + 1e-06
        return [mu, sigma]

    def compute_output_shape(self, input_shape):
        """
        The assumption is the output ts is always one-dimensional
        """
        per_output = (input_shape[0], input_shape[1], self.output_dim)
        return [per_output, per_output]
#
# studentT
#
class StudentTLayer(Layer):
    """
    Dense output layer producing Student-t likelihood parameters.

    mu
        Tensor containing the means, of shape `(*batch_shape, *event_shape)`.
    sigma
        Tensor containing the standard deviations, of shape
        `(*batch_shape, *event_shape)`.
    nu
        Nonnegative tensor containing the degrees of freedom of the distribution,
        of shape `(*batch_shape, *event_shape)`.
    """
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        # Three parameter heads: mu, sigma, nu.
        self.num_parameters = 3
        self.kernels = [[] for x in range(3)]
        self.biases = [[] for x in range(3)]
        super(StudentTLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        """
        input: shape (NTC)
        """
        # Use the last axis (feature dim) for consistency with
        # GaussianLayer.build; identical to input_shape[2] for NTC input.
        n_weight_rows = input_shape[-1]
        for i in range(self.num_parameters):
            self.kernels[i] = self.add_weight(name='kernel_%d'%i,
                                              shape=(n_weight_rows, self.output_dim),
                                              initializer=glorot_normal(),
                                              trainable=True)
        for i in range(self.num_parameters):
            self.biases[i] = self.add_weight(name='bias_%d'%i,
                                             shape=(self.output_dim,),
                                             initializer=glorot_normal(),
                                             trainable=True)
        super(StudentTLayer, self).build(input_shape)
    def call(self, x):
        """
        return mu, sigma, nu
        """
        output_mu = tf.matmul(x, self.kernels[0]) + self.biases[0]
        output_sig = tf.matmul(x, self.kernels[1]) + self.biases[1]
        # softplus + epsilon keeps sigma strictly positive
        output_sig_pos = K.log(1 + K.exp(output_sig)) + 1e-06
        output_nu = tf.matmul(x, self.kernels[2]) + self.biases[2]
        # softplus shifted by 2 keeps nu > 2 (presumably so the
        # distribution's variance stays finite — NOTE(review): confirm)
        output_nu_pos = K.log(1 + K.exp(output_nu)) + 1e-06 + 2.0
        return [output_mu, output_sig_pos, output_nu_pos]
    def compute_output_shape(self, input_shape):
        """
        The assumption is the output ts is always one-dimensional
        """
        return [(input_shape[0], input_shape[1], self.output_dim),
                (input_shape[0], input_shape[1], self.output_dim),
                (input_shape[0], input_shape[1], self.output_dim)]
| 4,550 | 39.274336 | 81 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepar-weighted/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional, Tuple
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.block.scaler import MeanScaler, NOPScaler
from gluonts.core.component import DType, validated
from gluonts.distribution import DistributionOutput, Distribution
from gluonts.distribution.distribution import getF
from gluonts.model.common import Tensor
from gluonts.support.util import weighted_average
def prod(xs):
    """Return the product of all elements of ``xs`` (1 for an empty iterable)."""
    result = 1
    for factor in xs:
        result = result * factor
    return result
class DeepARWeightNetwork(mx.gluon.HybridBlock):
    """
    Base network for a DeepAR variant with loss re-weighting.

    Embeds static categorical features, scales the target, builds lagged
    input subsequences and unrolls an RNN (LSTM or GRU) over them.
    ``weight_coef`` is the extra loss weight applied by the training
    subclass at time steps where the target changes.
    """
    @validated()
    def __init__(
        self,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: List[int],
        lags_seq: List[int],
        scaling: bool = True,
        dtype: DType = np.float32,
        weight_coef: float = 9,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling
        self.dtype = dtype
        assert len(cardinality) == len(
            embedding_dimension
        ), "embedding_dimension should be a list with the same size as cardinality"
        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        # NOTE: sorts the caller's list in place before storing it.
        lags_seq.sort()
        self.lags_seq = lags_seq
        self.distr_output = distr_output
        RnnCell = {"lstm": mx.gluon.rnn.LSTMCell, "gru": mx.gluon.rnn.GRUCell}[
            self.cell_type
        ]
        self.target_shape = distr_output.event_shape
        # Loss multiplier used by the training network at change points.
        self.weight_coef = weight_coef
        # TODO: is the following restriction needed?
        assert (
            len(self.target_shape) <= 1
        ), "Argument `target_shape` should be a tuple with 1 element at most"
        with self.name_scope():
            self.proj_distr_args = distr_output.get_args_proj()
            # Stacked RNN: residual connections from layer 2 up, optional
            # zoneout regularization when dropout_rate > 0.
            self.rnn = mx.gluon.rnn.HybridSequentialRNNCell()
            for k in range(num_layers):
                cell = RnnCell(hidden_size=num_cells)
                cell = mx.gluon.rnn.ResidualCell(cell) if k > 0 else cell
                cell = (
                    mx.gluon.rnn.ZoneoutCell(cell, zoneout_states=dropout_rate)
                    if dropout_rate > 0.0
                    else cell
                )
                self.rnn.add(cell)
            self.rnn.cast(dtype=dtype)
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=embedding_dimension,
                dtype=self.dtype,
            )
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)
    @staticmethod
    def get_lagged_subsequences(
        F,
        sequence: Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> Tensor:
        """
        Returns lagged subsequences of a given sequence.
        Parameters
        ----------
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted.
            Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.
        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and
            I = len(indices), containing lagged subsequences. Specifically,
            lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        # we must have: sequence_length - lag_index - subsequences_length >= 0
        # for all lag_index, hence the following assert
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, "
            f"found lag {max(indices)} while history length is only "
            f"{sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)
        lagged_values = []
        for lag_index in indices:
            begin_index = -lag_index - subsequences_length
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(
                F.slice_axis(
                    sequence, axis=1, begin=begin_index, end=end_index
                )
            )
        return F.stack(*lagged_values, axis=-1)
    def unroll_encoder(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Optional[
            Tensor
        ],  # (batch_size, prediction_length, num_features)
        future_target: Optional[
            Tensor
        ],  # (batch_size, prediction_length, *target_shape)
    ) -> Tuple[Tensor, List, Tensor, Tensor]:
        """
        Unrolls the LSTM encoder over past and, if present, future data.
        Returns outputs and state of the encoder, plus the scale of past_target
        and a vector of static features that was constructed and fed as input
        to the encoder.
        All tensor arguments should have NTC layout.
        """
        # Inference mode: only the context window is available.
        # Training mode: concatenate context and prediction ranges.
        if future_time_feat is None or future_target is None:
            time_feat = past_time_feat.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            )
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        else:
            time_feat = F.concat(
                past_time_feat.slice_axis(
                    axis=1,
                    begin=self.history_length - self.context_length,
                    end=None,
                ),
                future_time_feat,
                dim=1,
            )
            sequence = F.concat(past_target, future_target, dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags = self.get_lagged_subsequences(
            F=F,
            sequence=sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length,
        )
        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
            past_observed_values.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
        )
        # (batch_size, num_features)
        embedded_cat = self.embedder(feat_static_cat)
        # in addition to embedding features, use the log scale as it can help
        # prediction too
        # (batch_size, num_features + prod(target_shape))
        static_feat = F.concat(
            embedded_cat,
            feat_static_real,
            F.log(scale)
            if len(self.target_shape) == 0
            else F.log(scale.squeeze(axis=1)),
            dim=1,
        )
        # (batch_size, subsequences_length, num_features + 1)
        repeated_static_feat = static_feat.expand_dims(axis=1).repeat(
            axis=1, repeats=subsequences_length
        )
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))
        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = F.reshape(
            data=lags_scaled,
            shape=(
                -1,
                subsequences_length,
                len(self.lags_seq) * prod(self.target_shape),
            ),
        )
        # (batch_size, sub_seq_len, input_dim)
        inputs = F.concat(input_lags, time_feat, repeated_static_feat, dim=-1)
        # unroll encoder
        outputs, state = self.rnn.unroll(
            inputs=inputs,
            length=subsequences_length,
            layout="NTC",
            merge_outputs=True,
            begin_state=self.rnn.begin_state(
                func=F.zeros,
                dtype=self.dtype,
                batch_size=inputs.shape[0]
                if isinstance(inputs, mx.nd.NDArray)
                else 0,
            ),
        )
        # outputs: (batch_size, seq_len, num_cells)
        # state: list of (batch_size, num_cells) tensors
        # scale: (batch_size, 1, *target_shape)
        # static_feat: (batch_size, num_features + prod(target_shape))
        return outputs, state, scale, static_feat
class DeepARWeightTrainingNetwork(DeepARWeightNetwork):
    """
    Training network: unrolls the encoder over context + prediction ranges
    and computes a re-weighted negative-log-likelihood loss, giving weight
    ``self.weight_coef`` to time steps where the target differs from its
    value two steps earlier (and weight 1 elsewhere).
    """
    def distribution(
        self,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Distribution:
        """
        Returns the distribution predicted by the model on the range of
        past_target and future_target.
        The distribution is obtained by unrolling the network with the true
        target, this is also the distribution that is being minimized during
        training. This can be used in anomaly detection, see for instance
        examples/anomaly_detection.py.
        Input arguments are the same as for the hybrid_forward method.
        Returns
        -------
        Distribution
            a distribution object whose mean has shape:
            (batch_size, context_length + prediction_length).
        """
        # unroll the decoder in "training mode"
        # i.e. by providing future data as well
        F = getF(feat_static_cat)
        rnn_outputs, _, scale, _ = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
        )
        # Project RNN outputs to the distribution's parameter space.
        distr_args = self.proj_distr_args(rnn_outputs)
        return self.distr_output.distribution(distr_args, scale=scale)
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Tensor:
        """
        Computes the loss for training DeepAR, all inputs tensors representing
        time series have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape, seq_len)
        future_time_feat : (batch_size, prediction_length, num_features)
        future_target : (batch_size, prediction_length, *target_shape)
        future_observed_values : (batch_size, prediction_length, *target_shape)
        Returns loss with shape (batch_size, context + prediction_length, 1)
        -------
        """
        distr = self.distribution(
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
            future_observed_values=future_observed_values,
        )
        # put together target sequence
        # (batch_size, seq_len, *target_shape)
        target = F.concat(
            past_target.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            ),
            future_target,
            dim=1,
        )
        # (batch_size, seq_len)
        loss = distr.loss(target)
        # (batch_size, seq_len, *target_shape)
        observed_values = F.concat(
            past_observed_values.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=self.history_length,
            ),
            future_observed_values,
            dim=1,
        )
        # mask the loss at one time step iff one or more observations is missing in the target dimensions
        # (batch_size, seq_len)
        loss_weights1 = (
            observed_values
            if (len(self.target_shape) == 0)
            else observed_values.min(axis=-1, keepdims=False)
        )
        # deal with imbalance problem
        # set higher weight for loss at time step when target changes
        #loss_weights = (observed_values>0)*1./35 + (observed_values==0)*1.
        #print('observed shape:', observed_values.shape)
        #import pdb; pdb.set_trace()
        #if _hybridized_:
        if True:
            # Symbol-compatible branch: compare target[t] with target[t-2]
            # via slice_axis; steps that changed get weight_coef, others 1.
            # The first two steps (no t-2 reference) always get weight 1.
            r = F.slice_axis(target, axis=1, begin=2, end=None)
            l = F.slice_axis(target, axis=1, begin=0, end=-2)
            w1 = F.ones_like(r)
            w9 = F.ones_like(r)*self.weight_coef
            w = F.where(r==l, w1, w9)
            s = F.slice_axis(target, axis=1, begin=0, end=2)
            z = F.ones_like(s)
            # NOTE(review): relies on F.concat's default dim=1 — confirm.
            loss_weights2 = F.concat(z, w)
        else:
            # NDArray-only equivalent (dead code, kept for reference).
            c = target[:,2:] - target[:,:-2]
            w1 = F.ones_like(c)
            w9 = F.ones_like(c) * self.weight_coef
            loss_weights2 = F.ones_like(target)
            loss_weights2[:,2:] = F.where(c==0, w1, w9)
        # Zero out weights wherever observations are missing; otherwise use
        # the change-point weights.
        loss_weights = F.where(loss_weights1==0, loss_weights1, loss_weights2)
        #
        weighted_loss = weighted_average(
            F=F, x=loss, weights=loss_weights, axis=1
        )
        # need to mask possible nans and -inf
        loss = F.where(condition=loss_weights, x=loss, y=F.zeros_like(loss))
        return weighted_loss, loss
class DeepARWeightPredictionNetwork(DeepARWeightNetwork):
    """Inference-time counterpart of the weighted DeepAR network.

    Draws ``num_parallel_samples`` Monte Carlo sample paths per input series
    by unrolling the RNN one step at a time, sampling from the predicted
    distribution, and feeding each sample back as the (lagged) input of the
    next step.  The loss re-weighting used in training plays no role here.
    """
    @validated()
    def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
        # num_parallel_samples: number of sample paths drawn per series at
        # prediction time; trades forecast-distribution fidelity against
        # memory/compute.
        super().__init__(**kwargs)
        self.num_parallel_samples = num_parallel_samples
        # for decoding the lags are shifted by one, at the first time-step
        # of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]
    def sampling_decoder(
        self,
        F,
        static_feat: Tensor,
        past_target: Tensor,
        time_feat: Tensor,
        scale: Tensor,
        begin_states: List,
    ) -> Tensor:
        """
        Computes sample paths by unrolling the LSTM starting with a initial
        input and state.
        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, 1, 1).
        begin_states : List
            list of initial states for the LSTM layers.
            the shape of each tensor of the list should be (batch_size, num_cells)
        Returns
        --------
        Tensor
            A tensor containing sampled paths.
            Shape: (batch_size, num_sample_paths, prediction_length).
        """
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_time_feat = time_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_static_feat = static_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_scale = scale.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_states = [
            s.repeat(repeats=self.num_parallel_samples, axis=0)
            for s in begin_states
        ]
        future_samples = []
        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            # NOTE: sequence_length grows by one each step because the newly
            # sampled value is appended to repeated_past_target below.
            lags = self.get_lagged_subsequences(
                F=F,
                sequence=repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags_scaled = F.broadcast_div(
                lags, repeated_scale.expand_dims(axis=-1)
            )
            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = F.reshape(
                data=lags_scaled,
                shape=(-1, 1, prod(self.target_shape) * len(self.lags_seq)),
            )
            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            decoder_input = F.concat(
                input_lags,
                repeated_time_feat.slice_axis(axis=1, begin=k, end=k + 1),
                repeated_static_feat,
                dim=-1,
            )
            # output shape: (batch_size * num_samples, 1, num_cells)
            # state shape: (batch_size * num_samples, num_cells)
            rnn_outputs, repeated_states = self.rnn.unroll(
                inputs=decoder_input,
                length=1,
                begin_state=repeated_states,
                layout="NTC",
                merge_outputs=True,
            )
            distr_args = self.proj_distr_args(rnn_outputs)
            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(
                distr_args, scale=repeated_scale
            )
            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample(dtype=self.dtype)
            # (batch_size * num_samples, seq_len, *target_shape)
            # feed the sample back so the next step can use it as a lag
            repeated_past_target = F.concat(
                repeated_past_target, new_samples, dim=1
            )
            future_samples.append(new_samples)
        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = F.concat(*future_samples, dim=1)
        # (batch_size, num_samples, prediction_length, *target_shape)
        return samples.reshape(
            shape=(
                (-1, self.num_parallel_samples)
                + (self.prediction_length,)
                + self.target_shape
            )
        )
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Tensor,  # (batch_size, prediction_length, num_features)
    ) -> Tensor:
        """
        Predicts samples, all tensors should have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        Returns
        -------
        Tensor
            Predicted samples
        """
        # unroll the decoder in "prediction mode", i.e. with past data only
        _, state, scale, static_feat = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=None,
            future_target=None,
        )
        return self.sampling_decoder(
            F=F,
            past_target=past_target,
            time_feat=future_time_feat,
            static_feat=static_feat,
            scale=scale,
            begin_states=state,
        )
| 23,038 | 34.886293 | 116 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepar-weighted/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import DType, validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import DistributionOutput, StudentTOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import (
TimeFeature,
time_features_from_frequency_str,
get_lags_for_frequency,
)
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SetField,
Transformation,
VstackFeatures,
)
# Relative imports
from ._network import DeepARWeightPredictionNetwork, DeepARWeightTrainingNetwork
class DeepARWeightEstimator(GluonEstimator):
    """
    Construct a DeepAR estimator.
    This implements an RNN-based model, close to the one described in
    [SFG17]_.
    *Note:* the code of this model is unrelated to the implementation behind
    `SageMaker's DeepAR Forecasting Algorithm
    <https://docs.aws.amazon.com/sagemaker/latest/dg/deepar.html>`_.
    Parameters
    ----------
    freq
        Frequency of the data to train on and predict
    prediction_length
        Length of the prediction horizon
    trainer
        Trainer object to be used (default: Trainer())
    context_length
        Number of steps to unroll the RNN for before computing predictions
        (default: None, in which case context_length = prediction_length)
    num_layers
        Number of RNN layers (default: 2)
    num_cells
        Number of RNN cells for each layer (default: 40)
    cell_type
        Type of recurrent cells to use (available: 'lstm' or 'gru';
        default: 'lstm')
    dropout_rate
        Dropout regularization parameter (default: 0.1)
    use_feat_dynamic_real
        Whether to use the ``feat_dynamic_real`` field from the data
        (default: False)
    use_feat_static_cat
        Whether to use the ``feat_static_cat`` field from the data
        (default: False)
    use_feat_static_real
        Whether to use the ``feat_static_real`` field from the data
        (default: False)
    cardinality
        Number of values of each categorical feature.
        This must be set if ``use_feat_static_cat == True`` (default: None)
    embedding_dimension
        Dimension of the embeddings for categorical features
        (default: [min(50, (cat+1)//2) for cat in cardinality])
    distr_output
        Distribution to use to evaluate observations and sample predictions
        (default: StudentTOutput())
    scaling
        Whether to automatically scale the target values (default: true)
    lags_seq
        Indices of the lagged target values to use as inputs of the RNN
        (default: None, in which case these are automatically determined
        based on freq)
    time_features
        Time features to use as inputs of the RNN (default: None, in which
        case these are automatically determined based on freq)
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 100)
    dtype
        Data type of the network parameters (default: np.float32)
    weight_coef
        Loss re-weighting coefficient, forwarded to
        ``DeepARWeightTrainingNetwork`` (default: 9).  It only affects the
        training loss, not the sampled predictions.
    """
    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        trainer: Trainer = Trainer(),
        context_length: Optional[int] = None,
        num_layers: int = 2,
        num_cells: int = 40,
        cell_type: str = "lstm",
        dropout_rate: float = 0.1,
        use_feat_dynamic_real: bool = False,
        use_feat_static_cat: bool = False,
        use_feat_static_real: bool = False,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        distr_output: DistributionOutput = StudentTOutput(),
        scaling: bool = True,
        lags_seq: Optional[List[int]] = None,
        time_features: Optional[List[TimeFeature]] = None,
        num_parallel_samples: int = 100,
        dtype: DType = np.float32,
        weight_coef: float = 9,
    ) -> None:
        super().__init__(trainer=trainer, dtype=dtype)
        # Validate hyper-parameters early so misconfiguration fails fast.
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        assert num_layers > 0, "The value of `num_layers` should be > 0"
        assert num_cells > 0, "The value of `num_cells` should be > 0"
        assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
        assert (cardinality is not None and use_feat_static_cat) or (
            cardinality is None and not use_feat_static_cat
        ), "You should set `cardinality` if and only if `use_feat_static_cat=True`"
        assert cardinality is None or all(
            [c > 0 for c in cardinality]
        ), "Elements of `cardinality` should be > 0"
        assert embedding_dimension is None or all(
            [e > 0 for e in embedding_dimension]
        ), "Elements of `embedding_dimension` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"
        self.freq = freq
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.prediction_length = prediction_length
        self.distr_output = distr_output
        self.distr_output.dtype = dtype
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.dropout_rate = dropout_rate
        self.use_feat_dynamic_real = use_feat_dynamic_real
        self.use_feat_static_cat = use_feat_static_cat
        self.use_feat_static_real = use_feat_static_real
        # When static categorical features are disabled, a dummy cardinality
        # of [1] keeps the embedding layer well-defined.
        self.cardinality = (
            cardinality if cardinality and use_feat_static_cat else [1]
        )
        self.embedding_dimension = (
            embedding_dimension
            if embedding_dimension is not None
            else [min(50, (cat + 1) // 2) for cat in self.cardinality]
        )
        self.scaling = scaling
        self.lags_seq = (
            lags_seq
            if lags_seq is not None
            else get_lags_for_frequency(freq_str=freq)
            #else get_lags_for_frequency(freq_str=freq, num_lags=1)
        )
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )
        # The encoder must see enough history to cover the largest lag.
        self.history_length = self.context_length + max(self.lags_seq)
        self.num_parallel_samples = num_parallel_samples
        self.weight_coef = weight_coef
    def create_transformation(self) -> Transformation:
        """Build the data pipeline: drop unused fields, add default static
        fields, convert arrays, attach time/age features, and split each
        series into (past, future) training instances."""
        remove_field_names = [FieldName.FEAT_DYNAMIC_CAT]
        if not self.use_feat_static_real:
            remove_field_names.append(FieldName.FEAT_STATIC_REAL)
        if not self.use_feat_dynamic_real:
            remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
        return Chain(
            [RemoveFields(field_names=remove_field_names)]
            + (
                [SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
                if not self.use_feat_static_cat
                else []
            )
            + (
                [
                    SetField(
                        output_field=FieldName.FEAT_STATIC_REAL, value=[0.0]
                    )
                ]
                if not self.use_feat_static_real
                else []
            )
            + [
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_CAT,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_REAL,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.TARGET,
                    # in the following line, we add 1 for the time dimension
                    expected_ndim=1 + len(self.distr_output.event_shape),
                    dtype=self.dtype,
                ),
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                    #dummy_value=self.distr_output.value_in_support,
                    dtype=self.dtype,
                ),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=self.time_features,
                    pred_length=self.prediction_length,
                ),
                AddAgeFeature(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_AGE,
                    pred_length=self.prediction_length,
                    log_scale=True,
                    dtype=self.dtype,
                ),
                VstackFeatures(
                    output_field=FieldName.FEAT_TIME,
                    input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
                    + (
                        [FieldName.FEAT_DYNAMIC_REAL]
                        if self.use_feat_dynamic_real
                        else []
                    ),
                ),
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                    past_length=self.history_length,
                    future_length=self.prediction_length,
                    time_series_fields=[
                        FieldName.FEAT_TIME,
                        FieldName.OBSERVED_VALUES,
                    ],
                    #dummy_value=self.distr_output.value_in_support,
                ),
            ]
        )
    def create_training_network(self) -> DeepARWeightTrainingNetwork:
        """Instantiate the training network (includes weight_coef for the
        re-weighted loss)."""
        return DeepARWeightTrainingNetwork(
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
            weight_coef=self.weight_coef,
        )
    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """Copy trained parameters into a sampling network and wrap it in a
        Predictor."""
        # NOTE(review): weight_coef is deliberately not forwarded here — it
        # only shapes the training loss, so the prediction network should
        # not need it; confirm DeepARWeightPredictionNetwork's constructor
        # accepts its absence.
        prediction_network = DeepARWeightPredictionNetwork(
            num_parallel_samples=self.num_parallel_samples,
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )
        copy_parameters(trained_network, prediction_network)
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
            dtype=self.dtype,
        )
| 13,001 | 37.353982 | 94 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deeparsavedata/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional, Tuple
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.block.scaler import MeanScaler, NOPScaler
from gluonts.core.component import DType, validated
from gluonts.distribution import DistributionOutput, Distribution
from gluonts.distribution.distribution import getF
from gluonts.model.common import Tensor
from gluonts.support.util import weighted_average
def prod(xs):
    """Return the product of all elements of ``xs`` (1 for an empty iterable)."""
    result = 1
    for factor in xs:
        result = result * factor
    return result
class DeepARSaveDataNetwork(mx.gluon.HybridBlock):
    """DeepAR base network that records intermediate tensors for inspection.

    Identical to the standard DeepAR network except that it copies encoder
    inputs, lags, targets, RNN states, and distribution parameters into
    ``self.savedata``.  Because the recording calls ``asnumpy()``, the
    network must be run imperatively (not hybridized).

    NOTE(review): the default for ``scaling`` is ``False`` here (the stock
    DeepAR default ``True`` is commented out) — presumably deliberate for
    this save-data variant; confirm before reuse.
    """

    @validated()
    def __init__(
        self,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: List[int],
        lags_seq: List[int],
        #scaling: bool = True,
        scaling: bool = False,
        dtype: DType = np.float32,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling
        self.dtype = dtype
        assert len(cardinality) == len(
            embedding_dimension
        ), "embedding_dimension should be a list with the same size as cardinality"
        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        lags_seq.sort()
        self.lags_seq = lags_seq
        self.distr_output = distr_output
        RnnCell = {"lstm": mx.gluon.rnn.LSTMCell, "gru": mx.gluon.rnn.GRUCell}[
            self.cell_type
        ]
        self.target_shape = distr_output.event_shape
        # TODO: is the following restriction needed?
        assert (
            len(self.target_shape) <= 1
        ), "Argument `target_shape` should be a tuple with 1 element at most"
        with self.name_scope():
            self.proj_distr_args = distr_output.get_args_proj()
            self.rnn = mx.gluon.rnn.HybridSequentialRNNCell()
            for k in range(num_layers):
                cell = RnnCell(hidden_size=num_cells)
                # residual connections on all but the first layer
                cell = mx.gluon.rnn.ResidualCell(cell) if k > 0 else cell
                cell = (
                    mx.gluon.rnn.ZoneoutCell(cell, zoneout_states=dropout_rate)
                    if dropout_rate > 0.0
                    else cell
                )
                self.rnn.add(cell)
            self.rnn.cast(dtype=dtype)
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=embedding_dimension,
                dtype=self.dtype,
            )
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)
        # save data
        self.reset_savedata()

    def reset_savedata(self):
        """(Re)initialize the buffers that collect intermediate tensors."""
        self.savedata = {}
        self.savedata['input'] = []
        self.savedata['target'] = []
        self.savedata['lags'] = []
        self.savedata['theta'] = []
        self.savedata['hstate'] = []
        # Bug fix: DeepARSaveDataPredictionNetwork.sampling_decoder also
        # appends to 'rnnoutput'; without this key the first prediction
        # raised a KeyError.
        self.savedata['rnnoutput'] = []

    @staticmethod
    def get_lagged_subsequences(
        F,
        sequence: Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> Tensor:
        """
        Returns lagged subsequences of a given sequence.
        Parameters
        ----------
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted.
            Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.
        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and
            I = len(indices), containing lagged subsequences. Specifically,
            lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        # we must have: sequence_length - lag_index - subsequences_length >= 0
        # for all lag_index, hence the following assert
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, "
            f"found lag {max(indices)} while history length is only "
            f"{sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)
        lagged_values = []
        for lag_index in indices:
            begin_index = -lag_index - subsequences_length
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(
                F.slice_axis(
                    sequence, axis=1, begin=begin_index, end=end_index
                )
            )
        return F.stack(*lagged_values, axis=-1)

    def unroll_encoder(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Optional[
            Tensor
        ],  # (batch_size, prediction_length, num_features)
        future_target: Optional[
            Tensor
        ],  # (batch_size, prediction_length, *target_shape)
    ) -> Tuple[Tensor, List, Tensor, Tensor]:
        """
        Unrolls the LSTM encoder over past and, if present, future data.
        Returns outputs and state of the encoder, plus the scale of past_target
        and a vector of static features that was constructed and fed as input
        to the encoder.
        All tensor arguments should have NTC layout.

        Side effect: appends copies of the encoder input and the lag tensor
        to ``self.savedata['input']`` / ``self.savedata['lags']``.
        """
        if future_time_feat is None or future_target is None:
            # prediction mode: condition on the context window only
            time_feat = past_time_feat.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            )
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        else:
            # training mode: condition on context plus the known future
            time_feat = F.concat(
                past_time_feat.slice_axis(
                    axis=1,
                    begin=self.history_length - self.context_length,
                    end=None,
                ),
                future_time_feat,
                dim=1,
            )
            sequence = F.concat(past_target, future_target, dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags = self.get_lagged_subsequences(
            F=F,
            sequence=sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length,
        )
        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
            past_observed_values.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
        )
        # (batch_size, num_features)
        embedded_cat = self.embedder(feat_static_cat)
        # in addition to embedding features, use the log scale as it can help
        # prediction too
        # (batch_size, num_features + prod(target_shape))
        static_feat = F.concat(
            embedded_cat,
            feat_static_real,
            F.log(scale)
            if len(self.target_shape) == 0
            else F.log(scale.squeeze(axis=1)),
            dim=1,
        )
        # (batch_size, subsequences_length, num_features + 1)
        repeated_static_feat = static_feat.expand_dims(axis=1).repeat(
            axis=1, repeats=subsequences_length
        )
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))
        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = F.reshape(
            data=lags_scaled,
            shape=(
                -1,
                subsequences_length,
                len(self.lags_seq) * prod(self.target_shape),
            ),
        )
        # (batch_size, sub_seq_len, input_dim)
        inputs = F.concat(input_lags, time_feat, repeated_static_feat, dim=-1)
        # save data here (requires imperative mode: asnumpy on NDArray)
        self.savedata['input'].append(inputs.asnumpy().copy())
        #self.savedata.append(inputs)
        self.savedata['lags'].append(lags.asnumpy().copy())
        #print(self.lags_seq)
        # unroll encoder
        outputs, state = self.rnn.unroll(
            inputs=inputs,
            length=subsequences_length,
            layout="NTC",
            merge_outputs=True,
            begin_state=self.rnn.begin_state(
                func=F.zeros,
                dtype=self.dtype,
                batch_size=inputs.shape[0]
                if isinstance(inputs, mx.nd.NDArray)
                else 0,
            ),
        )
        # outputs: (batch_size, seq_len, num_cells)
        # state: list of (batch_size, num_cells) tensors
        # scale: (batch_size, 1, *target_shape)
        # static_feat: (batch_size, num_features + prod(target_shape))
        return outputs, state, scale, static_feat
class DeepARSaveDataTrainingNetwork(DeepARSaveDataNetwork):
    """Training network: unrolls the encoder over context + future data and
    returns the observed-value-masked negative log-likelihood loss, while
    recording the target sequence into ``self.savedata['target']``."""
    def distribution(
        self,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Distribution:
        """
        Returns the distribution predicted by the model on the range of
        past_target and future_target.
        The distribution is obtained by unrolling the network with the true
        target, this is also the distribution that is being minimized during
        training. This can be used in anomaly detection, see for instance
        examples/anomaly_detection.py.
        Input arguments are the same as for the hybrid_forward method.
        Returns
        -------
        Distribution
            a distribution object whose mean has shape:
            (batch_size, context_length + prediction_length).
        """
        # unroll the decoder in "training mode"
        # i.e. by providing future data as well
        F = getF(feat_static_cat)
        rnn_outputs, _, scale, _ = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
        )
        # project RNN outputs to the distribution's parameter space
        distr_args = self.proj_distr_args(rnn_outputs)
        return self.distr_output.distribution(distr_args, scale=scale)
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
        future_observed_values: Tensor,
    ) -> Tensor:
        """
        Computes the loss for training DeepARSaveData, all inputs tensors representing
        time series have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape, seq_len)
        future_time_feat : (batch_size, prediction_length, num_features)
        future_target : (batch_size, prediction_length, *target_shape)
        future_observed_values : (batch_size, prediction_length, *target_shape)
        Returns loss with shape (batch_size, context + prediction_length, 1)
        -------

        Note: despite the ``-> Tensor`` annotation, this returns the tuple
        (weighted_loss, loss), matching the GluonTS training-network contract.
        """
        distr = self.distribution(
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
            future_observed_values=future_observed_values,
        )
        # put together target sequence
        # (batch_size, seq_len, *target_shape)
        target = F.concat(
            past_target.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            ),
            future_target,
            dim=1,
        )
        # (batch_size, seq_len)
        loss = distr.loss(target)
        #save target in training
        self.savedata['target'].append(target.asnumpy().copy())
        # (batch_size, seq_len, *target_shape)
        observed_values = F.concat(
            past_observed_values.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=self.history_length,
            ),
            future_observed_values,
            dim=1,
        )
        # mask the loss at one time step iff one or more observations is missing in the target dimensions
        # (batch_size, seq_len)
        loss_weights = (
            observed_values
            if (len(self.target_shape) == 0)
            else observed_values.min(axis=-1, keepdims=False)
        )
        weighted_loss = weighted_average(
            F=F, x=loss, weights=loss_weights, axis=1
        )
        return weighted_loss, loss
class DeepARSaveDataPredictionNetwork(DeepARSaveDataNetwork):
    """Prediction network that, in addition to sampling forecast paths,
    records the last decoder step's hidden state, RNN output, distribution
    parameters, and sampled values into ``self.savedata``."""

    @validated()
    def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
        super().__init__(**kwargs)
        self.num_parallel_samples = num_parallel_samples
        # for decoding the lags are shifted by one, at the first time-step
        # of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]

    def sampling_decoder(
        self,
        F,
        static_feat: Tensor,
        past_target: Tensor,
        time_feat: Tensor,
        scale: Tensor,
        begin_states: List,
    ) -> Tensor:
        """
        Computes sample paths by unrolling the LSTM starting with a initial
        input and state.
        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, 1, 1).
        begin_states : List
            list of initial states for the LSTM layers.
            the shape of each tensor of the list should be (batch_size, num_cells)
        Returns
        --------
        Tensor
            A tensor containing sampled paths.
            Shape: (batch_size, num_sample_paths, prediction_length).
        """
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_time_feat = time_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_static_feat = static_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_scale = scale.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_states = [
            s.repeat(repeats=self.num_parallel_samples, axis=0)
            for s in begin_states
        ]
        future_samples = []
        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags = self.get_lagged_subsequences(
                F=F,
                sequence=repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags_scaled = F.broadcast_div(
                lags, repeated_scale.expand_dims(axis=-1)
            )
            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = F.reshape(
                data=lags_scaled,
                shape=(-1, 1, prod(self.target_shape) * len(self.lags_seq)),
            )
            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            decoder_input = F.concat(
                input_lags,
                repeated_time_feat.slice_axis(axis=1, begin=k, end=k + 1),
                repeated_static_feat,
                dim=-1,
            )
            # output shape: (batch_size * num_samples, 1, num_cells)
            # state shape: (batch_size * num_samples, num_cells)
            rnn_outputs, repeated_states = self.rnn.unroll(
                inputs=decoder_input,
                length=1,
                begin_state=repeated_states,
                layout="NTC",
                merge_outputs=True,
            )
            distr_args = self.proj_distr_args(rnn_outputs)
            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(
                distr_args, scale=repeated_scale
            )
            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample(dtype=self.dtype)
            # (batch_size * num_samples, seq_len, *target_shape)
            repeated_past_target = F.concat(
                repeated_past_target, new_samples, dim=1
            )
            future_samples.append(new_samples)
            # save only the last decoder step
            if k == self.prediction_length - 1:
                # Bug fix: `repeated_states` is a *list* of per-layer state
                # tensors; the original code called .asnumpy() on the list
                # itself, which raised AttributeError.
                self.savedata['hstate'].append(
                    [s.asnumpy().copy() for s in repeated_states]
                )
                # Bug fix: 'rnnoutput' may not be pre-created by
                # reset_savedata(); setdefault avoids a KeyError on the
                # first prediction batch.
                self.savedata.setdefault('rnnoutput', []).append(
                    rnn_outputs.asnumpy().copy()
                )
                # NOTE(review): distr_args is stored as live MXNet tensors
                # (not copied to numpy) — presumably intentional; confirm.
                self.savedata['theta'].append(distr_args)
                self.savedata['target'].append(new_samples.asnumpy().copy())
        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = F.concat(*future_samples, dim=1)
        # (batch_size, num_samples, prediction_length, *target_shape)
        return samples.reshape(
            shape=(
                (-1, self.num_parallel_samples)
                + (self.prediction_length,)
                + self.target_shape
            )
        )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        feat_static_real: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, history_length, num_features)
        past_target: Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: Tensor,  # (batch_size, prediction_length, num_features)
    ) -> Tensor:
        """
        Predicts samples, all tensors should have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        Returns
        -------
        Tensor
            Predicted samples
        """
        # unroll the decoder in "prediction mode", i.e. with past data only
        _, state, scale, static_feat = self.unroll_encoder(
            F=F,
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=None,
            future_target=None,
        )
        return self.sampling_decoder(
            F=F,
            past_target=past_target,
            time_feat=future_time_feat,
            static_feat=static_feat,
            scale=scale,
            begin_states=state,
        )
| 22,835 | 34.68125 | 116 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deeparsavedata/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import numpy as np
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import DType, validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import DistributionOutput, StudentTOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import (
TimeFeature,
time_features_from_frequency_str,
get_lags_for_frequency,
)
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SetField,
Transformation,
VstackFeatures,
)
# Relative imports
from ._network import DeepARSaveDataPredictionNetwork, DeepARSaveDataTrainingNetwork
class DeepARSaveDataEstimator(GluonEstimator):
    """
    Construct a DeepARSaveData estimator.
    This implements an RNN-based model, close to the one described in
    [SFG17]_.
    *Note:* the code of this model is unrelated to the implementation behind
    `SageMaker's DeepARSaveData Forecasting Algorithm
    <https://docs.aws.amazon.com/sagemaker/latest/dg/deepar.html>`_.
    Parameters
    ----------
    freq
        Frequency of the data to train on and predict
    prediction_length
        Length of the prediction horizon
    trainer
        Trainer object to be used (default: Trainer())
    context_length
        Number of steps to unroll the RNN for before computing predictions
        (default: None, in which case context_length = prediction_length)
    num_layers
        Number of RNN layers (default: 2)
    num_cells
        Number of RNN cells for each layer (default: 40)
    cell_type
        Type of recurrent cells to use (available: 'lstm' or 'gru';
        default: 'lstm')
    dropout_rate
        Dropout regularization parameter (default: 0.1)
    use_feat_dynamic_real
        Whether to use the ``feat_dynamic_real`` field from the data
        (default: False)
    use_feat_static_cat
        Whether to use the ``feat_static_cat`` field from the data
        (default: False)
    use_feat_static_real
        Whether to use the ``feat_static_real`` field from the data
        (default: False)
    cardinality
        Number of values of each categorical feature.
        This must be set if ``use_feat_static_cat == True`` (default: None)
    embedding_dimension
        Dimension of the embeddings for categorical features
        (default: [min(50, (cat+1)//2) for cat in cardinality])
    distr_output
        Distribution to use to evaluate observations and sample predictions
        (default: StudentTOutput())
    scaling
        Whether to automatically scale the target values (default: true)
    lags_seq
        Indices of the lagged target values to use as inputs of the RNN
        (default: None, in which case these are automatically determined
        based on freq)
    time_features
        Time features to use as inputs of the RNN (default: None, in which
        case these are automatically determined based on freq)
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 100)
    """

    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        trainer: Trainer = Trainer(),
        context_length: Optional[int] = None,
        num_layers: int = 2,
        num_cells: int = 40,
        cell_type: str = "lstm",
        dropout_rate: float = 0.1,
        use_feat_dynamic_real: bool = False,
        use_feat_static_cat: bool = False,
        use_feat_static_real: bool = False,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        distr_output: DistributionOutput = StudentTOutput(),
        scaling: bool = True,
        lags_seq: Optional[List[int]] = None,
        time_features: Optional[List[TimeFeature]] = None,
        num_parallel_samples: int = 100,
        dtype: DType = np.float32,
    ) -> None:
        super().__init__(trainer=trainer, dtype=dtype)
        # Validate hyper-parameters up front so misconfiguration fails fast.
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        assert num_layers > 0, "The value of `num_layers` should be > 0"
        assert num_cells > 0, "The value of `num_cells` should be > 0"
        assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
        # `cardinality` and `use_feat_static_cat` must be set together.
        assert (cardinality is not None and use_feat_static_cat) or (
            cardinality is None and not use_feat_static_cat
        ), "You should set `cardinality` if and only if `use_feat_static_cat=True`"
        assert cardinality is None or all(
            [c > 0 for c in cardinality]
        ), "Elements of `cardinality` should be > 0"
        assert embedding_dimension is None or all(
            [e > 0 for e in embedding_dimension]
        ), "Elements of `embedding_dimension` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"
        self.freq = freq
        # The conditioning window defaults to the prediction horizon.
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.prediction_length = prediction_length
        self.distr_output = distr_output
        self.distr_output.dtype = dtype
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.dropout_rate = dropout_rate
        self.use_feat_dynamic_real = use_feat_dynamic_real
        self.use_feat_static_cat = use_feat_static_cat
        self.use_feat_static_real = use_feat_static_real
        # When static categorical features are disabled, fall back to a
        # single dummy category so the embedding layer still has a shape.
        self.cardinality = (
            cardinality if cardinality and use_feat_static_cat else [1]
        )
        self.embedding_dimension = (
            embedding_dimension
            if embedding_dimension is not None
            else [min(50, (cat + 1) // 2) for cat in self.cardinality]
        )
        self.scaling = scaling
        # Lag indices and time features are derived from the frequency
        # string when not given explicitly.
        self.lags_seq = (
            lags_seq
            if lags_seq is not None
            else get_lags_for_frequency(freq_str=freq)
        )
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )
        # History must cover the context window plus the largest lag.
        self.history_length = self.context_length + max(self.lags_seq)
        self.num_parallel_samples = num_parallel_samples
        # Reference to the training network, filled in by
        # create_training_network (the "save data" feature of this variant).
        self.network = None

    def create_transformation(self) -> Transformation:
        """
        Build the preprocessing chain applied to each dataset entry:
        drop unused fields, fill defaults for disabled static features,
        convert arrays, add observed-values / time / age features, and
        finally slice training instances out of each series.
        """
        remove_field_names = [FieldName.FEAT_DYNAMIC_CAT]
        if not self.use_feat_static_real:
            remove_field_names.append(FieldName.FEAT_STATIC_REAL)
        if not self.use_feat_dynamic_real:
            remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
        return Chain(
            [RemoveFields(field_names=remove_field_names)]
            + (
                [SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
                if not self.use_feat_static_cat
                else []
            )
            + (
                [
                    SetField(
                        output_field=FieldName.FEAT_STATIC_REAL, value=[0.0]
                    )
                ]
                if not self.use_feat_static_real
                else []
            )
            + [
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_CAT,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_REAL,
                    expected_ndim=1,
                    dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.TARGET,
                    # in the following line, we add 1 for the time dimension
                    expected_ndim=1 + len(self.distr_output.event_shape),
                    dtype=self.dtype,
                ),
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                    dtype=self.dtype,
                ),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=self.time_features,
                    pred_length=self.prediction_length,
                ),
                AddAgeFeature(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_AGE,
                    pred_length=self.prediction_length,
                    log_scale=True,
                    dtype=self.dtype,
                ),
                # Stack time, age and (optionally) dynamic real features
                # into a single FEAT_TIME array.
                VstackFeatures(
                    output_field=FieldName.FEAT_TIME,
                    input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
                    + (
                        [FieldName.FEAT_DYNAMIC_REAL]
                        if self.use_feat_dynamic_real
                        else []
                    ),
                ),
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                    past_length=self.history_length,
                    future_length=self.prediction_length,
                    time_series_fields=[
                        FieldName.FEAT_TIME,
                        FieldName.OBSERVED_VALUES,
                    ],
                ),
            ]
        )

    def create_training_network(self) -> DeepARSaveDataTrainingNetwork:
        """
        Instantiate the training network and keep a reference to it in
        ``self.network`` so it can be inspected after training.
        """
        net = DeepARSaveDataTrainingNetwork(
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )
        # Saved for later inspection — this is what distinguishes the
        # "SaveData" variant from the plain DeepAR estimator.
        self.network = net
        return net

    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """
        Build a prediction network with the same configuration, copy the
        trained weights into it, and wrap it into a predictor.
        """
        prediction_network = DeepARSaveDataPredictionNetwork(
            num_parallel_samples=self.num_parallel_samples,
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=self.scaling,
            dtype=self.dtype,
        )
        copy_parameters(trained_network, prediction_network)
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
            dtype=self.dtype,
        )
| 12,805 | 36.775811 | 94 | py |
rankpredictor | rankpredictor-master/src/indycar/model/mlp-original/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.scaler import MeanScaler, NOPScaler
from gluonts.core.component import validated
from gluonts.distribution import Distribution, DistributionOutput
from gluonts.model.common import Tensor
class MLPNetworkBase(mx.gluon.HybridBlock):
    """
    Abstract base class to implement feed-forward networks for probabilistic
    time series prediction.

    This class does not implement hybrid_forward: this is delegated
    to the two subclasses MLPTrainingNetwork and
    MLPPredictionNetwork, that define respectively how to
    compute the loss and how to generate predictions.

    Parameters
    ----------
    num_hidden_dimensions
        Number of hidden nodes in each layer.
    prediction_length
        Number of time units to predict.
    context_length
        Number of time units that condition the predictions.
    batch_normalization
        Whether to use batch normalization.
    mean_scaling
        Scale the network input by the data mean and the network output by
        its inverse.
    dropout
        Dropout rate applied after every hidden layer.
    distr_output
        Distribution to fit.
    kwargs
    """

    # Needs the validated decorator so that arguments types are checked and
    # the block can be serialized.
    @validated()
    def __init__(
        self,
        num_hidden_dimensions: List[int],
        prediction_length: int,
        context_length: int,
        batch_normalization: bool,
        mean_scaling: bool,
        dropout: float,
        distr_output: DistributionOutput,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_hidden_dimensions = num_hidden_dimensions
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.batch_normalization = batch_normalization
        self.mean_scaling = mean_scaling
        self.distr_output = distr_output
        with self.name_scope():
            self.distr_args_proj = self.distr_output.get_args_proj()
            self.mlp = mx.gluon.nn.HybridSequential()
            dims = self.num_hidden_dimensions
            # All but the last entry of `dims` define hidden layers; the
            # last entry is the per-time-step output width.
            for units in dims[:-1]:
                self.mlp.add(mx.gluon.nn.Dense(units=units, activation="relu"))
                if self.batch_normalization:
                    self.mlp.add(mx.gluon.nn.BatchNorm())
                # regularize each hidden layer with dropout
                self.mlp.add(mx.gluon.nn.Dropout(dropout))
            self.mlp.add(mx.gluon.nn.Dense(units=prediction_length * dims[-1]))
            # Reshape the flat output to (batch, prediction_length, dims[-1]).
            self.mlp.add(
                mx.gluon.nn.HybridLambda(
                    lambda F, o: F.reshape(
                        o, (-1, prediction_length, dims[-1])
                    )
                )
            )
            # Kept for interface compatibility; get_distr currently uses a
            # fixed unit scale instead of this scaler.
            self.scaler = MeanScaler() if mean_scaling else NOPScaler()

    def get_distr(self, F, feat: Tensor) -> Distribution:
        """
        Applies the feed-forward network to the given features and maps the
        output to a probability distribution for future observations.

        Parameters
        ----------
        F
        feat
            Tensor of input features.
            Shape: (batch_size, context_length, feature_dim) — assumed from
            the mean over axis 1 below; confirm against callers.

        Returns
        -------
        Distribution
            The predicted probability distribution for future observations.
        """
        # The scale is fixed to ones (mean over the time axis of a
        # ones-tensor), i.e. the distribution is not rescaled by the data.
        target_scale = F.ones_like(feat).mean(axis=1)
        mlp_outputs = self.mlp(feat)
        distr_args = self.distr_args_proj(mlp_outputs)
        return self.distr_output.distribution(
            distr_args, scale=target_scale.expand_dims(axis=1)
        )
class MLPTrainingNetwork(MLPNetworkBase):
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self, F, target: Tensor, feat: Tensor
    ) -> Tensor:
        """
        Fits a probability distribution to the given features and returns
        the loss of the observed future values under that distribution.

        Parameters
        ----------
        F
        target
            Tensor of observed target values.
            Shape: (batch_size, prediction_length, target_dim).
        feat
            Tensor of input features conditioning the prediction.

        Returns
        -------
        Tensor
            Loss tensor. Shape: (batch_size, ).
        """
        predicted_distr = self.get_distr(F, feat)
        # per-time-step loss: (batch_size, prediction_length, target_dim)
        step_loss = predicted_distr.loss(target)
        # average over the prediction horizon -> (batch_size, )
        return step_loss.mean(axis=1)
class MLPPredictionNetwork(MLPNetworkBase):
    @validated()
    def __init__(
        self, num_parallel_samples: int = 100, *args, **kwargs
    ) -> None:
        super().__init__(*args, **kwargs)
        self.num_parallel_samples = num_parallel_samples

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, feat: Tensor) -> Tensor:
        """
        Fits a probability distribution to the given features and draws
        prediction samples from it.

        Parameters
        ----------
        F
        feat
            Tensor of input features conditioning the prediction.

        Returns
        -------
        Tensor
            Prediction sample. Shape: (batch_size, samples, prediction_length).
        """
        predicted_distr = self.get_distr(F, feat)
        # (num_samples, batch_size, prediction_length)
        drawn = predicted_distr.sample(self.num_parallel_samples)
        # move the sample axis behind the batch axis
        return drawn.swapaxes(0, 1)
| 6,573 | 32.20202 | 81 | py |
rankpredictor | rankpredictor-master/src/indycar/model/mlp-original/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import DistributionOutput, StudentTOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.trainer import Trainer
#from gluonts.transform import Identity, RemoveFields
from gluonts.transform import RemoveFields
from gluonts.transform import (
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
Transformation,
)
# Relative imports
from ._network import (
MLPPredictionNetwork,
MLPTrainingNetwork,
)
class MLPEstimator(GluonEstimator):
    """
    MLPEstimator shows how to build a simple MLP model predicting
    the next target time-steps given the previous ones.

    Given that we want to define a gluon model trainable by SGD, we inherit the
    parent class `GluonEstimator` that handles most of the logic for fitting a
    neural-network.

    We thus only have to define:

    1. How the data is transformed before being fed to our model::

        def create_transformation(self) -> Transformation

    2. How the training happens::

        def create_training_network(self) -> HybridBlock

    3. how the predictions can be made for a batch given a trained network::

        def create_predictor(
            self,
            transformation: Transformation,
            trained_net: HybridBlock,
        ) -> Predictor

    Parameters
    ----------
    freq
        Time granularity of the data
    prediction_length
        Length of the prediction horizon
    trainer
        Trainer object to be used (default: Trainer())
    num_hidden_dimensions
        Number of hidden nodes in each layer (default: [40, 40])
    context_length
        Number of time units that condition the predictions
        (default: None, in which case context_length = prediction_length)
    distr_output
        Distribution to fit (default: StudentTOutput())
    batch_normalization
        Whether to use batch normalization (default: False)
    mean_scaling
        Scale the network input by the data mean and the network output by
        its inverse (default: False)
    dropout
        Dropout rate applied after each hidden layer (default: 0.5)
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 100)
    """

    # The validated() decorator makes sure that parameters are checked by
    # Pydantic and allows to serialize/print models. Note that all parameters
    # have defaults except for `freq` and `prediction_length`. which is
    # recommended in GluonTS to allow to compare models easily.
    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        trainer: Trainer = Trainer(),
        num_hidden_dimensions: Optional[List[int]] = None,
        context_length: Optional[int] = None,
        distr_output: DistributionOutput = StudentTOutput(),
        batch_normalization: bool = False,
        mean_scaling: bool = False,
        dropout: float = 0.5,
        num_parallel_samples: int = 100,
    ) -> None:
        """
        Defines an estimator. All parameters should be serializable.
        """
        super().__init__(trainer=trainer)
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        # BUGFIX: the original asserted on the list comprehension itself,
        # which is truthy for any non-empty list, so non-positive dimensions
        # were never rejected. Check each element explicitly (and keep
        # rejecting an explicitly empty list, as the original did).
        assert num_hidden_dimensions is None or (
            len(num_hidden_dimensions) > 0
            and all(d > 0 for d in num_hidden_dimensions)
        ), "Elements of `num_hidden_dimensions` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"
        self.num_hidden_dimensions = (
            num_hidden_dimensions
            if num_hidden_dimensions is not None
            else [40, 40]
        )
        self.prediction_length = prediction_length
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.freq = freq
        self.distr_output = distr_output
        self.batch_normalization = batch_normalization
        self.mean_scaling = mean_scaling
        self.num_parallel_samples = num_parallel_samples
        self.dropout = dropout

    # Here we do only a simple operation to convert the input data to a form
    # that can be digested by our model. For a more complex transformation
    # example, see the `gluonts.model.deepar` transformation that includes
    # time features, age feature, observed values indicator, ...
    def create_transformation(self) -> Transformation:
        """
        Effectively the identity: removing the (normally absent) field
        'del' leaves entries unchanged.
        """
        return Chain(
            [RemoveFields(field_names=['del'])]
        )

    # Defines the network; we get to see one batch to initialize it.
    # The network should return at least one tensor that is used as a loss
    # to minimize in the training loop. Several tensors can be returned for
    # instance for analysis, see DeepARTrainingNetwork for an example.
    def create_training_network(self) -> HybridBlock:
        """
        Instantiate the MLP training network from this configuration.
        """
        return MLPTrainingNetwork(
            num_hidden_dimensions=self.num_hidden_dimensions,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            distr_output=self.distr_output,
            batch_normalization=self.batch_normalization,
            mean_scaling=self.mean_scaling,
            dropout=self.dropout,
        )

    # We now define how the prediction happens given that we are provided a
    # training network.
    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """
        Build a prediction network sharing the trained parameters and wrap
        it into a predictor.
        """
        prediction_network = MLPPredictionNetwork(
            num_hidden_dimensions=self.num_hidden_dimensions,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            distr_output=self.distr_output,
            batch_normalization=self.batch_normalization,
            mean_scaling=self.mean_scaling,
            dropout=self.dropout,
            params=trained_network.collect_params(),
            num_parallel_samples=self.num_parallel_samples,
        )
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
        )
| 7,518 | 36.78392 | 108 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer/trans_encoder.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
from gluonts.model.transformer.layers import (
TransformerProcessBlock,
TransformerFeedForward,
MultiHeadSelfAttention,
InputLayer,
)
class TransformerEncoder(HybridBlock):
    @validated()
    def __init__(self, encoder_length: int, config: Dict, **kwargs) -> None:
        super().__init__(**kwargs)
        self.encoder_length = encoder_length
        with self.name_scope():
            model_dim = config["model_dim"]
            dropout = config["dropout_rate"]
            # embed the raw inputs into the model dimension
            self.enc_input_layer = InputLayer(model_size=model_dim)
            # pre/post processing around the self-attention sub-layer
            self.enc_pre_self_att = TransformerProcessBlock(
                sequence=config["pre_seq"],
                dropout=dropout,
                prefix="pretransformerprocessblock_",
            )
            self.enc_self_att = MultiHeadSelfAttention(
                att_dim_in=model_dim,
                heads=config["num_heads"],
                att_dim_out=model_dim,
                dropout=dropout,
                prefix="multiheadselfattention_",
            )
            self.enc_post_self_att = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=dropout,
                prefix="postselfatttransformerprocessblock_",
            )
            # position-wise feed-forward sub-layer plus its post-process block
            self.enc_ff = TransformerFeedForward(
                inner_dim=model_dim * config["inner_ff_dim_scale"],
                out_dim=model_dim,
                act_type=config["act_type"],
                dropout=dropout,
                prefix="transformerfeedforward_",
            )
            self.enc_post_ff = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=dropout,
                prefix="postfftransformerprocessblock_",
            )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, data: Tensor) -> Tensor:
        """
        Run one transformer encoder block: self-attention followed by a
        position-wise feed-forward layer, each wrapped in its pre/post
        process blocks.
        """
        embedded = self.enc_input_layer(data)
        # self-attention sub-layer (no mask on the encoder side)
        att_input = self.enc_pre_self_att(embedded, None)
        att_output, _ = self.enc_self_att(att_input)
        att_res = self.enc_post_self_att(att_output, embedded)
        # feed-forward sub-layer
        ff_output = self.enc_ff(att_res)
        return self.enc_post_ff(ff_output, att_res)
| 3,242 | 33.5 | 118 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer/layers.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict, Optional, Tuple
# Third-party imports
import mxnet as mx
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.model.common import Tensor
def split_heads(F, x: Tensor, dim_per_head: int, heads: int) -> Tensor:
    r"""
    Fold the head dimension into the batch dimension: the last axis is split
    into ``heads`` chunks of ``dim_per_head`` each, and the head axis is
    merged into the batch axis.

    Parameters
    ----------
    x
        Tensor of shape (batch_size, time_length, dim).
    dim_per_head
        Dimension per head
    heads
        Number of heads

    Returns
    -------
    Tensor of shape (batch_size * heads, time_length, dim_per_head).
    """
    # (batch_size, time_length, heads, dim_per_head)
    by_head = F.reshape(data=x, shape=(0, -1, heads, dim_per_head))
    # (batch_size, heads, time_length, dim_per_head)
    by_head = F.transpose(data=by_head, axes=(0, 2, 1, 3))
    # merge batch and head axes: (batch_size * heads, time_length, dim_per_head)
    return F.reshape(data=by_head, shape=(-3, -1, dim_per_head))
def dot_attention(
    F,
    queries: Tensor,
    keys: Tensor,
    values: Tensor,
    mask: Optional[Tensor] = None,
    dropout: float = 0.0,
) -> Tensor:
    r"""
    Scaled-dot-product style attention (scaling is done by the caller):
    softmax(queries · keysᵀ [+ mask]) · values, with optional dropout on
    the attention weights.

    Parameters
    ----------
    queries
        Attention queries of shape (n, lq, d)
    keys
        Attention keys of shape (n, lk, d)
    values
        Attention values of shape (n, lk, dv)
    mask
        Optional mask tensor added to the raw scores
    dropout
        Dropout rate

    Returns
    -------
    'Context' vectors for each query of shape (n, lq, dv)
    """
    # raw scores: (n, lq, lk)
    scores = F.batch_dot(lhs=queries, rhs=keys, transpose_b=True)
    if mask is not None:
        scores = F.broadcast_add(scores, mask)
    weights = F.softmax(scores, axis=-1)
    if dropout > 0.0:
        weights = F.Dropout(weights, p=dropout)
    # (n, lq, lk) x (n, lk, dv) -> (n, lq, dv)
    return F.batch_dot(lhs=weights, rhs=values)
def combine_heads(F, x: Tensor, dim_per_head: int, heads: int) -> Tensor:
    r"""
    Inverse of :func:`split_heads`: unfold the head axis out of the batch
    axis and merge it back into the feature axis.

    Parameters
    ----------
    x
        Tensor of shape (batch_size * heads, time_length, dim_per_head)
    dim_per_head
        Dimension per head
    heads
        Number of heads

    Returns
    -------
    Tensor of shape (batch_size, time_length, dim)
    """
    # (batch_size, heads, time_length, dim_per_head)
    unfolded = F.reshape(data=x, shape=(-4, -1, heads, 0, dim_per_head))
    # (batch_size, time_length, heads, dim_per_head)
    unfolded = F.transpose(unfolded, axes=(0, 2, 1, 3))
    # (batch_size, time_length, dim)
    return F.reshape(unfolded, shape=(-1, 0, dim_per_head * heads))
class LayerNormalization(HybridBlock):
    """
    Layer normalization as proposed in [BKH16]_, applied over the last
    dimension of the input and delegated to Gluon's ``LayerNorm``.
    """

    def __init__(
        self,
        scale_init: str = "ones",
        shift_init: str = "zeros",
        eps: float = 1e-06,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.scale_init = scale_init
        self.shift_init = shift_init
        with self.name_scope():
            # gamma/beta correspond to the scale/shift parameters
            self.lnorm = mx.gluon.nn.LayerNorm(
                axis=-1,
                gamma_initializer=self.scale_init,
                beta_initializer=self.shift_init,
                epsilon=eps,
            )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, data: Tensor) -> Tensor:
        r"""
        Normalize the hidden units of ``data`` as
        scale * (data - mean) / sqrt(var + eps) + shift
        over the last dimension of the input.

        Parameters
        ----------
        data
            Data to normalize of shape (d0, ..., dn, num_hidden)

        Returns
        -------
        Normalized inputs of shape: (d0, ..., dn, num_hidden)
        """
        return self.lnorm(data)
class InputLayer(HybridBlock):
    r"""
    Projects the input to ``model_size`` with a single dense layer, i.e.,
    (batch_size, time_length, input_dim) -> (batch_size, time_length, model_size)
    """

    def __init__(self, model_size: int = 64, **kwargs) -> None:
        super().__init__(**kwargs)
        self.model_size = model_size
        with self.name_scope():
            # flatten=False keeps the time axis intact
            self.net = mx.gluon.nn.Dense(
                units=self.model_size, flatten=False
            )

    def hybrid_forward(self, F, data: Tensor, *args):
        return self.net(data)
class MultiHeadAttentionBase(HybridBlock):
    """
    Base class for multi-head attention: holds the shared output projection
    and the head bookkeeping; subclasses provide queries/keys/values.

    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """

    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        assert (
            att_dim_in % heads == 0
        ), "Number of heads {} must divide attention att_dim_in {}".format(
            heads, att_dim_in
        )
        self.att_dim_in = att_dim_in
        self.heads = heads
        self.att_dim_out = att_dim_out
        self.dropout = dropout
        self.dim_per_head = self.att_dim_in // self.heads
        with self.name_scope():
            # final linear projection applied to the combined heads
            self.dense_att = mx.gluon.nn.Dense(
                units=self.att_dim_out, flatten=False
            )

    def _attend(
        self,
        F,
        queries: Tensor,
        keys: Tensor,
        values: Tensor,
        mask: Optional[Tensor] = None,
    ) -> Tensor:
        r"""
        Compute the context vectors of multi-head dot attention.

        Parameters
        ----------
        queries
            Queries tensor of shape (batch_size, query_max_length, dim)
        keys
            Keys tensor of shape (batch_size, memory_max_length, dim)
        values
            Values tensor of shape (batch_size, memory_max_length, dim)
        mask

        Returns
        -------
        Context vectors of shape (batch_size, query_max_length, att_dim_out)
        """
        # pre-scale the queries by 1/sqrt(dim_per_head)
        scaled_queries = queries * (self.dim_per_head ** -0.5)
        # fold heads into the batch axis: (batch_size * heads, length, dim_per_head)
        q = split_heads(F, scaled_queries, self.dim_per_head, self.heads)
        k = split_heads(F, keys, self.dim_per_head, self.heads)
        v = split_heads(F, values, self.dim_per_head, self.heads)
        # (batch_size * heads, query_max_length, dim_per_head)
        ctx = dot_attention(F, q, k, v, mask=mask, dropout=self.dropout)
        # back to (batch_size, query_max_length, input_dim)
        ctx = combine_heads(F, ctx, self.dim_per_head, self.heads)
        # project to (batch_size, query_max_length, att_dim_out)
        return self.dense_att(ctx)

    def hybrid_forward(self, F, *args, **kwargs):
        # subclasses define how queries/keys/values are produced
        raise NotImplementedError
class MultiHeadSelfAttention(MultiHeadAttentionBase):
    r"""
    Multi-head self-attention: a single linear projection of the inputs is
    split into the queries, keys, and values of the attention.

    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """

    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(att_dim_in, heads, att_dim_out, dropout, **kwargs)
        with self.name_scope():
            # one projection producing queries, keys and values at once
            self.dense_pre_satt = mx.gluon.nn.Dense(
                units=self.att_dim_in * 3, flatten=False
            )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        inputs: Tensor,
        mask: Optional[Tensor] = None,
        cache: Optional[Dict[str, Optional[Tensor]]] = None,
    ) -> Tuple[Tensor, Optional[Dict]]:
        r"""
        Compute multi-head attention on a set of inputs that serve as
        queries, keys, and values at once. An optional mask tensor is added
        to the attention scores; an optional cache of previously computed
        keys/values is extended with those of the current inputs and
        updated in place.

        Parameters
        ----------
        inputs
            Input data of shape (batch_size, max_length, att_dim_in)
        mask
            Optional tensor to mask attention scores
        cache
            Optional dictionary of previously computed keys and values

        Returns
        -------
        The context tensor of shape (batch_size, max_length, att_dim_out),
        together with the (possibly updated) cache.
        """
        # Q = K = V -> Q * W_q, K * W_k, V * W_v in one projection:
        # (batch_size, max_length, att_dim_in * 3)
        projected = self.dense_pre_satt(inputs)
        # each part: (batch_size, max_length, att_dim_in)
        queries, keys, values = F.split(data=projected, num_outputs=3, axis=2)
        if cache is not None:
            # prepend cached keys/values and store the extended tensors back
            if "k" in cache:
                keys = F.concat(cache["k"], keys, dim=1)
            cache["k"] = keys
            if "v" in cache:
                values = F.concat(cache["v"], values, dim=1)
            cache["v"] = values
        return self._attend(F, queries, keys, values, mask), cache
class MultiHeadAttention(MultiHeadAttentionBase):
    r"""
    Multi-head attention layer for queries independent from keys/values.
    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """

    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(att_dim_in, heads, att_dim_out, dropout, **kwargs)

        # Separate projections for queries, keys and values, each of
        # width att_dim_in.
        with self.name_scope():
            for suffix in ("q", "k", "v"):
                setattr(
                    self,
                    "dense_pre_att_" + suffix,
                    mx.gluon.nn.Dense(
                        units=self.att_dim_in, flatten=False
                    ),
                )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self, F, queries: Tensor, memory: Tensor, mask: Optional[Tensor] = None
    ) -> Tensor:
        r"""
        Computes multi-head attention of the queries over a memory tensor.
        An optional mask tensor may be used to mask the attention scores.
        Parameters
        ----------
        queries
            Queries tensor of shape (batch_size, query_max_length, att_dim_in)
        memory
            Memory tensor to attend to of shape (batch_size, memory_max_length, att_dim_in)
        mask
            Optional tensor to mask attention scores
        Returns
        -------
        Tensor of shape (batch_size, query_seq_len, att_dim_out)
        """
        # Q -> Q * W_q; keys and values are both projections of the memory.
        projected_q = self.dense_pre_att_q(queries)  # (batch, query_len, att_dim_in)
        projected_k = self.dense_pre_att_k(memory)  # (batch, memory_len, att_dim_in)
        projected_v = self.dense_pre_att_v(memory)  # (batch, memory_len, att_dim_in)

        return self._attend(F, projected_q, projected_k, projected_v, mask=mask)
class TransformerFeedForward(HybridBlock):
    r"""
    Position-wise feed-forward network with activation.
    .. math::
        activation(XW_1 + b_1)W_2 + b_2
    :math:`W_1`: (batch_size, d, inner_dim)
    :math:`W_2`: (batch_size, inner_dim, out_dim)
    """

    def __init__(
        self,
        inner_dim: int = 32,  # W1: (batch_size, d, inner_dim)
        out_dim: int = 32,  # W2: (batch_size, inner_dim, out_dim)
        act_type: str = "softrelu",
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.inner_dim = inner_dim
        self.out_dim = out_dim
        self.dropout = dropout
        self.act_type = act_type

        with self.name_scope():
            # Two-layer MLP: activated expansion to inner_dim, optional
            # dropout, then a plain linear projection back to out_dim.
            network = mx.gluon.nn.HybridSequential()
            network.add(
                mx.gluon.nn.Dense(
                    units=self.inner_dim,
                    use_bias=True,
                    activation=self.act_type,
                    flatten=False,
                )
            )
            if self.dropout > 0.0:
                network.add(mx.gluon.nn.Dropout(self.dropout))
            # final projection carries no activation
            network.add(
                mx.gluon.nn.Dense(units=out_dim, use_bias=True, flatten=False)
            )
            self.mlp = network

    def hybrid_forward(self, F, x: Tensor, *args) -> Tensor:
        r"""
        Apply the position-wise feed-forward network to ``x``.
        Parameters
        ----------
        x
            Tensor of shape (batch_size, d, in_dim)
        Returns
        -------
        Tensor of shape (batch_size, d1, out_dim)
        """
        return self.mlp(x)
class TransformerProcessBlock(HybridBlock):
    r"""
    Block to perform pre/post processing on layer inputs.
    The processing steps are determined by the sequence argument, which can contain one of the three operations:
    n: layer normalization
    r: residual connection
    d: dropout
    """

    def __init__(self, sequence: str, dropout: float, **kwargs) -> None:
        super().__init__(**kwargs)
        self.sequence = sequence
        self.dropout = dropout
        # Layer normalization is only instantiated when the sequence asks
        # for it; otherwise the attribute stays None.
        self.layer_norm = LayerNormalization() if "n" in sequence else None

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self, F, data: Tensor, prev: Optional[Tensor] = None
    ) -> Tensor:
        r"""
        Apply the configured processing sequence to ``data``, optionally
        combining it with a previous input via a residual connection.
        Parameters
        ----------
        data
            Input data of shape: (batch_size, length, num_hidden)
        prev
            Previous data of shape (batch_size, length, num_hidden)
        Returns
        -------
        Processed data of shape (batch_size, length, num_hidden).
        """
        if not self.sequence:
            return data

        if prev is None:
            assert (
                "r" not in self.sequence
            ), "Residual connection not allowed if no previous value given."

        for step in self.sequence:
            if step == "d":
                # dropout is a no-op when the rate is zero
                if self.dropout > 0.0:
                    data = F.Dropout(data, p=self.dropout)
            elif step == "r":
                data = F.broadcast_add(data, prev)
            elif step == "n":
                data = self.layer_norm(data)
            else:
                raise ValueError("Unknown step in sequence: %s" % step)

        return data
| 15,991 | 27.506239 | 112 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer/trans_decoder.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
from gluonts.model.transformer.layers import (
TransformerProcessBlock,
TransformerFeedForward,
MultiHeadSelfAttention,
MultiHeadAttention,
InputLayer,
)
class TransformerDecoder(HybridBlock):
    """
    Transformer decoder stack: input embedding, masked multi-head
    self-attention, multi-head attention over the encoder output, and a
    position-wise feed-forward network, each wrapped in configurable
    pre/post processing blocks.
    """

    @validated()
    def __init__(self, decoder_length: int, config: Dict, **kwargs) -> None:
        """
        Parameters
        ----------
        decoder_length
            Number of decoding time steps.
        config
            Dictionary with keys "model_dim", "pre_seq", "post_seq",
            "dropout_rate", "num_heads", "inner_ff_dim_scale" and "act_type"
            (as assembled by the estimator) configuring all sub-blocks.
        """
        super().__init__(**kwargs)
        self.decoder_length = decoder_length
        # Cache of self-attention keys/values, used for step-by-step
        # (autoregressive) decoding when hybrid_forward runs with
        # is_train=False; cleared via cache_reset().
        self.cache = {}
        with self.name_scope():
            # NOTE: the `prefix` strings below determine gluon parameter
            # names; changing them would break loading of saved parameters.
            self.enc_input_layer = InputLayer(model_size=config["model_dim"])
            self.dec_pre_self_att = TransformerProcessBlock(
                sequence=config["pre_seq"],
                dropout=config["dropout_rate"],
                prefix="pretransformerprocessblock_",
            )
            self.dec_self_att = MultiHeadSelfAttention(
                att_dim_in=config["model_dim"],
                heads=config["num_heads"],
                att_dim_out=config["model_dim"],
                dropout=config["dropout_rate"],
                prefix="multiheadselfattention_",
            )
            self.dec_post_self_att = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postselfatttransformerprocessblock_",
            )
            self.dec_enc_att = MultiHeadAttention(
                att_dim_in=config["model_dim"],
                heads=config["num_heads"],
                att_dim_out=config["model_dim"],
                dropout=config["dropout_rate"],
                prefix="multiheadattention_",
            )
            self.dec_post_att = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postatttransformerprocessblock_",
            )
            self.dec_ff = TransformerFeedForward(
                inner_dim=config["model_dim"] * config["inner_ff_dim_scale"],
                out_dim=config["model_dim"],
                act_type=config["act_type"],
                dropout=config["dropout_rate"],
                prefix="transformerfeedforward_",
            )
            self.dec_post_ff = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postffransformerprocessblock_",
            )

    def cache_reset(self):
        """Clear the self-attention key/value cache (call between forecasts)."""
        self.cache = {}

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        data: Tensor,
        enc_out: Tensor,
        mask: Optional[Tensor] = None,
        is_train: bool = True,
    ) -> Tensor:
        """
        A transformer decoder block: self-attention over the decoder inputs,
        attention over the encoder output, and a feed-forward layer, with
        pre/post process blocks in between.
        Parameters
        ----------
        data
            Decoder input sequence.
        enc_out
            Output of the encoder to attend to.
        mask
            Optional additive mask on the self-attention scores (used to
            block attention to future positions during training).
        is_train
            When False, a copy of the key/value cache is passed to the
            self-attention and the updated cache is stored back, enabling
            one-step-at-a-time decoding.
        """
        # embedding
        inputs = self.enc_input_layer(data)
        # self-attention
        data_att, cache = self.dec_self_att(
            self.dec_pre_self_att(inputs, None),
            mask,
            self.cache.copy() if not is_train else None,
        )
        data = self.dec_post_self_att(data_att, inputs)
        # encoder attention
        data_att = self.dec_enc_att(data, enc_out)
        data = self.dec_post_att(data_att, data)
        # feed-forward
        data_ff = self.dec_ff(data)
        data = self.dec_post_ff(data_ff, data)
        if not is_train:
            self.cache = cache.copy()
        return data
| 4,259 | 32.543307 | 118 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Tuple, List, Optional
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.scaler import NOPScaler, MeanScaler
from gluonts.block.feature import FeatureEmbedder
from gluonts.core.component import validated
from gluonts.distribution import DistributionOutput
from gluonts.model.common import Tensor
from gluonts.model.transformer.trans_encoder import TransformerEncoder
from gluonts.model.transformer.trans_decoder import TransformerDecoder
LARGE_NEGATIVE_VALUE = -99999999
def prod(xs):
    """Return the product of all elements of ``xs``; an empty input yields 1."""
    result = 1
    for factor in xs:
        result = result * factor
    return result
class TransformerNetwork(mx.gluon.HybridBlock):
    """
    Base network shared by the training and prediction variants of the
    Transformer model: holds the encoder/decoder pair, the distribution
    parameter projection, the static-feature embedder and the target scaler.
    """

    @validated()
    def __init__(
        self,
        encoder: TransformerEncoder,
        decoder: TransformerDecoder,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        cardinality: List[int],
        embedding_dimension: int,
        lags_seq: List[int],
        scaling: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.scaling = scaling
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.distr_output = distr_output

        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        # Keep the lags in ascending order. Use sorted() instead of
        # list.sort() so the caller's list is not mutated as a side effect.
        self.lags_seq = sorted(lags_seq)

        self.target_shape = distr_output.event_shape

        with self.name_scope():
            self.proj_dist_args = distr_output.get_args_proj()
            self.encoder = encoder
            self.decoder = decoder
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=[embedding_dimension for _ in cardinality],
            )
            # MeanScaler normalizes targets by their context-window mean;
            # NOPScaler leaves them untouched (scale == 1).
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)

    @staticmethod
    def get_lagged_subsequences(
        F,
        sequence: Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> Tensor:
        """
        Returns lagged subsequences of a given sequence.
        Parameters
        ----------
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted. Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.
        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and I = len(indices), containing lagged
            subsequences. Specifically, lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        # we must have: sequence_length - lag_index - subsequences_length >= 0
        # for all lag_index, hence the following assert
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, found lag {max(indices)} "
            f"while history length is only {sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)

        lagged_values = []
        for lag_index in indices:
            # slice ending lag_index steps before the end of the sequence
            begin_index = -lag_index - subsequences_length
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(
                F.slice_axis(
                    sequence, axis=1, begin=begin_index, end=end_index
                )
            )
        return F.stack(*lagged_values, axis=-1)

    def create_network_input(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, num_features, history_length)
        past_target: Tensor,  # (batch_size, history_length, 1)
        past_observed_values: Tensor,  # (batch_size, history_length)
        future_time_feat: Optional[
            Tensor
        ],  # (batch_size, num_features, prediction_length)
        future_target: Optional[Tensor],  # (batch_size, prediction_length)
    ) -> Tuple[Tensor, Tensor, Tensor]:
        """
        Creates inputs for the transformer network.
        All tensor arguments should have NTC layout.
        Returns the concatenated input sequence, the target scale, and the
        static feature vector.
        """

        if future_time_feat is None or future_target is None:
            # inference: condition only on the context window
            time_feat = past_time_feat.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            )
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        else:
            # training: context window plus the known future range
            time_feat = F.concat(
                past_time_feat.slice_axis(
                    axis=1,
                    begin=self.history_length - self.context_length,
                    end=None,
                ),
                future_time_feat,
                dim=1,
            )
            sequence = F.concat(past_target, future_target, dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length

        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags = self.get_lagged_subsequences(
            F=F,
            sequence=sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length,
        )

        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
            past_observed_values.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
        )

        embedded_cat = self.embedder(feat_static_cat)

        # in addition to embedding features, use the log scale as it can help prediction too
        # (batch_size, num_features + prod(target_shape))
        static_feat = F.concat(
            embedded_cat,
            F.log(scale)
            if len(self.target_shape) == 0
            else F.log(scale.squeeze(axis=1)),
            dim=1,
        )

        # broadcast the static features along the time axis
        repeated_static_feat = static_feat.expand_dims(axis=1).repeat(
            axis=1, repeats=subsequences_length
        )

        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))

        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = F.reshape(
            data=lags_scaled,
            shape=(
                -1,
                subsequences_length,
                len(self.lags_seq) * prod(self.target_shape),
            ),
        )

        # (batch_size, sub_seq_len, input_dim)
        inputs = F.concat(input_lags, time_feat, repeated_static_feat, dim=-1)

        return inputs, scale, static_feat

    @staticmethod
    def upper_triangular_mask(F, d):
        """
        Returns a (d, d) additive attention mask whose strictly
        upper-triangular entries equal LARGE_NEGATIVE_VALUE, preventing a
        position from attending to later (future) positions.
        """
        mask = F.zeros_like(F.eye(d))
        for k in range(d - 1):
            mask = mask + F.eye(d, d, k + 1)
        return mask * LARGE_NEGATIVE_VALUE

    def hybrid_forward(self, F, x, *args, **kwargs):
        # implemented by the training/prediction subclasses
        raise NotImplementedError
class TransformerTrainingNetwork(TransformerNetwork):
    """
    Training variant of the Transformer network: teacher-forces the decoder
    on the known future targets and returns the mean negative log-likelihood.
    """

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
    ) -> Tensor:
        """
        Computes the loss for training Transformer; all input tensors
        representing time series have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape, seq_len)
        future_time_feat : (batch_size, prediction_length, num_features)
        future_target : (batch_size, prediction_length, *target_shape)
        Returns
        -------
        Scalar Tensor holding the mean loss over the batch.
        """
        # assemble the full conditioning sequence (context + prediction range)
        inputs, scale, _ = self.create_network_input(
            F=F,
            feat_static_cat=feat_static_cat,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
        )

        # the first context_length steps feed the encoder; the remaining
        # prediction_length steps feed the decoder (teacher forcing)
        enc_input = inputs.slice_axis(
            axis=1, begin=0, end=self.context_length
        )
        dec_input = inputs.slice_axis(
            axis=1, begin=self.context_length, end=None
        )

        enc_out = self.encoder(enc_input)

        # causal mask keeps every decoder position from seeing the future
        causal_mask = self.upper_triangular_mask(F, self.prediction_length)
        dec_output = self.decoder(dec_input, enc_out, causal_mask)

        # project decoder outputs to distribution parameters and score the
        # observed future targets
        distr_args = self.proj_dist_args(dec_output)
        distr = self.distr_output.distribution(distr_args, scale=scale)

        return distr.loss(future_target).mean()
class TransformerPredictionNetwork(TransformerNetwork):
    """
    Prediction variant of the Transformer network: encodes the conditioning
    range once, then draws sample paths autoregressively with the decoder.
    """

    @validated()
    def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
        super().__init__(**kwargs)
        self.num_parallel_samples = num_parallel_samples

        # for decoding the lags are shifted by one,
        # at the first time-step of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]

    def sampling_decoder(
        self,
        F,
        static_feat: Tensor,
        past_target: Tensor,
        time_feat: Tensor,
        scale: Tensor,
        enc_out: Tensor,
    ) -> Tensor:
        """
        Computes sample paths by repeatedly applying the decoder, one
        prediction step at a time, feeding each drawn sample back as input.
        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length, 1).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, ).
        enc_out: Tensor
            output of the encoder. Shape: (batch_size, num_cells)
        Returns
        --------
        sample_paths : Tensor
            a tensor containing sampled paths. Shape: (batch_size, num_sample_paths, prediction_length).
        """

        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_time_feat = time_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_static_feat = static_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_enc_out = enc_out.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_scale = scale.repeat(
            repeats=self.num_parallel_samples, axis=0
        )

        future_samples = []

        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            # shifted lags: at step k, a shifted lag of 0 reads the most
            # recently appended sample
            lags = self.get_lagged_subsequences(
                F=F,
                sequence=repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )

            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags_scaled = F.broadcast_div(
                lags, repeated_scale.expand_dims(axis=-1)
            )

            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = F.reshape(
                data=lags_scaled,
                shape=(-1, 1, prod(self.target_shape) * len(self.lags_seq)),
            )

            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            dec_input = F.concat(
                input_lags,
                repeated_time_feat.slice_axis(axis=1, begin=k, end=k + 1),
                repeated_static_feat,
                dim=-1,
            )

            # is_train=False: the decoder consults/updates its key-value cache
            dec_output = self.decoder(dec_input, repeated_enc_out, None, False)

            distr_args = self.proj_dist_args(dec_output)

            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(
                distr_args, scale=repeated_scale
            )

            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample()

            # (batch_size * num_samples, seq_len, *target_shape)
            repeated_past_target = F.concat(
                repeated_past_target, new_samples, dim=1
            )
            future_samples.append(new_samples)

        # reset cache of the decoder
        self.decoder.cache_reset()

        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = F.concat(*future_samples, dim=1)

        # (batch_size, num_samples, *target_shape, prediction_length)
        return samples.reshape(
            shape=(
                (-1, self.num_parallel_samples)
                + self.target_shape
                + (self.prediction_length,)
            )
        )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
    ) -> Tensor:
        """
        Predicts samples, all tensors should have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        Returns
        -------
        Tensor
            Predicted samples of shape
            (batch_size, num_samples, *target_shape, prediction_length).
        """

        # create the inputs for the encoder
        inputs, scale, static_feat = self.create_network_input(
            F=F,
            feat_static_cat=feat_static_cat,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=None,
            future_target=None,
        )

        # pass through encoder
        enc_out = self.encoder(inputs)

        return self.sampling_decoder(
            F=F,
            past_target=past_target,
            time_feat=future_time_feat,
            static_feat=static_feat,
            scale=scale,
            enc_out=enc_out,
        )
| 16,590 | 33.564583 | 116 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import StudentTOutput, DistributionOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import (
TimeFeature,
get_lags_for_frequency,
time_features_from_frequency_str,
)
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SetField,
Transformation,
VstackFeatures,
)
# Relative imports
from gluonts.model.transformer._network import (
TransformerPredictionNetwork,
TransformerTrainingNetwork,
)
from gluonts.model.transformer.trans_encoder import TransformerEncoder
from gluonts.model.transformer.trans_decoder import TransformerDecoder
class TransformerEstimator(GluonEstimator):
    """
    Construct a Transformer estimator.
    This implements a Transformer model, close to the one described in
    [Vaswani2017]_.
    .. [Vaswani2017] Vaswani, Ashish, et al. "Attention is all you need."
        Advances in neural information processing systems. 2017.
    Parameters
    ----------
    freq
        Frequency of the data to train on and predict
    prediction_length
        Length of the prediction horizon
    context_length
        Number of steps to unroll the RNN for before computing predictions
        (default: None, in which case context_length = prediction_length)
    trainer
        Trainer object to be used (default: Trainer())
    dropout_rate
        Dropout regularization parameter (default: 0.1)
    cardinality
        Number of values of each categorical feature (default: [1])
    embedding_dimension
        Dimension of the embeddings for categorical features (the same
        dimension is used for all embeddings, default: 20)
    distr_output
        Distribution to use to evaluate observations and sample predictions
        (default: StudentTOutput())
    model_dim
        Dimension of the transformer network, i.e., embedding dimension of the input
        (default: 32)
    inner_ff_dim_scale
        Dimension scale of the inner hidden layer of the transformer's
        feedforward network (default: 4)
    pre_seq
        Sequence that defines operations of the processing block before the main transformer
        network. Available operations: 'd' for dropout, 'r' for residual connections
        and 'n' for normalization (default: 'dn')
    post_seq
        Sequence that defines operations of the processing block in and after the main
        transformer network. Available operations: 'd' for dropout, 'r' for residual connections
        and 'n' for normalization (default: 'drn').
    act_type
        Activation type of the transformer network (default: 'softrelu')
    num_heads
        Number of heads in the multi-head attention (default: 8)
    scaling
        Whether to automatically scale the target values (default: true)
    lags_seq
        Indices of the lagged target values to use as inputs of the RNN
        (default: None, in which case these are automatically determined
        based on freq)
    time_features
        Time features to use as inputs of the RNN (default: None, in which
        case these are automatically determined based on freq)
    use_feat_dynamic_real
        Whether to use the ``feat_dynamic_real`` field from the data
        (default: False)
    use_feat_static_cat
        Whether to use the ``feat_static_cat`` field from the data
        (default: False)
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 100)
    """

    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        context_length: Optional[int] = None,
        trainer: Trainer = Trainer(),
        dropout_rate: float = 0.1,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: int = 20,
        distr_output: DistributionOutput = StudentTOutput(),
        model_dim: int = 32,
        inner_ff_dim_scale: int = 4,
        pre_seq: str = "dn",
        post_seq: str = "drn",
        act_type: str = "softrelu",
        num_heads: int = 8,
        scaling: bool = True,
        lags_seq: Optional[List[int]] = None,
        time_features: Optional[List[TimeFeature]] = None,
        use_feat_dynamic_real: bool = False,
        use_feat_static_cat: bool = False,
        num_parallel_samples: int = 100,
    ) -> None:
        super().__init__(trainer=trainer)

        # validate user-supplied hyper-parameters up front
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
        assert (
            cardinality is not None or not use_feat_static_cat
        ), "You must set `cardinality` if `use_feat_static_cat=True`"
        assert cardinality is None or all(
            [c > 0 for c in cardinality]
        ), "Elements of `cardinality` should be > 0"
        assert (
            embedding_dimension > 0
        ), "The value of `embedding_dimension` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"

        self.freq = freq
        self.prediction_length = prediction_length
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.distr_output = distr_output
        self.dropout_rate = dropout_rate
        self.use_feat_dynamic_real = use_feat_dynamic_real
        self.use_feat_static_cat = use_feat_static_cat
        self.cardinality = cardinality if use_feat_static_cat else [1]
        self.embedding_dimension = embedding_dimension
        self.num_parallel_samples = num_parallel_samples
        self.lags_seq = (
            lags_seq
            if lags_seq is not None
            else get_lags_for_frequency(freq_str=freq)
        )
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )
        # total conditioning length: context plus the furthest lag looked up
        self.history_length = self.context_length + max(self.lags_seq)
        self.scaling = scaling

        # configuration shared by the encoder and decoder sub-networks
        self.config = {
            "model_dim": model_dim,
            "pre_seq": pre_seq,
            "post_seq": post_seq,
            "dropout_rate": dropout_rate,
            "inner_ff_dim_scale": inner_ff_dim_scale,
            "act_type": act_type,
            "num_heads": num_heads,
        }

        self.encoder = TransformerEncoder(
            self.context_length, self.config, prefix="enc_"
        )
        self.decoder = TransformerDecoder(
            self.prediction_length, self.config, prefix="dec_"
        )

    def create_transformation(self) -> Transformation:
        """
        Build the chain of transformations applied to each dataset entry:
        field removal/defaulting, numpy conversion, observed-value indicator,
        time/age features, feature stacking, and instance splitting.
        """
        remove_field_names = [
            FieldName.FEAT_DYNAMIC_CAT,
            FieldName.FEAT_STATIC_REAL,
        ]
        if not self.use_feat_dynamic_real:
            remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)

        return Chain(
            [RemoveFields(field_names=remove_field_names)]
            + (
                # provide a dummy static categorical feature when unused
                [SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
                if not self.use_feat_static_cat
                else []
            )
            + [
                AsNumpyArray(field=FieldName.FEAT_STATIC_CAT, expected_ndim=1),
                AsNumpyArray(
                    field=FieldName.TARGET,
                    # in the following line, we add 1 for the time dimension
                    expected_ndim=1 + len(self.distr_output.event_shape),
                ),
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                ),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=self.time_features,
                    pred_length=self.prediction_length,
                ),
                AddAgeFeature(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_AGE,
                    pred_length=self.prediction_length,
                    log_scale=True,
                ),
                VstackFeatures(
                    output_field=FieldName.FEAT_TIME,
                    input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
                    + (
                        [FieldName.FEAT_DYNAMIC_REAL]
                        if self.use_feat_dynamic_real
                        else []
                    ),
                ),
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                    past_length=self.history_length,
                    future_length=self.prediction_length,
                    time_series_fields=[
                        FieldName.FEAT_TIME,
                        FieldName.OBSERVED_VALUES,
                    ],
                ),
            ]
        )

    def create_training_network(self) -> TransformerTrainingNetwork:
        """Instantiate the loss-computing network used during training."""
        training_network = TransformerTrainingNetwork(
            encoder=self.encoder,
            decoder=self.decoder,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=True,
        )

        return training_network

    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """
        Wrap a trained network into a predictor: the sampling network shares
        its architecture with the training network and receives a copy of the
        trained parameters.
        """
        prediction_network = TransformerPredictionNetwork(
            encoder=self.encoder,
            decoder=self.decoder,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            scaling=True,
            num_parallel_samples=self.num_parallel_samples,
        )

        copy_parameters(trained_network, prediction_network)

        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
        )
| 12,230 | 37.583596 | 100 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartf/model.v2/lstm_save.py | #from deepar.model import NNModel
#from deepar.model.layers import GaussianLayer
#from deepar.model.loss import gaussian_likelihood
from . import NNModel
from .layers import GaussianLayer, StudentTLayer
from .loss import gaussian_likelihood, gaussian_sampler
from .loss import studentt_likelihood, studentt_sampler
from keras.layers import Input, Dense, Input
from keras.models import Model
from keras.layers import LSTM
from keras import backend as K
import logging
import numpy as np
logger = logging.getLogger('deepar')
class DeepAR(NNModel):
    """DeepAR-style probabilistic forecaster built on Keras LSTMs.

    The network emits the parameters (theta) of a chosen output
    likelihood (Gaussian or Student-T) at every time step; training
    minimizes the negative log-likelihood via a loss closure built from
    theta[1:], while theta[0] (the location) is the nominal model output.
    """

    def __init__(self, ts_obj, steps_per_epoch=50, epochs=100,
                 distribution = 'Gaussian',
                 optimizer='adam', with_custom_nn_structure=None):
        """
        :param ts_obj: Dataset-like object implementing ``next_batch(batch_size, n_steps)``
        :param steps_per_epoch: number of batches drawn per training epoch
        :param epochs: number of training epochs
        :param distribution: 'Gaussian' or 'StudentT'; selects the matched
            (loss, output layer, sampler) triple
        :param optimizer: Keras optimizer name or instance
        :param with_custom_nn_structure: optional callable replacing
            :meth:`basic_structure`; must accept the same kwargs and return
            (input_shape, inputs, theta)
        :raises ValueError: if ``distribution`` is not recognized
        """
        self.ts_obj = ts_obj
        self.inputs, self.z_sample = None, None
        self.steps_per_epoch = steps_per_epoch
        self.epochs = epochs
        self.optimizer = optimizer
        self.keras_model = None

        if distribution == 'Gaussian':
            self.loss = gaussian_likelihood
            self.distrib = GaussianLayer
            self.sampler = gaussian_sampler
        elif distribution == 'StudentT':
            self.loss = studentt_likelihood
            self.distrib = StudentTLayer
            self.sampler = studentt_sampler
        else:
            # Fail fast: the old silent 'pass' left loss/distrib/sampler
            # undefined, surfacing later as a confusing AttributeError.
            raise ValueError("Unsupported distribution: %r "
                             "(expected 'Gaussian' or 'StudentT')" % (distribution,))

        if with_custom_nn_structure:
            self.nn_structure = with_custom_nn_structure
        else:
            self.nn_structure = DeepAR.basic_structure
        self._output_layer_name = 'main_output'
        self.get_intermediate = None

    @staticmethod
    def basic_structure(**kwargs):
        """
        Minimal architecture: a single 4-unit LSTM feeding the
        distribution output layer. This is the method that needs to be
        patched when changing NN structure.

        :return: input_shape (tuple), inputs (Tensor), theta (list of
            likelihood-parameter tensors; theta[0] is the location)
        """
        # Defaults allow a no-kwarg call (the old kwargs[...] lookups
        # raised KeyError when invoked via instantiate_and_fit).
        context_len = kwargs.get('context_len', 40)
        prediction_len = kwargs.get('prediction_len', 2)
        input_dim = kwargs.get('input_dim', 1)
        distrib = kwargs.get('distrib', GaussianLayer)

        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = LSTM(4, return_sequences=True)(inputs)
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta

    def instantiate_and_fit(self, verbose=False):
        """Build the configured network with default sizes and train it."""
        # Forward the selected output layer so the architecture matches
        # the likelihood chosen in __init__.
        input_shape, inputs, theta = self.nn_structure(distrib=self.distrib)
        model = Model(inputs, theta[0])
        # The auxiliary parameters theta[1:] are closed over by the loss.
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        self.get_intermediate = K.function(
            inputs=[self.model.input],
            outputs=self.model.get_layer(self._output_layer_name).output)

    @property
    def model(self):
        """The trained Keras model (None before fitting)."""
        return self.keras_model

    def predict_theta_from_input(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network

        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)

    @staticmethod
    def encoder_decoder(**kwargs):
        """
        DeepAR-style stacked-LSTM architecture (default two layers).
        This is the method that needs to be patched when changing NN structure.

        Required kwargs: context_len, prediction_len, input_dim,
        num_cells, num_layers, dropout_rate, distrib.
        :return: input_shape (tuple), inputs (Tensor), theta (list of
            likelihood-parameter tensors)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        num_cells = kwargs['num_cells']
        num_layers = kwargs['num_layers']
        dropout_rate = kwargs['dropout_rate']
        distrib = kwargs['distrib']

        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = inputs
        for _ in range(num_layers):
            x = LSTM(num_cells, return_sequences=True, dropout=dropout_rate)(x)
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta

    def fit(self, verbose=False,
            context_len=40, prediction_len=2, input_dim=1,
            num_cells = 40, num_layers = 2, dropout_rate = 0.1):
        """Build the network via ``self.nn_structure`` and train it.

        :param verbose: log a debug message after training
        :param context_len: conditioning-range length (time steps)
        :param prediction_len: prediction-range length (time steps)
        :param input_dim: number of input features per time step
        :param num_cells: LSTM units per layer (used by encoder_decoder)
        :param num_layers: number of stacked LSTM layers (used by encoder_decoder)
        :param dropout_rate: LSTM dropout (used by encoder_decoder)
        """
        input_shape, inputs, theta = self.nn_structure(
            distrib = self.distrib,
            context_len=context_len, prediction_len=prediction_len, input_dim=input_dim,
            num_cells = num_cells, num_layers = num_layers, dropout_rate = dropout_rate)
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        self.get_intermediate = K.function(
            inputs=[self.model.input],
            outputs=self.model.get_layer(self._output_layer_name).output)

    def predict(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network

        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)

    def get_sample(self, theta):
        """Draw a sample from the configured likelihood given ``theta``."""
        return self.sampler(theta)
def ts_generator(ts_obj, n_steps):
    """Infinite batch generator adapter for Keras' fit_generator.

    :param ts_obj: Dataset-like object exposing a
        ``next_batch(batch_size, n_steps)`` method
    :param n_steps: length of the network's input tensor (time dimension)
    :return: yields (inputs, targets) pairs forever, batch size fixed at 32
    """
    while True:
        batch = ts_obj.next_batch(32, n_steps)
        inputs, targets = batch[0], batch[1]
        yield inputs, targets
| 7,098 | 36.363158 | 104 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartf/model.v2/lstm.py | #from deepar.model import NNModel
#from deepar.model.layers import GaussianLayer
#from deepar.model.loss import gaussian_likelihood
from . import NNModel
from .layers import GaussianLayer, StudentTLayer
from .loss import gaussian_likelihood, gaussian_sampler
from .loss import studentt_likelihood, studentt_sampler
from keras.layers import Input, Dense, Input
from keras.models import Model
from keras.layers import LSTM
from keras import backend as K
import logging
import numpy as np
logger = logging.getLogger('deepar')
class DeepAR(NNModel):
    """DeepAR-style probabilistic forecaster built on Keras LSTMs.

    The network emits the parameters (theta) of a chosen output
    likelihood (Gaussian or Student-T) at every time step; training
    minimizes the negative log-likelihood via a loss closure built from
    theta[1:], while theta[0] (the location) is the nominal model output.
    """

    def __init__(self, ts_obj, steps_per_epoch=50, epochs=100,
                 distribution = 'Gaussian',
                 optimizer='adam', with_custom_nn_structure=None):
        """
        :param ts_obj: Dataset-like object implementing ``next_batch(batch_size, n_steps)``
        :param steps_per_epoch: number of batches drawn per training epoch
        :param epochs: number of training epochs
        :param distribution: 'Gaussian' or 'StudentT'; selects the matched
            (loss, output layer, sampler) triple
        :param optimizer: Keras optimizer name or instance
        :param with_custom_nn_structure: optional callable replacing
            :meth:`basic_structure`; must accept the same kwargs and return
            (input_shape, inputs, theta)
        :raises ValueError: if ``distribution`` is not recognized
        """
        self.ts_obj = ts_obj
        self.inputs, self.z_sample = None, None
        self.steps_per_epoch = steps_per_epoch
        self.epochs = epochs
        self.optimizer = optimizer
        self.keras_model = None

        if distribution == 'Gaussian':
            self.loss = gaussian_likelihood
            self.distrib = GaussianLayer
            self.sampler = gaussian_sampler
        elif distribution == 'StudentT':
            self.loss = studentt_likelihood
            self.distrib = StudentTLayer
            self.sampler = studentt_sampler
        else:
            # Fail fast: the old silent 'pass' left loss/distrib/sampler
            # undefined, surfacing later as a confusing AttributeError.
            raise ValueError("Unsupported distribution: %r "
                             "(expected 'Gaussian' or 'StudentT')" % (distribution,))

        if with_custom_nn_structure:
            self.nn_structure = with_custom_nn_structure
        else:
            self.nn_structure = DeepAR.basic_structure
        self._output_layer_name = 'main_output'
        self.get_intermediate = None

    @staticmethod
    def basic_structure(**kwargs):
        """
        Minimal architecture: a single 4-unit LSTM feeding the
        distribution output layer. This is the method that needs to be
        patched when changing NN structure.

        :return: input_shape (tuple), inputs (Tensor), theta (list of
            likelihood-parameter tensors; theta[0] is the location)
        """
        # Defaults allow a no-kwarg call (the old kwargs[...] lookups
        # raised KeyError when invoked via instantiate_and_fit).
        context_len = kwargs.get('context_len', 40)
        prediction_len = kwargs.get('prediction_len', 2)
        input_dim = kwargs.get('input_dim', 1)
        distrib = kwargs.get('distrib', GaussianLayer)

        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = LSTM(4, return_sequences=True)(inputs)
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta

    def instantiate_and_fit(self, verbose=False):
        """Build the configured network with default sizes and train it."""
        # Forward the selected output layer so the architecture matches
        # the likelihood chosen in __init__.
        input_shape, inputs, theta = self.nn_structure(distrib=self.distrib)
        model = Model(inputs, theta[0])
        # The auxiliary parameters theta[1:] are closed over by the loss.
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        self.get_intermediate = K.function(
            inputs=[self.model.input],
            outputs=self.model.get_layer(self._output_layer_name).output)

    @property
    def model(self):
        """The trained Keras model (None before fitting)."""
        return self.keras_model

    def predict_theta_from_input(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network

        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)

    @staticmethod
    def encoder_decoder(**kwargs):
        """
        DeepAR-style stacked-LSTM architecture (default two layers).
        This is the method that needs to be patched when changing NN structure.

        Required kwargs: context_len, prediction_len, input_dim,
        num_cells, num_layers, dropout_rate, distrib.
        :return: input_shape (tuple), inputs (Tensor), theta (list of
            likelihood-parameter tensors)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        num_cells = kwargs['num_cells']
        num_layers = kwargs['num_layers']
        dropout_rate = kwargs['dropout_rate']
        distrib = kwargs['distrib']

        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = inputs
        for _ in range(num_layers):
            x = LSTM(num_cells, return_sequences=True, dropout=dropout_rate)(x)
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta

    def fit(self, verbose=False,
            context_len=20, prediction_len=2, input_dim=1,
            num_cells = 40, num_layers = 2, dropout_rate = 0.1):
        """Build the network via ``self.nn_structure``, print a summary,
        and train it.

        :param verbose: log a debug message after training
        :param context_len: conditioning-range length (time steps)
        :param prediction_len: prediction-range length (time steps)
        :param input_dim: number of input features per time step
        :param num_cells: LSTM units per layer (used by encoder_decoder)
        :param num_layers: number of stacked LSTM layers (used by encoder_decoder)
        :param dropout_rate: LSTM dropout (used by encoder_decoder)
        """
        input_shape, inputs, theta = self.nn_structure(
            distrib = self.distrib,
            context_len=context_len, prediction_len=prediction_len, input_dim=input_dim,
            num_cells = num_cells, num_layers = num_layers, dropout_rate = dropout_rate)
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.summary()
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        self.get_intermediate = K.function(
            inputs=[self.model.input],
            outputs=self.model.get_layer(self._output_layer_name).output)

    def predict(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network

        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)

    def get_sample(self, theta):
        """Draw a sample from the configured likelihood given ``theta``."""
        return self.sampler(theta)
def ts_generator(ts_obj, n_steps):
    """Infinite batch generator adapter for Keras' fit_generator.

    :param ts_obj: Dataset-like object exposing a
        ``next_batch(batch_size, n_steps)`` method
    :param n_steps: length of the network's input tensor (time dimension)
    :return: yields (inputs, targets) pairs forever, batch size fixed at 32
    """
    while True:
        batch = ts_obj.next_batch(32, n_steps)
        inputs, targets = batch[0], batch[1]
        yield inputs, targets
| 7,229 | 36.076923 | 104 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartf/model.v2/layers.py | import tensorflow as tf
from keras import backend as K
from keras.initializers import glorot_normal
from keras.layers import Layer
class GaussianLayer(Layer):
    """Dense output head producing per-step Gaussian parameters (mu, sigma).

    sigma is made strictly positive via a softplus plus a small epsilon.
    """

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        self.kernel_1, self.kernel_2 = [], []
        self.bias_1, self.bias_2 = [], []
        super(GaussianLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        in_features = input_shape[-1]
        kernel_shape = (in_features, self.output_dim)
        bias_shape = (self.output_dim,)
        # Two independent affine maps: one for mu, one for (raw) sigma.
        self.kernel_1 = self.add_weight(name='kernel_1', shape=kernel_shape,
                                        initializer=glorot_normal(),
                                        trainable=True)
        self.kernel_2 = self.add_weight(name='kernel_2', shape=kernel_shape,
                                        initializer=glorot_normal(),
                                        trainable=True)
        self.bias_1 = self.add_weight(name='bias_1', shape=bias_shape,
                                      initializer=glorot_normal(),
                                      trainable=True)
        self.bias_2 = self.add_weight(name='bias_2', shape=bias_shape,
                                      initializer=glorot_normal(),
                                      trainable=True)
        super(GaussianLayer, self).build(input_shape)

    def call(self, x):
        mu = tf.matmul(x, self.kernel_1) + self.bias_1
        raw_sigma = tf.matmul(x, self.kernel_2) + self.bias_2
        # softplus keeps sigma > 0; the epsilon guards against underflow.
        sigma = K.log(1 + K.exp(raw_sigma)) + 1e-06
        return [mu, sigma]

    def compute_output_shape(self, input_shape):
        """Both parameter tensors keep the (batch, time) axes."""
        out = (input_shape[0], input_shape[1], self.output_dim)
        return [out, out]
#
# studentT
#
class StudentTLayer(Layer):
    """Dense output head producing per-step Student-T parameters.

    mu
        Tensor containing the means, of shape `(*batch_shape, *event_shape)`.
    sigma
        Tensor containing the standard deviations, of shape
        `(*batch_shape, *event_shape)`.
    nu
        Nonnegative tensor containing the degrees of freedom of the
        distribution, of shape `(*batch_shape, *event_shape)`.
    """

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        self.num_parameters = 3
        self.kernels = [[] for _ in range(self.num_parameters)]
        self.biases = [[] for _ in range(self.num_parameters)]
        super(StudentTLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        """input: shape (NTC)"""
        in_features = input_shape[2]
        # All kernels first, then all biases, preserving weight ordering.
        for i in range(self.num_parameters):
            self.kernels[i] = self.add_weight(
                name='kernel_%d' % i,
                shape=(in_features, self.output_dim),
                initializer=glorot_normal(),
                trainable=True)
        for i in range(self.num_parameters):
            self.biases[i] = self.add_weight(
                name='bias_%d' % i,
                shape=(self.output_dim,),
                initializer=glorot_normal(),
                trainable=True)
        super(StudentTLayer, self).build(input_shape)

    def call(self, x):
        """return mu, sigma, nu"""
        affine = [tf.matmul(x, k) + b
                  for k, b in zip(self.kernels, self.biases)]
        mu = affine[0]
        # softplus + epsilon keeps sigma strictly positive
        sigma = K.log(1 + K.exp(affine[1])) + 1e-06
        # nu is shifted to stay above 2
        nu = K.log(1 + K.exp(affine[2])) + 1e-06 + 2.0
        return [mu, sigma, nu]

    def compute_output_shape(self, input_shape):
        """Each parameter tensor keeps the (batch, time) axes."""
        out = (input_shape[0], input_shape[1], self.output_dim)
        return [out, out, out]
| 4,517 | 38.982301 | 81 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartf/model.v0/lstm.py | #from deepar.model import NNModel
#from deepar.model.layers import GaussianLayer
#from deepar.model.loss import gaussian_likelihood
from . import NNModel
from .layers import GaussianLayer
from .loss import gaussian_likelihood
from keras.layers import Input, Dense, Input
from keras.models import Model
from keras.layers import LSTM
from keras import backend as K
import logging
import numpy as np
logger = logging.getLogger('deepar')
class DeepAR(NNModel):
    """DeepAR-style probabilistic forecaster built on Keras LSTMs
    (Gaussian-output variant).

    The network emits Gaussian parameters (loc, scale) at every time
    step; training minimizes the negative log-likelihood via a loss
    closure built from the scale tensor, while loc is the nominal
    model output.
    """

    def __init__(self, ts_obj, steps_per_epoch=50, epochs=100, loss=gaussian_likelihood,
                 optimizer='adam', with_custom_nn_structure=None):
        """
        :param ts_obj: Dataset-like object implementing ``next_batch(batch_size, n_steps)``
        :param steps_per_epoch: number of batches drawn per training epoch
        :param epochs: number of training epochs
        :param loss: factory mapping the scale tensor to a Keras loss
        :param optimizer: Keras optimizer name or instance
        :param with_custom_nn_structure: optional callable replacing
            :meth:`basic_structure`; must return (input_shape, inputs, [loc, scale])
        """
        self.ts_obj = ts_obj
        self.inputs, self.z_sample = None, None
        self.steps_per_epoch = steps_per_epoch
        self.epochs = epochs
        self.loss = loss
        self.optimizer = optimizer
        self.keras_model = None
        if with_custom_nn_structure:
            self.nn_structure = with_custom_nn_structure
        else:
            self.nn_structure = DeepAR.basic_structure
        self._output_layer_name = 'main_output'
        self.get_intermediate = None

    @staticmethod
    def basic_structure(**kwargs):
        """
        Minimal architecture: a single 4-unit LSTM feeding a Gaussian
        output layer. This is the method that needs to be patched when
        changing NN structure.

        :return: input_shape (tuple), inputs (Tensor), [loc, scale]
        """
        # Defaults allow a no-kwarg call (the old kwargs[...] lookups
        # raised KeyError when invoked via instantiate_and_fit).
        context_len = kwargs.get('context_len', 40)
        prediction_len = kwargs.get('prediction_len', 2)
        input_dim = kwargs.get('input_dim', 1)

        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = LSTM(4, return_sequences=True)(inputs)
        loc, scale = GaussianLayer(1, name='main_output')(x)
        return input_shape, inputs, [loc, scale]

    def instantiate_and_fit(self, verbose=False):
        """Build the default network with default sizes and train it."""
        input_shape, inputs, theta = self.nn_structure()
        model = Model(inputs, theta[0])
        # The scale tensor theta[1] is closed over by the loss.
        model.compile(loss=self.loss(theta[1]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        self.get_intermediate = K.function(
            inputs=[self.model.input],
            outputs=self.model.get_layer(self._output_layer_name).output)

    @staticmethod
    def encoder_decoder(**kwargs):
        """
        DeepAR-style stacked-LSTM architecture (default two layers).
        This is the method that needs to be patched when changing NN structure.

        Required kwargs: context_len, prediction_len, input_dim,
        num_cells, num_layers, dropout_rate.
        :return: input_shape (tuple), inputs (Tensor), [loc, scale]
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        num_cells = kwargs['num_cells']
        num_layers = kwargs['num_layers']
        dropout_rate = kwargs['dropout_rate']

        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = inputs
        for _ in range(num_layers):
            x = LSTM(num_cells, return_sequences=True, dropout=dropout_rate)(x)
        loc, scale = GaussianLayer(1, name='main_output')(x)
        return input_shape, inputs, [loc, scale]

    def fit(self, verbose=False,
            context_len=40, prediction_len=2, input_dim=1,
            num_cells = 40, num_layers = 2, dropout_rate = 0.1):
        """Build the network via ``self.nn_structure`` and train it.

        :param verbose: log a debug message after training
        :param context_len: conditioning-range length (time steps)
        :param prediction_len: prediction-range length (time steps)
        :param input_dim: number of input features per time step
        :param num_cells: LSTM units per layer (used by encoder_decoder)
        :param num_layers: number of stacked LSTM layers (used by encoder_decoder)
        :param dropout_rate: LSTM dropout (used by encoder_decoder)
        """
        input_shape, inputs, theta = self.nn_structure(
            context_len=context_len, prediction_len=prediction_len, input_dim=input_dim,
            num_cells = num_cells, num_layers = num_layers, dropout_rate = dropout_rate)
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        self.get_intermediate = K.function(
            inputs=[self.model.input],
            outputs=self.model.get_layer(self._output_layer_name).output)

    @property
    def model(self):
        """The trained Keras model (None before fitting)."""
        return self.keras_model

    def predict_theta_from_input(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network

        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)
def ts_generator(ts_obj, n_steps):
    """Infinite batch generator adapter for Keras' fit_generator.

    :param ts_obj: Dataset-like object exposing a
        ``next_batch(batch_size, n_steps)`` method
    :param n_steps: length of the network's input tensor (time dimension)
    :return: yields (inputs, targets) pairs forever, batch size fixed at 32
    """
    while True:
        batch = ts_obj.next_batch(32, n_steps)
        inputs, targets = batch[0], batch[1]
        yield inputs, targets
| 5,812 | 37.243421 | 104 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartf/model.v0/layers.py | from keras import backend as K
from keras.initializers import glorot_normal
from keras.layers import Layer
class GaussianLayer(Layer):
    """Dense output head producing per-step Gaussian parameters (mu, sigma).

    sigma is made strictly positive via a softplus plus a small epsilon.
    """

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        self.kernel_1, self.kernel_2, self.bias_1, self.bias_2 = [], [], [], []
        super(GaussianLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Input is (batch, time, features); the affine maps act on the
        # trailing feature axis.
        n_weight_rows = input_shape[2]
        self.kernel_1 = self.add_weight(name='kernel_1',
                                        shape=(n_weight_rows, self.output_dim),
                                        initializer=glorot_normal(),
                                        trainable=True)
        self.kernel_2 = self.add_weight(name='kernel_2',
                                        shape=(n_weight_rows, self.output_dim),
                                        initializer=glorot_normal(),
                                        trainable=True)
        self.bias_1 = self.add_weight(name='bias_1',
                                      shape=(self.output_dim,),
                                      initializer=glorot_normal(),
                                      trainable=True)
        self.bias_2 = self.add_weight(name='bias_2',
                                      shape=(self.output_dim,),
                                      initializer=glorot_normal(),
                                      trainable=True)
        super(GaussianLayer, self).build(input_shape)

    def call(self, x):
        output_mu = K.dot(x, self.kernel_1) + self.bias_1
        output_sig = K.dot(x, self.kernel_2) + self.bias_2
        # softplus keeps sigma > 0; the epsilon guards against underflow.
        output_sig_pos = K.log(1 + K.exp(output_sig)) + 1e-06
        return [output_mu, output_sig_pos]

    def compute_output_shape(self, input_shape):
        """
        call() maps (batch, time, features) to two (batch, time,
        output_dim) tensors, so the declared shapes must keep the time
        axis. (The old 2-D shapes disagreed with what call() returns;
        the later layers.py revisions in this project use these 3-D
        shapes.)
        """
        return [(input_shape[0], input_shape[1], self.output_dim),
                (input_shape[0], input_shape[1], self.output_dim)]
| 1,954 | 44.465116 | 85 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartf/model.v1/lstm_save.py | #from deepar.model import NNModel
#from deepar.model.layers import GaussianLayer
#from deepar.model.loss import gaussian_likelihood
from . import NNModel
from .layers import GaussianLayer, StudentTLayer
from .loss import gaussian_likelihood, gaussian_sampler
from .loss import studentt_likelihood, studentt_sampler
from keras.layers import Input, Dense, Input
from keras.models import Model
from keras.layers import LSTM
from keras import backend as K
import logging
import numpy as np
logger = logging.getLogger('deepar')
class DeepAR(NNModel):
def __init__(self, ts_obj, steps_per_epoch=50, epochs=100,
distribution = 'Gaussian',
optimizer='adam', with_custom_nn_structure=None):
self.ts_obj = ts_obj
self.inputs, self.z_sample = None, None
self.steps_per_epoch = steps_per_epoch
self.epochs = epochs
self.optimizer = optimizer
self.keras_model = None
if distribution == 'Gaussian':
self.loss = gaussian_likelihood
self.distrib = GaussianLayer
self.sampler = gaussian_sampler
elif distribution == 'StudentT':
self.loss = studentt_likelihood
self.distrib = StudentTLayer
self.sampler = studentt_sampler
else:
pass
if with_custom_nn_structure:
self.nn_structure = with_custom_nn_structure
else:
self.nn_structure = DeepAR.basic_structure
self._output_layer_name = 'main_output'
self.get_intermediate = None
@staticmethod
def basic_structure(**kwargs):
"""
This is the method that needs to be patched when changing NN structure
:return: inputs_shape (tuple), inputs (Tensor), [loc, scale] (a list of theta parameters
of the target likelihood)
"""
context_len = kwargs['context_len']
prediction_len = kwargs['prediction_len']
input_dim = kwargs['input_dim']
distrib = kwargs['distrib']
seqlen = context_len + prediction_len
input_shape = (seqlen, input_dim)
inputs = Input(shape=input_shape)
x = LSTM(4, return_sequences=True)(inputs)
#x = Dense(3, activation='relu')(x)
#loc, scale = GaussianLayer(1, name='main_output')(x)
theta = distrib(1, name='main_output')(x)
return input_shape, inputs, theta
def instantiate_and_fit(self, verbose=False):
input_shape, inputs, theta = self.nn_structure()
model = Model(inputs, theta[0])
model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
model.fit_generator(ts_generator(self.ts_obj,
input_shape[0]),
steps_per_epoch=self.steps_per_epoch,
epochs=self.epochs)
if verbose:
logger.debug('Model was successfully trained')
self.keras_model = model
self.get_intermediate = K.function(inputs=[self.model.input],
outputs=self.model.get_layer(self._output_layer_name).output)
@property
def model(self):
return self.keras_model
def predict_theta_from_input(self, input_list):
"""
This function takes an input of size equal to the n_steps specified in 'Input' when building the
network
:param input_list:
:return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
corresponding to [[mu_values], [sigma_values]]
"""
if not self.get_intermediate:
raise ValueError('TF model must be trained first!')
return self.get_intermediate(input_list)
@staticmethod
def encoder_decoder(**kwargs):
#context_len=40, prediction_len=2, input_dim=1,
# num_cells = 40, num_layers = 2, dropout_rate = 0.1):
"""
follow the deepar 2-layers lstm encoder-decoder
This is the method that needs to be patched when changing NN structure
:return: inputs_shape (tuple), inputs (Tensor), [loc, scale] (a list of theta parameters
of the target likelihood)
"""
context_len = kwargs['context_len']
prediction_len = kwargs['prediction_len']
input_dim = kwargs['input_dim']
num_cells = kwargs['num_cells']
num_layers = kwargs['num_layers']
dropout_rate = kwargs['dropout_rate']
distrib = kwargs['distrib']
seqlen = context_len + prediction_len
input_shape = (seqlen, input_dim)
inputs = Input(shape=input_shape)
x = inputs
for l in range(num_layers):
x = LSTM(num_cells, return_sequences=True, dropout=dropout_rate)(x)
#x = Dense(3, activation='relu')(x)
#loc, scale = GaussianLayer(1, name='main_output')(x)
#return input_shape, inputs, [loc, scale]
theta = distrib(1, name='main_output')(x)
return input_shape, inputs, theta
def fit(self, verbose=False,
context_len=40, prediction_len=2, input_dim=1,
num_cells = 40, num_layers = 2, dropout_rate = 0.1):
input_shape, inputs, theta = self.nn_structure(
distrib = self.distrib,
context_len=context_len, prediction_len=prediction_len, input_dim=input_dim,
num_cells = num_cells, num_layers = num_layers, dropout_rate = dropout_rate)
model = Model(inputs, theta[0])
model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
model.fit_generator(ts_generator(self.ts_obj,
input_shape[0]),
steps_per_epoch=self.steps_per_epoch,
epochs=self.epochs)
if verbose:
logger.debug('Model was successfully trained')
self.keras_model = model
self.get_intermediate = K.function(inputs=[self.model.input],
outputs=self.model.get_layer(self._output_layer_name).output)
def predict(self, input_list):
"""
This function takes an input of size equal to the n_steps specified in 'Input' when building the
network
:param input_list:
:return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
corresponding to [[mu_values], [sigma_values]]
"""
if not self.get_intermediate:
raise ValueError('TF model must be trained first!')
return self.get_intermediate(input_list)
def get_sample(self, theta):
return self.sampler(theta)
def ts_generator(ts_obj, n_steps):
    """Infinite batch generator adapter for Keras' fit_generator.

    :param ts_obj: Dataset-like object exposing a
        ``next_batch(batch_size, n_steps)`` method
    :param n_steps: length of the network's input tensor (time dimension)
    :return: yields (inputs, targets) pairs forever, batch size fixed at 32
    """
    while True:
        batch = ts_obj.next_batch(32, n_steps)
        inputs, targets = batch[0], batch[1]
        yield inputs, targets
| 7,098 | 36.363158 | 104 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartf/model.v1/lstm.py | #from deepar.model import NNModel
#from deepar.model.layers import GaussianLayer
#from deepar.model.loss import gaussian_likelihood
from . import NNModel
from .layers import GaussianLayer, StudentTLayer
from .loss import gaussian_likelihood, gaussian_sampler
from .loss import studentt_likelihood, studentt_sampler
from keras.layers import Input, Dense, Input
from keras.models import Model
from keras.layers import LSTM
from keras import backend as K
import logging
import numpy as np
logger = logging.getLogger('deepar')
class DeepAR(NNModel):
    """DeepAR-style probabilistic forecaster built on Keras LSTMs.

    The network emits the parameters (theta) of a chosen output
    likelihood (Gaussian or Student-T) at every time step; training
    minimizes the negative log-likelihood via a loss closure built from
    theta[1:], while theta[0] (the location) is the nominal model output.
    """

    def __init__(self, ts_obj, steps_per_epoch=50, epochs=100,
                 distribution = 'Gaussian',
                 optimizer='adam', with_custom_nn_structure=None):
        """
        :param ts_obj: Dataset-like object implementing ``next_batch(batch_size, n_steps)``
        :param steps_per_epoch: number of batches drawn per training epoch
        :param epochs: number of training epochs
        :param distribution: 'Gaussian' or 'StudentT'; selects the matched
            (loss, output layer, sampler) triple
        :param optimizer: Keras optimizer name or instance
        :param with_custom_nn_structure: optional callable replacing
            :meth:`basic_structure`; must accept the same kwargs and return
            (input_shape, inputs, theta)
        :raises ValueError: if ``distribution`` is not recognized
        """
        self.ts_obj = ts_obj
        self.inputs, self.z_sample = None, None
        self.steps_per_epoch = steps_per_epoch
        self.epochs = epochs
        self.optimizer = optimizer
        self.keras_model = None

        if distribution == 'Gaussian':
            self.loss = gaussian_likelihood
            self.distrib = GaussianLayer
            self.sampler = gaussian_sampler
        elif distribution == 'StudentT':
            self.loss = studentt_likelihood
            self.distrib = StudentTLayer
            self.sampler = studentt_sampler
        else:
            # Fail fast: the old silent 'pass' left loss/distrib/sampler
            # undefined, surfacing later as a confusing AttributeError.
            raise ValueError("Unsupported distribution: %r "
                             "(expected 'Gaussian' or 'StudentT')" % (distribution,))

        if with_custom_nn_structure:
            self.nn_structure = with_custom_nn_structure
        else:
            self.nn_structure = DeepAR.basic_structure
        self._output_layer_name = 'main_output'
        self.get_intermediate = None

    @staticmethod
    def basic_structure(**kwargs):
        """
        Minimal architecture: a single 4-unit LSTM feeding the
        distribution output layer. This is the method that needs to be
        patched when changing NN structure.

        :return: input_shape (tuple), inputs (Tensor), theta (list of
            likelihood-parameter tensors; theta[0] is the location)
        """
        # Defaults allow a no-kwarg call (the old kwargs[...] lookups
        # raised KeyError when invoked via instantiate_and_fit).
        context_len = kwargs.get('context_len', 40)
        prediction_len = kwargs.get('prediction_len', 2)
        input_dim = kwargs.get('input_dim', 1)
        distrib = kwargs.get('distrib', GaussianLayer)

        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = LSTM(4, return_sequences=True)(inputs)
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta

    def instantiate_and_fit(self, verbose=False):
        """Build the configured network with default sizes and train it."""
        # Forward the selected output layer so the architecture matches
        # the likelihood chosen in __init__.
        input_shape, inputs, theta = self.nn_structure(distrib=self.distrib)
        model = Model(inputs, theta[0])
        # The auxiliary parameters theta[1:] are closed over by the loss.
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        self.get_intermediate = K.function(
            inputs=[self.model.input],
            outputs=self.model.get_layer(self._output_layer_name).output)

    @property
    def model(self):
        """The trained Keras model (None before fitting)."""
        return self.keras_model

    def predict_theta_from_input(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network

        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)

    @staticmethod
    def encoder_decoder(**kwargs):
        """
        DeepAR-style stacked-LSTM architecture (default two layers).
        This is the method that needs to be patched when changing NN structure.

        Required kwargs: context_len, prediction_len, input_dim,
        num_cells, num_layers, dropout_rate, distrib.
        :return: input_shape (tuple), inputs (Tensor), theta (list of
            likelihood-parameter tensors)
        """
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        num_cells = kwargs['num_cells']
        num_layers = kwargs['num_layers']
        dropout_rate = kwargs['dropout_rate']
        distrib = kwargs['distrib']

        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = inputs
        for _ in range(num_layers):
            x = LSTM(num_cells, return_sequences=True, dropout=dropout_rate)(x)
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta

    def fit(self, verbose=False,
            context_len=20, prediction_len=2, input_dim=1,
            num_cells = 40, num_layers = 2, dropout_rate = 0.1):
        """Build the network via ``self.nn_structure``, print a summary,
        and train it.

        :param verbose: log a debug message after training
        :param context_len: conditioning-range length (time steps)
        :param prediction_len: prediction-range length (time steps)
        :param input_dim: number of input features per time step
        :param num_cells: LSTM units per layer (used by encoder_decoder)
        :param num_layers: number of stacked LSTM layers (used by encoder_decoder)
        :param dropout_rate: LSTM dropout (used by encoder_decoder)
        """
        input_shape, inputs, theta = self.nn_structure(
            distrib = self.distrib,
            context_len=context_len, prediction_len=prediction_len, input_dim=input_dim,
            num_cells = num_cells, num_layers = num_layers, dropout_rate = dropout_rate)
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.summary()
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        self.get_intermediate = K.function(
            inputs=[self.model.input],
            outputs=self.model.get_layer(self._output_layer_name).output)

    def predict(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network

        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
        corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)

    def get_sample(self, theta):
        """Draw a sample from the configured likelihood given ``theta``."""
        return self.sampler(theta)
def ts_generator(ts_obj, n_steps):
    """Infinite batch generator adapter for Keras' fit_generator.

    :param ts_obj: Dataset-like object exposing a
        ``next_batch(batch_size, n_steps)`` method
    :param n_steps: length of the network's input tensor (time dimension)
    :return: yields (inputs, targets) pairs forever, batch size fixed at 32
    """
    while True:
        batch = ts_obj.next_batch(32, n_steps)
        inputs, targets = batch[0], batch[1]
        yield inputs, targets
| 7,181 | 36.020619 | 104 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartf/model.v1/layers.py | import tensorflow as tf
from keras import backend as K
from keras.initializers import glorot_normal
from keras.layers import Layer
class GaussianLayer(Layer):
    """Dense output head producing per-step Gaussian parameters (mu, sigma).

    sigma is made strictly positive via a softplus plus a small epsilon.
    """

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        self.kernel_1, self.kernel_2 = [], []
        self.bias_1, self.bias_2 = [], []
        super(GaussianLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        in_features = input_shape[-1]
        kernel_shape = (in_features, self.output_dim)
        bias_shape = (self.output_dim,)
        # Two independent affine maps: one for mu, one for (raw) sigma.
        self.kernel_1 = self.add_weight(name='kernel_1', shape=kernel_shape,
                                        initializer=glorot_normal(),
                                        trainable=True)
        self.kernel_2 = self.add_weight(name='kernel_2', shape=kernel_shape,
                                        initializer=glorot_normal(),
                                        trainable=True)
        self.bias_1 = self.add_weight(name='bias_1', shape=bias_shape,
                                      initializer=glorot_normal(),
                                      trainable=True)
        self.bias_2 = self.add_weight(name='bias_2', shape=bias_shape,
                                      initializer=glorot_normal(),
                                      trainable=True)
        super(GaussianLayer, self).build(input_shape)

    def call(self, x):
        mu = tf.matmul(x, self.kernel_1) + self.bias_1
        raw_sigma = tf.matmul(x, self.kernel_2) + self.bias_2
        # softplus keeps sigma > 0; the epsilon guards against underflow.
        sigma = K.log(1 + K.exp(raw_sigma)) + 1e-06
        return [mu, sigma]

    def compute_output_shape(self, input_shape):
        """Both parameter tensors keep the (batch, time) axes."""
        out = (input_shape[0], input_shape[1], self.output_dim)
        return [out, out]
#
# studentT
#
class StudentTLayer(Layer):
    """
    Keras layer mapping features to Student's t parameters (mu, sigma, nu).
    mu
        Tensor containing the means, of shape `(*batch_shape, *event_shape)`.
    sigma
        Tensor containing the standard deviations, of shape
        `(*batch_shape, *event_shape)`.
    nu
        Nonnegative tensor containing the degrees of freedom of the distribution,
        of shape `(*batch_shape, *event_shape)`.
    """
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        # one (kernel, bias) pair per distribution parameter: mu, sigma, nu
        self.num_parameters = 3
        # size the containers from num_parameters instead of repeating the
        # magic constant 3, so the two cannot drift apart
        self.kernels = [[] for _ in range(self.num_parameters)]
        self.biases = [[] for _ in range(self.num_parameters)]
        super(StudentTLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        """
        Create one projection matrix and one bias per parameter.
        input: shape (NTC), i.e. (batch, time, channels)
        """
        n_weight_rows = input_shape[2]
        # kernels first, then biases, to keep the weight-list order stable
        for i in range(self.num_parameters):
            self.kernels[i] = self.add_weight(name='kernel_%d' % i,
                                              shape=(n_weight_rows, self.output_dim),
                                              initializer=glorot_normal(),
                                              trainable=True)
        for i in range(self.num_parameters):
            self.biases[i] = self.add_weight(name='bias_%d' % i,
                                             shape=(self.output_dim,),
                                             initializer=glorot_normal(),
                                             trainable=True)
        super(StudentTLayer, self).build(input_shape)
    def call(self, x):
        """
        return mu, sigma, nu
        """
        output_mu = tf.matmul(x, self.kernels[0]) + self.biases[0]
        output_sig = tf.matmul(x, self.kernels[1]) + self.biases[1]
        # softplus (+ epsilon) keeps sigma strictly positive
        output_sig_pos = K.log(1 + K.exp(output_sig)) + 1e-06
        output_nu = tf.matmul(x, self.kernels[2]) + self.biases[2]
        # softplus + 2.0 keeps nu > 2 -- presumably so the variance stays
        # finite; TODO confirm against the likelihood implementation
        output_nu_pos = K.log(1 + K.exp(output_nu)) + 1e-06 + 2.0
        return [output_mu, output_sig_pos, output_nu_pos]
    def compute_output_shape(self, input_shape):
        """
        The assumption is the output ts is always one-dimensional
        """
        out_shape = (input_shape[0], input_shape[1], self.output_dim)
        return [out_shape, out_shape, out_shape]
| 4,517 | 38.982301 | 81 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartf/model/lstm_save.py | #from deepar.model import NNModel
#from deepar.model.layers import GaussianLayer
#from deepar.model.loss import gaussian_likelihood
from . import NNModel
from .layers import GaussianLayer, StudentTLayer
from .loss import gaussian_likelihood, gaussian_sampler
from .loss import studentt_likelihood, studentt_sampler
from keras.layers import Input, Dense, Input
from keras.models import Model
from keras.layers import LSTM
from keras import backend as K
import logging
import numpy as np
logger = logging.getLogger('deepar')
class DeepAR(NNModel):
    """DeepAR-style probabilistic forecaster built on Keras LSTM layers.
    The network's last layer emits the parameters of a target likelihood
    (Gaussian or Student's t); training minimizes the corresponding
    likelihood loss, and `predict`/`get_sample` expose the fitted
    distribution parameters and samples drawn from them.
    """
    def __init__(self, ts_obj, steps_per_epoch=50, epochs=100,
            distribution = 'Gaussian',
            optimizer='adam', with_custom_nn_structure=None):
        # ts_obj must implement next_batch(batch_size, n_steps); see ts_generator
        self.ts_obj = ts_obj
        self.inputs, self.z_sample = None, None
        self.steps_per_epoch = steps_per_epoch
        self.epochs = epochs
        self.optimizer = optimizer
        self.keras_model = None
        # choose (loss, output layer, sampler) as a matched triple
        if distribution == 'Gaussian':
            self.loss = gaussian_likelihood
            self.distrib = GaussianLayer
            self.sampler = gaussian_sampler
        elif distribution == 'StudentT':
            self.loss = studentt_likelihood
            self.distrib = StudentTLayer
            self.sampler = studentt_sampler
        else:
            # NOTE(review): an unrecognized `distribution` silently leaves
            # self.loss/self.distrib/self.sampler unset, which fails later
            # with AttributeError; raising ValueError here would be safer.
            pass
        if with_custom_nn_structure:
            self.nn_structure = with_custom_nn_structure
        else:
            self.nn_structure = DeepAR.basic_structure
        # name of the layer whose output tensors (theta) are fetched after training
        self._output_layer_name = 'main_output'
        # backend function built by fit()/instantiate_and_fit(); None until trained
        self.get_intermediate = None
    @staticmethod
    def basic_structure(**kwargs):
        """
        This is the method that needs to be patched when changing NN structure
        :return: inputs_shape (tuple), inputs (Tensor), [loc, scale] (a list of theta parameters
        of the target likelihood)
        """
        # required kwargs: context_len, prediction_len, input_dim, distrib
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        distrib = kwargs['distrib']
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = LSTM(4, return_sequences=True)(inputs)
        #x = Dense(3, activation='relu')(x)
        #loc, scale = GaussianLayer(1, name='main_output')(x)
        # the distribution layer emits the likelihood parameters (theta)
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta
    def instantiate_and_fit(self, verbose=False):
        """Build the network via self.nn_structure and train it.
        NOTE(review): self.nn_structure() is called without arguments, but
        basic_structure/encoder_decoder read required kwargs -- this path
        appears to work only with a custom nn_structure that supplies its
        own defaults; `fit` looks like the maintained entry point. TODO confirm.
        """
        input_shape, inputs, theta = self.nn_structure()
        # the Keras model predicts theta[0] (e.g. mu); the remaining
        # parameters are closed over by the likelihood loss
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        # backend function fetching all distribution parameters at once
        self.get_intermediate = K.function(inputs=[self.model.input],
                outputs=self.model.get_layer(self._output_layer_name).output)
    @property
    def model(self):
        """The underlying trained Keras model (None before training)."""
        return self.keras_model
    def predict_theta_from_input(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
            corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)
    @staticmethod
    def encoder_decoder(**kwargs):
        #context_len=40, prediction_len=2, input_dim=1,
        #        num_cells = 40, num_layers = 2, dropout_rate = 0.1):
        """
        follow the deepar 2-layers lstm encoder-decoder
        This is the method that needs to be patched when changing NN structure
        :return: inputs_shape (tuple), inputs (Tensor), [loc, scale] (a list of theta parameters
        of the target likelihood)
        """
        # required kwargs: context_len, prediction_len, input_dim,
        # num_cells, num_layers, dropout_rate, distrib
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        num_cells = kwargs['num_cells']
        num_layers = kwargs['num_layers']
        dropout_rate = kwargs['dropout_rate']
        distrib = kwargs['distrib']
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = inputs
        # stacked LSTMs, all returning full sequences
        for l in range(num_layers):
            x = LSTM(num_cells, return_sequences=True, dropout=dropout_rate)(x)
        #x = Dense(3, activation='relu')(x)
        #loc, scale = GaussianLayer(1, name='main_output')(x)
        #return input_shape, inputs, [loc, scale]
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta
    def fit(self, verbose=False,
            context_len=40, prediction_len=2, input_dim=1,
            num_cells = 40, num_layers = 2, dropout_rate = 0.1):
        """Build the network with the given hyper-parameters and train it
        on batches drawn from self.ts_obj via ts_generator."""
        input_shape, inputs, theta = self.nn_structure(
            distrib = self.distrib,
            context_len=context_len, prediction_len=prediction_len, input_dim=input_dim,
            num_cells = num_cells, num_layers = num_layers, dropout_rate = dropout_rate)
        # the model predicts theta[0]; the other parameters feed the loss
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        # backend function fetching all distribution parameters at once
        self.get_intermediate = K.function(inputs=[self.model.input],
                outputs=self.model.get_layer(self._output_layer_name).output)
    def predict(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
            corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)
    def get_sample(self, theta):
        """Draw a sample from the distribution parameterized by theta."""
        return self.sampler(theta)
def ts_generator(ts_obj, n_steps):
    """
    Infinite batch generator in the form Keras' fit_generator expects.
    :param ts_obj: a Dataset child class object that implements the 'next_batch' method
    :param n_steps: parameter that specifies the length of the net's input tensor
    :return: yields (inputs, targets) tuples, one 32-sample batch at a time
    """
    while True:
        inputs, targets = ts_obj.next_batch(32, n_steps)[:2]
        yield inputs, targets
| 7,098 | 36.363158 | 104 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartf/model/lstm.py | #from deepar.model import NNModel
#from deepar.model.layers import GaussianLayer
#from deepar.model.loss import gaussian_likelihood
from . import NNModel
from .layers import GaussianLayer, StudentTLayer
from .loss import gaussian_likelihood, gaussian_sampler
from .loss import studentt_likelihood, studentt_sampler
from keras.layers import Input, Dense, Input
from keras.models import Model
from keras.layers import LSTM
from keras import backend as K
import logging
import numpy as np
logger = logging.getLogger('deepar')
class DeepAR(NNModel):
    """DeepAR-style probabilistic forecaster built on Keras LSTM layers.
    The network's last layer emits the parameters of a target likelihood
    (Gaussian or Student's t); training minimizes the corresponding
    likelihood loss, and `predict`/`get_sample` expose the fitted
    distribution parameters and samples drawn from them.
    """
    def __init__(self, ts_obj, steps_per_epoch=50, epochs=100,
            distribution = 'Gaussian',
            optimizer='adam', with_custom_nn_structure=None):
        # ts_obj must implement next_batch(batch_size, n_steps); see ts_generator
        self.ts_obj = ts_obj
        self.inputs, self.z_sample = None, None
        self.steps_per_epoch = steps_per_epoch
        self.epochs = epochs
        self.optimizer = optimizer
        self.keras_model = None
        # choose (loss, output layer, sampler) as a matched triple
        if distribution == 'Gaussian':
            self.loss = gaussian_likelihood
            self.distrib = GaussianLayer
            self.sampler = gaussian_sampler
        elif distribution == 'StudentT':
            self.loss = studentt_likelihood
            self.distrib = StudentTLayer
            self.sampler = studentt_sampler
        else:
            # NOTE(review): an unrecognized `distribution` silently leaves
            # self.loss/self.distrib/self.sampler unset, which fails later
            # with AttributeError; raising ValueError here would be safer.
            pass
        if with_custom_nn_structure:
            self.nn_structure = with_custom_nn_structure
        else:
            self.nn_structure = DeepAR.basic_structure
        # name of the layer whose output tensors (theta) are fetched after training
        self._output_layer_name = 'main_output'
        # backend function built by fit()/instantiate_and_fit(); None until trained
        self.get_intermediate = None
    @staticmethod
    def basic_structure(**kwargs):
        """
        This is the method that needs to be patched when changing NN structure
        :return: inputs_shape (tuple), inputs (Tensor), [loc, scale] (a list of theta parameters
        of the target likelihood)
        """
        # required kwargs: context_len, prediction_len, input_dim, distrib
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        distrib = kwargs['distrib']
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = LSTM(4, return_sequences=True)(inputs)
        #x = Dense(3, activation='relu')(x)
        #loc, scale = GaussianLayer(1, name='main_output')(x)
        # the distribution layer emits the likelihood parameters (theta)
        theta = distrib(1, name='main_output')(x)
        return input_shape, inputs, theta
    def instantiate_and_fit(self, verbose=False):
        """Build the network via self.nn_structure and train it.
        NOTE(review): self.nn_structure() is called without arguments, but
        basic_structure/encoder_decoder read required kwargs -- this path
        appears to work only with a custom nn_structure that supplies its
        own defaults; `fit` looks like the maintained entry point. TODO confirm.
        """
        input_shape, inputs, theta = self.nn_structure()
        # the Keras model predicts theta[0] (e.g. mu); the remaining
        # parameters are closed over by the likelihood loss
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        # backend function fetching all distribution parameters at once
        self.get_intermediate = K.function(inputs=[self.model.input],
                outputs=self.model.get_layer(self._output_layer_name).output)
    @property
    def model(self):
        """The underlying trained Keras model (None before training)."""
        return self.keras_model
    def predict_theta_from_input(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
            corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)
    @staticmethod
    def encoder_decoder(**kwargs):
        #context_len=40, prediction_len=2, input_dim=1,
        #        num_cells = 40, num_layers = 2, dropout_rate = 0.1):
        """
        follow the deepar 2-layers lstm encoder-decoder
        This is the method that needs to be patched when changing NN structure
        :return: inputs_shape (tuple), inputs (Tensor), [loc, scale] (a list of theta parameters
        of the target likelihood)
        """
        # required kwargs: context_len, prediction_len, input_dim,
        # num_cells, num_layers, dropout_rate, distrib
        context_len = kwargs['context_len']
        prediction_len = kwargs['prediction_len']
        input_dim = kwargs['input_dim']
        num_cells = kwargs['num_cells']
        num_layers = kwargs['num_layers']
        dropout_rate = kwargs['dropout_rate']
        distrib = kwargs['distrib']
        seqlen = context_len + prediction_len
        input_shape = (seqlen, input_dim)
        inputs = Input(shape=input_shape)
        x = inputs
        # stacked LSTMs, all returning full sequences
        for l in range(num_layers):
            x = LSTM(num_cells, return_sequences=True, dropout=dropout_rate)(x)
        #x = Dense(3, activation='relu')(x)
        #loc, scale = GaussianLayer(1, name='main_output')(x)
        #return input_shape, inputs, [loc, scale]
        theta = distrib(1, name='main_output')(x)
        #theta = StudentTLayer(1, name='main_output')(x)
        return input_shape, inputs, theta
    def fit(self, verbose=False,
            context_len=20, prediction_len=2, input_dim=1,
            num_cells = 40, num_layers = 2, dropout_rate = 0.1):
        """Build the network with the given hyper-parameters and train it
        on batches drawn from self.ts_obj via ts_generator."""
        input_shape, inputs, theta = self.nn_structure(
            distrib = self.distrib,
            context_len=context_len, prediction_len=prediction_len, input_dim=input_dim,
            num_cells = num_cells, num_layers = num_layers, dropout_rate = dropout_rate)
        # the model predicts theta[0]; the other parameters feed the loss
        model = Model(inputs, theta[0])
        model.compile(loss=self.loss(theta[1:]), optimizer=self.optimizer)
        model.summary()
        model.fit_generator(ts_generator(self.ts_obj,
                                         input_shape[0]),
                            steps_per_epoch=self.steps_per_epoch,
                            epochs=self.epochs)
        if verbose:
            logger.debug('Model was successfully trained')
        self.keras_model = model
        # backend function fetching all distribution parameters at once
        self.get_intermediate = K.function(inputs=[self.model.input],
                outputs=self.model.get_layer(self._output_layer_name).output)
    def predict(self, input_list):
        """
        This function takes an input of size equal to the n_steps specified in 'Input' when building the
        network
        :param input_list:
        :return: [[]], a list of list. E.g. when using Gaussian layer this returns a list of two list,
            corresponding to [[mu_values], [sigma_values]]
        """
        if not self.get_intermediate:
            raise ValueError('TF model must be trained first!')
        return self.get_intermediate(input_list)
    def get_sample(self, theta):
        """Draw a sample from the distribution parameterized by theta."""
        return self.sampler(theta)
def ts_generator(ts_obj, n_steps):
    """
    Infinite batch generator in the form Keras' fit_generator expects.
    :param ts_obj: a Dataset child class object that implements the 'next_batch' method
    :param n_steps: parameter that specifies the length of the net's input tensor
    :return: yields (inputs, targets) tuples, one 32-sample batch at a time
    """
    while True:
        inputs, targets = ts_obj.next_batch(32, n_steps)[:2]
        yield inputs, targets
| 7,229 | 36.076923 | 104 | py |
rankpredictor | rankpredictor-master/src/indycar/model/deepartf/model/layers.py | import tensorflow as tf
from keras import backend as K
from keras.initializers import glorot_normal
from keras.layers import Layer
class GaussianLayer(Layer):
    """Dense output layer that emits Gaussian parameters.
    Applies two independent affine projections to the incoming features and
    returns ``[mu, sigma]``; sigma goes through a softplus so it stays
    strictly positive.
    """
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        self.kernel_1, self.kernel_2, self.bias_1, self.bias_2 = [], [], [], []
        super(GaussianLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        feature_dim = input_shape[-1]
        # one projection matrix per distribution parameter (mu, sigma);
        # weight names and creation order match the original checkpoints
        self.kernel_1, self.kernel_2 = (
            self.add_weight(name=weight_name,
                            shape=(feature_dim, self.output_dim),
                            initializer=glorot_normal(),
                            trainable=True)
            for weight_name in ('kernel_1', 'kernel_2'))
        self.bias_1, self.bias_2 = (
            self.add_weight(name=weight_name,
                            shape=(self.output_dim,),
                            initializer=glorot_normal(),
                            trainable=True)
            for weight_name in ('bias_1', 'bias_2'))
        super(GaussianLayer, self).build(input_shape)
    def call(self, x):
        mu = tf.matmul(x, self.kernel_1) + self.bias_1
        raw_sigma = tf.matmul(x, self.kernel_2) + self.bias_2
        # softplus (+ epsilon) keeps the scale strictly positive
        sigma = K.log(1 + K.exp(raw_sigma)) + 1e-06
        return [mu, sigma]
    def compute_output_shape(self, input_shape):
        """
        The assumption is the output ts is always one-dimensional
        """
        parameter_shape = (input_shape[0], input_shape[1], self.output_dim)
        return [parameter_shape, parameter_shape]
#
# studentT
#
class StudentTLayer(Layer):
    """
    Keras layer mapping features to Student's t parameters (mu, sigma, nu).
    mu
        Tensor containing the means, of shape `(*batch_shape, *event_shape)`.
    sigma
        Tensor containing the standard deviations, of shape
        `(*batch_shape, *event_shape)`.
    nu
        Nonnegative tensor containing the degrees of freedom of the distribution,
        of shape `(*batch_shape, *event_shape)`.
    """
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        # one (kernel, bias) pair per distribution parameter: mu, sigma, nu
        self.num_parameters = 3
        # size the containers from num_parameters instead of repeating the
        # magic constant 3, so the two cannot drift apart
        self.kernels = [[] for _ in range(self.num_parameters)]
        self.biases = [[] for _ in range(self.num_parameters)]
        super(StudentTLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        """
        Create one projection matrix and one bias per parameter.
        input: shape (NTC), i.e. (batch, time, channels)
        """
        n_weight_rows = input_shape[2]
        # kernels first, then biases, to keep the weight-list order stable
        for i in range(self.num_parameters):
            self.kernels[i] = self.add_weight(name='kernel_%d' % i,
                                              shape=(n_weight_rows, self.output_dim),
                                              initializer=glorot_normal(),
                                              trainable=True)
        for i in range(self.num_parameters):
            self.biases[i] = self.add_weight(name='bias_%d' % i,
                                             shape=(self.output_dim,),
                                             initializer=glorot_normal(),
                                             trainable=True)
        super(StudentTLayer, self).build(input_shape)
    def call(self, x):
        """
        return mu, sigma, nu
        """
        output_mu = tf.matmul(x, self.kernels[0]) + self.biases[0]
        output_sig = tf.matmul(x, self.kernels[1]) + self.biases[1]
        # softplus (+ epsilon) keeps sigma strictly positive
        output_sig_pos = K.log(1 + K.exp(output_sig)) + 1e-06
        output_nu = tf.matmul(x, self.kernels[2]) + self.biases[2]
        # softplus + 2.0 keeps nu > 2 -- presumably so the variance stays
        # finite; TODO confirm against the likelihood implementation
        output_nu_pos = K.log(1 + K.exp(output_nu)) + 1e-06 + 2.0
        return [output_mu, output_sig_pos, output_nu_pos]
    def compute_output_shape(self, input_shape):
        """
        The assumption is the output ts is always one-dimensional
        """
        out_shape = (input_shape[0], input_shape[1], self.output_dim)
        return [out_shape, out_shape, out_shape]
| 4,517 | 38.982301 | 81 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-weighted-fullloss-masked/trans_encoder.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
from gluonts.model.transformer.layers import (
TransformerProcessBlock,
TransformerFeedForward,
MultiHeadSelfAttention,
InputLayer,
)
class TransformerEncoder(HybridBlock):
    """Single transformer encoder block: an input projection followed by
    one self-attention + feed-forward sub-layer pair, each wrapped in the
    configurable pre/post process blocks (layer-norm / residual / dropout).
    Expected `config` keys: model_dim, pre_seq, post_seq, dropout_rate,
    num_heads, inner_ff_dim_scale, act_type.
    """
    @validated()
    def __init__(self, encoder_length: int, config: Dict, **kwargs) -> None:
        super().__init__(**kwargs)
        self.encoder_length = encoder_length
        with self.name_scope():
            # projects raw features to the model dimension
            self.enc_input_layer = InputLayer(model_size=config["model_dim"])
            self.enc_pre_self_att = TransformerProcessBlock(
                sequence=config["pre_seq"],
                dropout=config["dropout_rate"],
                prefix="pretransformerprocessblock_",
            )
            self.enc_self_att = MultiHeadSelfAttention(
                att_dim_in=config["model_dim"],
                heads=config["num_heads"],
                att_dim_out=config["model_dim"],
                dropout=config["dropout_rate"],
                prefix="multiheadselfattention_",
            )
            self.enc_post_self_att = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postselfatttransformerprocessblock_",
            )
            self.enc_ff = TransformerFeedForward(
                inner_dim=config["model_dim"] * config["inner_ff_dim_scale"],
                out_dim=config["model_dim"],
                act_type=config["act_type"],
                dropout=config["dropout_rate"],
                prefix="transformerfeedforward_",
            )
            self.enc_post_ff = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postfftransformerprocessblock_",
            )
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, data: Tensor) -> Tensor:
        """
        A transformer encoder block consists of a self-attention and a feed-forward layer with pre/post process blocks
        in between.
        """
        # input layer
        inputs = self.enc_input_layer(data)
        # self-attention; no mask is passed, so every position attends
        # over the full encoder context
        data_self_att, _ = self.enc_self_att(
            self.enc_pre_self_att(inputs, None)
        )
        # residual/norm/dropout per config["post_seq"]
        data = self.enc_post_self_att(data_self_att, inputs)
        # feed-forward
        data_ff = self.enc_ff(data)
        data = self.enc_post_ff(data_ff, data)
        return data
| 3,242 | 33.5 | 118 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-weighted-fullloss-masked/layers.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict, Optional, Tuple
# Third-party imports
import mxnet as mx
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.model.common import Tensor
def split_heads(F, x: Tensor, dim_per_head: int, heads: int) -> Tensor:
    r"""
    Fold the attention heads into the batch dimension.
    Parameters
    ----------
    x
        Tensor of shape (batch_size, time_length, dim).
    dim_per_head
        Dimension per head
    heads
        Number of heads
    Returns
    -------
    Tensor of shape (batch_size * heads, time_length, dim_per_head).
    """
    # expose the head axis: (batch_size, time_length, heads, dim_per_head)
    headed = F.reshape(data=x, shape=(0, -1, heads, dim_per_head))
    # move heads next to batch: (batch_size, heads, time_length, dim_per_head)
    reordered = F.transpose(data=headed, axes=(0, 2, 1, 3))
    # merge the two leading axes: (batch_size * heads, time_length, dim_per_head)
    return F.reshape(data=reordered, shape=(-3, -1, dim_per_head))
def dot_attention(
    F,
    queries: Tensor,
    keys: Tensor,
    values: Tensor,
    mask: Optional[Tensor] = None,
    dropout: float = 0.0,
) -> Tensor:
    r"""
    Dot-product attention core.
    Parameters
    ----------
    queries
        Attention queries of shape (n, lq, d)
    keys
        Attention keys of shape (n, lk, d)
    values
        Attention values of shape (n, lk, dv)
    mask
        Optional tensor added to the raw attention scores
    dropout
        Dropout rate
    Returns
    -------
    'Context' vectors for each query of shape (n, lq, dv)
    """
    # raw similarity scores: (n, lq, lk)
    scores = F.batch_dot(lhs=queries, rhs=keys, transpose_b=True)
    if mask is not None:
        scores = F.broadcast_add(scores, mask)
    # normalize over the key axis
    weights = F.softmax(scores, axis=-1)
    if dropout > 0.0:
        weights = F.Dropout(weights, p=dropout)
    # (n, lq, lk) x (n, lk, dv) -> (n, lq, dv)
    return F.batch_dot(lhs=weights, rhs=values)
def combine_heads(F, x: Tensor, dim_per_head: int, heads: int) -> Tensor:
    r"""
    Inverse of split_heads: restore the batch dimension and concatenate
    the per-head outputs along the feature axis.
    Parameters
    ----------
    x
        Tensor of shape (batch_size * heads, time_length, dim_per_head)
    dim_per_head
        Dimension per head
    heads
        Number of heads
    Returns
    -------
    Tensor of shape (batch_size, time_length, dim)
    """
    # unfold the batch axis: (batch_size, heads, time_length, dim_per_head)
    unfolded = F.reshape(data=x, shape=(-4, -1, heads, 0, dim_per_head))
    # (batch_size, time_length, heads, dim_per_head)
    reordered = F.transpose(unfolded, axes=(0, 2, 1, 3))
    # concatenate heads into one feature dimension
    return F.reshape(reordered, shape=(-1, 0, dim_per_head * heads))
class LayerNormalization(HybridBlock):
    """
    Implements layer normalization as proposed in [BKH16]_.
    """
    def __init__(
        self,
        scale_init: str = "ones",
        shift_init: str = "zeros",
        eps: float = 1e-06,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.scale_init = scale_init
        self.shift_init = shift_init
        with self.name_scope():
            # gluon's LayerNorm over the last axis does the actual work;
            # gamma/beta correspond to scale/shift below
            self.lnorm = mx.gluon.nn.LayerNorm(
                axis=-1,
                gamma_initializer=self.scale_init,
                beta_initializer=self.shift_init,
                epsilon=eps,
            )
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, data: Tensor) -> Tensor:
        r"""
        Normalizes hidden units of data as follows:
        data = scale * (data - mean) / sqrt(var + eps) + shift
        Normalization is performed over the last dimension of the input data.
        Parameters
        ----------
        data
            Data to normalize of shape (d0, ..., dn, num_hidden)
        Returns
        -------
        Normalized inputs of shape: (d0, ..., dn, num_hidden)
        """
        return self.lnorm(data)
class InputLayer(HybridBlock):
    r"""
    Transforms the input vector to model_size with an one-layer MPL, i.e.,
    (batch_size, time_length, input_dim) -> (batch_size, time_length, model_size)
    """
    def __init__(self, model_size: int = 64, **kwargs) -> None:
        super().__init__(**kwargs)
        self.model_size = model_size
        with self.name_scope():
            # flatten=False applies the Dense independently per time step
            self.net = mx.gluon.nn.Dense(units=self.model_size, flatten=False)
    def hybrid_forward(self, F, data: Tensor, *args):
        # plain linear projection; no activation
        return self.net(data)
class MultiHeadAttentionBase(HybridBlock):
    """
    Base class for Multi-head attention.
    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """
    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        assert (
            att_dim_in % heads == 0
        ), "Number of heads {} must divide attention att_dim_in {}".format(
            heads, att_dim_in
        )
        self.att_dim_in = att_dim_in
        self.heads = heads
        self.att_dim_out = att_dim_out
        self.dropout = dropout
        # per-head feature size; integral by the assert above
        self.dim_per_head = self.att_dim_in // self.heads
        with self.name_scope():
            # final linear projection applied after the heads are recombined
            self.dense_att = mx.gluon.nn.Dense(
                units=self.att_dim_out, flatten=False
            )
    def _attend(
        self,
        F,
        queries: Tensor,
        keys: Tensor,
        values: Tensor,
        mask: Optional[Tensor] = None,
    ) -> Tensor:
        r"""
        Returns context vectors of multi-head dot attention.
        Parameters
        ----------
        queries
            Queries tensor of shape (batch_size, query_max_length, dim)
        keys
            Keys tensor of shape (batch_size, memory_max_length, dim)
        values
            Values tensor of shape (batch_size, memory_max_length, dim)
        mask
            Optional tensor added to the attention scores
        Returns
        -------
        Context vectors of shape (batch_size, query_max_length, att_dim_out)
        """
        # scale by 1/sqrt(dim_per_head)
        queries = queries * (self.dim_per_head ** -0.5)
        # (batch_size * heads, length, dim/heads)
        queries = split_heads(F, queries, self.dim_per_head, self.heads)
        keys = split_heads(F, keys, self.dim_per_head, self.heads)
        values = split_heads(F, values, self.dim_per_head, self.heads)
        # (batch_size * heads, query_max_length, dim_per_head)
        contexts = dot_attention(
            F, queries, keys, values, mask=mask, dropout=self.dropout
        )
        # (batch_size, query_max_length, input_dim)
        contexts = combine_heads(F, contexts, self.dim_per_head, self.heads)
        # contexts: (batch_size, query_max_length, output_dim)
        contexts = self.dense_att(contexts)
        return contexts
    def hybrid_forward(self, F, *args, **kwargs):
        # subclasses define how queries/keys/values are produced
        raise NotImplementedError
class MultiHeadSelfAttention(MultiHeadAttentionBase):
    r"""
    Multi-head self-attention. Independent linear projections of inputs serve as
    queries, keys, and values for the attention.
    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """
    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(att_dim_in, heads, att_dim_out, dropout, **kwargs)
        with self.name_scope():
            # single projection producing Q, K and V in one pass
            self.dense_pre_satt = mx.gluon.nn.Dense(
                units=self.att_dim_in * 3, flatten=False
            )
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        inputs: Tensor,
        mask: Optional[Tensor] = None,
        cache: Optional[Dict[str, Optional[Tensor]]] = None,
    ) -> Tuple[Tensor, Optional[Dict]]:
        r"""
        Computes multi-head attention on a set of inputs, serving as queries,
        keys, and values. If sequence lengths are provided, they will be used
        to mask the attention scores. May also use a cache of previously
        computed inputs.
        Parameters
        ----------
        inputs
            Input data of shape (batch_size, max_length, att_dim_in)
        mask
            Optional tensor to mask attention scores
        cache
            Optional dictionary of previously computed keys and values
        Returns
        -------
        Tensor
            A tensor of shape (batch_size, max_length, att_dim_out)
        """
        # Q = K = V -> Q * W_q, K * W_k, V * W_v
        # combined: (batch_size, max_length, att_dim_in * 3)
        combined = self.dense_pre_satt(inputs)
        # split into queries, keys and values
        # (batch_size, max_length, att_dim_in)
        queries, keys, values = F.split(data=combined, num_outputs=3, axis=2)
        if cache is not None:
            # append new keys and values to cache, update the cache
            # (past keys/values grow along the time axis, presumably for
            # one-step-at-a-time decoding -- see caller)
            keys = cache["k"] = (
                keys
                if "k" not in cache.keys()
                else F.concat(cache["k"], keys, dim=1)
            )
            values = cache["v"] = (
                values
                if "v" not in cache.keys()
                else F.concat(cache["v"], values, dim=1)
            )
        return self._attend(F, queries, keys, values, mask), cache
class MultiHeadAttention(MultiHeadAttentionBase):
    r"""
    Multi-head attention layer for queries independent from keys/values.
    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """
    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(att_dim_in, heads, att_dim_out, dropout, **kwargs)
        with self.name_scope():
            # separate projections for queries and for the memory's
            # keys/values (queries come from a different source than memory)
            self.dense_pre_att_q = mx.gluon.nn.Dense(
                units=self.att_dim_in, flatten=False
            )
            self.dense_pre_att_k = mx.gluon.nn.Dense(
                units=self.att_dim_in, flatten=False
            )
            self.dense_pre_att_v = mx.gluon.nn.Dense(
                units=self.att_dim_in, flatten=False
            )
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self, F, queries: Tensor, memory: Tensor, mask: Optional[Tensor] = None
    ) -> Tensor:
        r"""
        Computes multi-head attention for queries given a memory tensor.
        If sequence lengths are provided, they will be used to mask the attention scores.
        A mask tensor may also be used to mask the attention scores.
        Returns a tensor of shape (batch_size, max_length, att_dim_out).
        Parameters
        ----------
        queries
            Queries tensor of shape (batch_size, query_max_length, att_dim_in)
        memory
            Memory tensor to attend to of shape (batch_size, memory_max_length, att_dim_in)
        mask
            Optional tensor to mask attention scores
        Returns
        -------
        Tensor of shape (batch_size, query_seq_len, att_dim_out)
        """
        # Q -> Q * W_q
        # K = V -> K * W_k, V * W_v
        # (batch, query_max_length, att_dim_in)
        queries = self.dense_pre_att_q(queries)
        # (batch, memory_max_length, att_dim_in)
        keys = self.dense_pre_att_k(memory)
        # (batch, memory_max_length, att_dim_in)
        values = self.dense_pre_att_v(memory)
        return self._attend(F, queries, keys, values, mask=mask)
class TransformerFeedForward(HybridBlock):
    r"""
    Position-wise feed-forward network with activation.
    .. math::
        activation(XW_1 + b_1)W_2 + b_2
    :math:`W_1`: (batch_size, d, inner_dim)
    :math:`W_2`: (batch_size, inner_dim, out_dim)
    """
    def __init__(
        self,
        inner_dim: int = 32,  # W1: (batch_size, d, inner_dim)
        out_dim: int = 32,  # W2: (batch_size, inner_dim, out_dim)
        act_type: str = "softrelu",
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.inner_dim = inner_dim
        self.out_dim = out_dim
        self.dropout = dropout
        self.act_type = act_type
        with self.name_scope():
            # two Dense layers applied per position (flatten=False), with
            # dropout between them when configured
            self.mlp = mx.gluon.nn.HybridSequential()
            self.mlp.add(
                mx.gluon.nn.Dense(
                    units=self.inner_dim,
                    use_bias=True,
                    activation=self.act_type,
                    flatten=False,
                )
            )
            if self.dropout > 0.0:
                self.mlp.add(mx.gluon.nn.Dropout(self.dropout))
            self.mlp.add(
                mx.gluon.nn.Dense(units=out_dim, use_bias=True, flatten=False)
            )  # no activation
    def hybrid_forward(self, F, x: Tensor, *args) -> Tensor:
        r"""
        Position-wise feed-forward network with activation.
        Parameters
        ----------
        x
            Tensor of shape (batch_size, d, in_dim)
        Returns
        -------
        Tensor of shape (batch_size, d1, out_dim)
        """
        return self.mlp(x)
class TransformerProcessBlock(HybridBlock):
    r"""
    Block to perform pre/post processing on layer inputs.
    The processing steps are determined by the sequence argument, which can contain one of the three operations:
    n: layer normalization
    r: residual connection
    d: dropout
    """
    def __init__(self, sequence: str, dropout: float, **kwargs) -> None:
        super().__init__(**kwargs)
        self.sequence = sequence
        self.dropout = dropout
        self.layer_norm = None
        # the norm sub-block is only created when the sequence asks for it
        if "n" in sequence:
            self.layer_norm = LayerNormalization()
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self, F, data: Tensor, prev: Optional[Tensor] = None
    ) -> Tensor:
        r"""
        Apply processing sequence to data with optional previous input.
        The steps in `self.sequence` are applied left to right, e.g. "dr"
        means dropout followed by a residual add of `prev`.
        Parameters
        ----------
        data
            Input data of shape: (batch_size, length, num_hidden)
        prev
            Previous data of shape (batch_size, length, num_hidden)
        Returns
        -------
        Processed data of shape (batch_size, length, num_hidden).
        """
        if not self.sequence:
            return data
        if prev is None:
            assert (
                "r" not in self.sequence
            ), "Residual connection not allowed if no previous value given."
        for step in self.sequence:
            if step == "r":
                data = F.broadcast_add(data, prev)
            elif step == "n":
                data = self.layer_norm(data)
            elif step == "d":
                # dropout is a no-op when the configured rate is zero
                if self.dropout > 0.0:
                    data = F.Dropout(data, p=self.dropout)
            else:
                raise ValueError("Unknown step in sequence: %s" % step)
        return data
| 15,991 | 27.506239 | 112 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-weighted-fullloss-masked/trans_decoder.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
from gluonts.model.transformer.layers import (
TransformerProcessBlock,
TransformerFeedForward,
MultiHeadSelfAttention,
MultiHeadAttention,
InputLayer,
)
class TransformerDecoder(HybridBlock):
    """
    Single transformer decoder block: masked self-attention over the decoder
    input, attention over the encoder output, and a position-wise
    feed-forward layer, each wrapped in the configured pre/post process
    blocks. A cache of self-attention state is kept for step-by-step
    (non-training) decoding.
    """
    @validated()
    def __init__(self, decoder_length: int, config: Dict, **kwargs) -> None:
        super().__init__(**kwargs)
        # number of decoding steps this block is built for
        self.decoder_length = decoder_length
        # self-attention state carried across inference steps (see hybrid_forward)
        self.cache = {}
        with self.name_scope():
            # embeds raw inputs into the model dimension
            self.enc_input_layer = InputLayer(model_size=config["model_dim"])
            self.dec_pre_self_att = TransformerProcessBlock(
                sequence=config["pre_seq"],
                dropout=config["dropout_rate"],
                prefix="pretransformerprocessblock_",
            )
            self.dec_self_att = MultiHeadSelfAttention(
                att_dim_in=config["model_dim"],
                heads=config["num_heads"],
                att_dim_out=config["model_dim"],
                dropout=config["dropout_rate"],
                prefix="multiheadselfattention_",
            )
            self.dec_post_self_att = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postselfatttransformerprocessblock_",
            )
            # attention of decoder positions over the encoder output
            self.dec_enc_att = MultiHeadAttention(
                att_dim_in=config["model_dim"],
                heads=config["num_heads"],
                att_dim_out=config["model_dim"],
                dropout=config["dropout_rate"],
                prefix="multiheadattention_",
            )
            self.dec_post_att = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postatttransformerprocessblock_",
            )
            self.dec_ff = TransformerFeedForward(
                inner_dim=config["model_dim"] * config["inner_ff_dim_scale"],
                out_dim=config["model_dim"],
                act_type=config["act_type"],
                dropout=config["dropout_rate"],
                prefix="transformerfeedforward_",
            )
            self.dec_post_ff = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postffransformerprocessblock_",
            )
    def cache_reset(self):
        """Drop the cached self-attention state (call between forecasts)."""
        self.cache = {}
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        data: Tensor,
        enc_out: Tensor,
        mask: Optional[Tensor] = None,
        is_train: bool = True,
    ) -> Tensor:
        """
        A transformer encoder block consists of a self-attention and a feed-forward layer with pre/post process blocks
        in between.

        Parameters
        ----------
        data
            Decoder input sequence (NTC layout).
        enc_out
            Encoder output attended over by the second attention stage.
        mask
            Optional additive attention mask (e.g. upper-triangular causal mask).
        is_train
            When False, the self-attention cache is used and updated so the
            decoder can be called one step at a time.
        """
        # embedding
        inputs = self.enc_input_layer(data)
        # self-attention; during inference a copy of the cache is passed in
        # and the updated cache is kept after the call
        data_att, cache = self.dec_self_att(
            self.dec_pre_self_att(inputs, None),
            mask,
            self.cache.copy() if not is_train else None,
        )
        data = self.dec_post_self_att(data_att, inputs)
        # encoder attention
        data_att = self.dec_enc_att(data, enc_out)
        data = self.dec_post_att(data_att, data)
        # feed-forward
        data_ff = self.dec_ff(data)
        data = self.dec_post_ff(data_ff, data)
        if not is_train:
            self.cache = cache.copy()
        return data
| 4,259 | 32.543307 | 118 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-weighted-fullloss-masked/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Tuple, List, Optional
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.scaler import NOPScaler, MeanScaler
from gluonts.block.feature import FeatureEmbedder
from gluonts.core.component import validated
from gluonts.distribution import DistributionOutput
from gluonts.model.common import Tensor
from gluonts.model.transformer.trans_encoder import TransformerEncoder
from gluonts.model.transformer.trans_decoder import TransformerDecoder
from gluonts.support.util import weighted_average
# Additive attention-mask fill value: logits pushed this far down vanish
# after the softmax.
LARGE_NEGATIVE_VALUE = -99999999


def prod(xs):
    """Return the product of all elements of *xs* (1 for an empty iterable)."""
    result = 1
    for factor in xs:
        result *= factor
    return result
class TransformerWeightedFullLossMaskedNetwork(mx.gluon.HybridBlock):
    """
    Base network shared by the training and prediction variants of the
    weighted/masked Transformer model. Holds the encoder/decoder stacks,
    the categorical-feature embedder, the target scaler and the
    distribution-parameter projection, and provides the input-assembly
    helpers used by both subclasses.
    """
    @validated()
    def __init__(
        self,
        encoder: TransformerEncoder,
        decoder: TransformerDecoder,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        cardinality: List[int],
        embedding_dimension: int,
        lags_seq: List[int],
        scaling: bool = True,
        weight_coef: float = 9,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.scaling = scaling
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.distr_output = distr_output
        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        # sorted in place so that lag extraction below is deterministic
        lags_seq.sort()
        self.lags_seq = lags_seq
        self.target_shape = distr_output.event_shape
        # relative loss weight applied to "changed" time steps by the
        # training subclass (see TransformerWeightedFullLossMaskedTrainingNetwork)
        self.weight_coef = weight_coef
        with self.name_scope():
            # projects network outputs to the distribution's parameters
            self.proj_dist_args = distr_output.get_args_proj()
            self.encoder = encoder
            self.decoder = decoder
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=[embedding_dimension for _ in cardinality],
            )
            # MeanScaler normalizes targets by their context mean;
            # NOPScaler leaves them untouched
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)
    @staticmethod
    def get_lagged_subsequences(
        F,
        sequence: Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> Tensor:
        """
        Returns lagged subsequences of a given sequence.
        Parameters
        ----------
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted. Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.
        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and I = len(indices), containing lagged
            subsequences. Specifically, lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        # we must have: sequence_length - lag_index - subsequences_length >= 0
        # for all lag_index, hence the following assert
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, found lag {max(indices)} "
            f"while history length is only {sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)
        lagged_values = []
        for lag_index in indices:
            # slice counted from the end of the sequence; lag 0 uses
            # end=None because a slice cannot end at index -0
            begin_index = -lag_index - subsequences_length
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(
                F.slice_axis(
                    sequence, axis=1, begin=begin_index, end=end_index
                )
            )
        return F.stack(*lagged_values, axis=-1)
    def create_network_input(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, num_features, history_length)
        past_target: Tensor,  # (batch_size, history_length, 1)
        past_observed_values: Tensor,  # (batch_size, history_length)
        future_time_feat: Optional[
            Tensor
        ],  # (batch_size, num_features, prediction_length)
        future_target: Optional[Tensor],  # (batch_size, prediction_length)
    ) -> Tuple[Tensor, Tensor, Tensor]:
        """
        Creates inputs for the transformer network.
        All tensor arguments should have NTC layout.

        Returns the concatenated (lags, time features, static features)
        input tensor, the scale computed over the context window, and the
        static feature vector.
        """
        # inference mode (no future data): only the context window is used;
        # training mode: context + prediction windows are concatenated
        if future_time_feat is None or future_target is None:
            time_feat = past_time_feat.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            )
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        else:
            time_feat = F.concat(
                past_time_feat.slice_axis(
                    axis=1,
                    begin=self.history_length - self.context_length,
                    end=None,
                ),
                future_time_feat,
                dim=1,
            )
            sequence = F.concat(past_target, future_target, dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags = self.get_lagged_subsequences(
            F=F,
            sequence=sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length,
        )
        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
            past_observed_values.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
        )
        embedded_cat = self.embedder(feat_static_cat)
        # in addition to embedding features, use the log scale as it can help prediction too
        # (batch_size, num_features + prod(target_shape))
        static_feat = F.concat(
            embedded_cat,
            F.log(scale)
            if len(self.target_shape) == 0
            else F.log(scale.squeeze(axis=1)),
            dim=1,
        )
        # broadcast static features across every time step
        repeated_static_feat = static_feat.expand_dims(axis=1).repeat(
            axis=1, repeats=subsequences_length
        )
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))
        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = F.reshape(
            data=lags_scaled,
            shape=(
                -1,
                subsequences_length,
                len(self.lags_seq) * prod(self.target_shape),
            ),
        )
        # (batch_size, sub_seq_len, input_dim)
        inputs = F.concat(input_lags, time_feat, repeated_static_feat, dim=-1)
        return inputs, scale, static_feat
    @staticmethod
    def upper_triangular_mask(F, d):
        """
        Return a (d, d) additive mask whose strictly-upper-triangular entries
        are LARGE_NEGATIVE_VALUE (blocking attention to future positions) and
        whose remaining entries are 0.
        """
        mask = F.zeros_like(F.eye(d))
        for k in range(d - 1):
            mask = mask + F.eye(d, d, k + 1)
        return mask * LARGE_NEGATIVE_VALUE
    def hybrid_forward(self, F, x, *args, **kwargs):
        # implemented by the training / prediction subclasses
        raise NotImplementedError
class TransformerWeightedFullLossMaskedTrainingNetwork(TransformerWeightedFullLossMaskedNetwork):
    """
    Training variant: computes a weighted negative-log-likelihood loss over
    the full context + prediction window, masking unobserved steps and
    up-weighting steps where the target value changes.
    """
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
    ) -> Tensor:
        """
        Computes the loss for training Transformer, all inputs tensors representing time series have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape, seq_len)
        future_time_feat : (batch_size, prediction_length, num_features)
        future_target : (batch_size, prediction_length, *target_shape)
        Returns
        -------
        Scalar loss: the mean over the batch of the weighted per-series loss.
        """
        # create the inputs for the encoder
        inputs, scale, _ = self.create_network_input(
            F=F,
            feat_static_cat=feat_static_cat,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
        )
        # split the assembled input into the context (encoder) part and the
        # prediction (decoder) part
        enc_input = F.slice_axis(
            inputs, axis=1, begin=0, end=self.context_length
        )
        dec_input = F.slice_axis(
            inputs, axis=1, begin=self.context_length, end=None
        )
        # pass through encoder
        enc_out = self.encoder(enc_input)
        # input to decoder; the causal mask prevents attending to the future
        dec_output = self.decoder(
            dec_input,
            enc_out,
            self.upper_triangular_mask(F, self.prediction_length),
        )
        # concat encoder and decoder outputs so the loss covers the full
        # context + prediction window ("full loss")
        all_output = F.concat(
            enc_out,
            dec_output,
            dim=1
        )
        # compute loss
        #distr_args = self.proj_dist_args(dec_output)
        distr_args = self.proj_dist_args(all_output)
        distr = self.distr_output.distribution(distr_args, scale=scale)
        # original loss
        #loss = distr.loss(future_target)
        # put together target sequence
        # (batch_size, seq_len, *target_shape)
        target = F.concat(
            past_target.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            ),
            future_target,
            dim=1,
        )
        # (batch_size, seq_len)
        loss = distr.loss(target)
        #loss = distr.loss(future_target)
        ## (batch_size, seq_len, *target_shape)
        # future values are always treated as observed
        future_observed_values = F.ones_like(future_target)
        observed_values = F.concat(
            past_observed_values.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=self.history_length,
            ),
            future_observed_values,
            dim=1,
        )
        ## mask the loss at one time step iff one or more observations is missing in the target dimensions
        # (batch_size, seq_len)
        loss_weights1 = (
            observed_values
            if (len(self.target_shape) == 0)
            else observed_values.min(axis=-1, keepdims=False)
        )
        #print('observed shape:', observed_values.shape)
        #import pdb; pdb.set_trace()
        #if _hybridized_:
        # NOTE(review): dead branch kept from a hybridization experiment;
        # only the `else` arm below ever runs.
        if False:
            r = F.slice_axis(target, axis=1, begin=-2, end=None)
            l = F.slice_axis(target, axis=1, begin=-4, end=-2)
            w1 = F.ones_like(r)
            w9 = F.ones_like(r)*self.weight_coef
            w = F.where(r==l, w1, w9)
            loss_weights2 = w
        else:
            # compare the target at step t with the target at step t-2:
            # r = target[:, 2:], l = target[:, :-2]
            r = F.slice_axis(target, axis=1, begin=2, end=None)
            l = F.slice_axis(target, axis=1, begin=0, end=-2)
            w1 = F.ones_like(r)
            w9 = F.ones_like(r)*self.weight_coef
            # weight_coef where the value changed, 1 where it stayed equal
            w = F.where(r==l, w1, w9)
            # the first two steps have no t-2 counterpart and get weight 1
            s = F.slice_axis(target, axis=1, begin=0, end=2)
            z = F.ones_like(s)
            # NOTE(review): relies on F.concat's default concat axis —
            # presumably the time axis (1); confirm against the mxnet default.
            loss_weights2 = F.concat(z, w)
        # zero weight wherever observations are missing, otherwise use the
        # change-based weights computed above
        loss_weights = F.where(loss_weights1==0, loss_weights1, loss_weights2)
        #
        weighted_loss = weighted_average(
            F=F, x=loss, weights=loss_weights, axis=1
        )
        # need to mask possible nans and -inf
        #loss = F.where(condition=loss_weights, x=loss, y=F.zeros_like(loss))
        #return weighted loss of future
        #loss = F.slice_axis(weighted_loss, axis=1, begin=-2, end=None)
        loss = weighted_loss
        return loss.mean()
class TransformerWeightedFullLossMaskedPredictionNetwork(TransformerWeightedFullLossMaskedNetwork):
    """
    Prediction variant: encodes the context once, then autoregressively
    samples the prediction horizon one step at a time, drawing
    ``num_parallel_samples`` sample paths per series.
    """
    @validated()
    def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
        super().__init__(**kwargs)
        self.num_parallel_samples = num_parallel_samples
        # for decoding the lags are shifted by one,
        # at the first time-step of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]
    def sampling_decoder(
        self,
        F,
        static_feat: Tensor,
        past_target: Tensor,
        time_feat: Tensor,
        scale: Tensor,
        enc_out: Tensor,
    ) -> Tensor:
        """
        Computes sample paths by unrolling the LSTM starting with a initial input and state.
        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length, 1).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, ).
        enc_out: Tensor
            output of the encoder. Shape: (batch_size, num_cells)
        Returns
        --------
        sample_paths : Tensor
            a tensor containing sampled paths. Shape: (batch_size, num_sample_paths, prediction_length).
        """
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_time_feat = time_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_static_feat = static_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_enc_out = enc_out.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_scale = scale.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        future_samples = []
        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            # lags are re-extracted each step so that freshly sampled values
            # become available as inputs to the next step
            lags = self.get_lagged_subsequences(
                F=F,
                sequence=repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags_scaled = F.broadcast_div(
                lags, repeated_scale.expand_dims(axis=-1)
            )
            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = F.reshape(
                data=lags_scaled,
                shape=(-1, 1, prod(self.target_shape) * len(self.lags_seq)),
            )
            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            dec_input = F.concat(
                input_lags,
                repeated_time_feat.slice_axis(axis=1, begin=k, end=k + 1),
                repeated_static_feat,
                dim=-1,
            )
            # is_train=False: the decoder uses and updates its internal cache
            dec_output = self.decoder(dec_input, repeated_enc_out, None, False)
            distr_args = self.proj_dist_args(dec_output)
            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(
                distr_args, scale=repeated_scale
            )
            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample()
            # (batch_size * num_samples, seq_len, *target_shape)
            repeated_past_target = F.concat(
                repeated_past_target, new_samples, dim=1
            )
            future_samples.append(new_samples)
        # reset cache of the decoder
        self.decoder.cache_reset()
        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = F.concat(*future_samples, dim=1)
        # (batch_size, num_samples, *target_shape, prediction_length)
        return samples.reshape(
            shape=(
                (-1, self.num_parallel_samples)
                + self.target_shape
                + (self.prediction_length,)
            )
        )
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
    ) -> Tensor:
        """
        Predicts samples, all tensors should have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        Returns predicted samples
        -------
        """
        # create the inputs for the encoder
        inputs, scale, static_feat = self.create_network_input(
            F=F,
            feat_static_cat=feat_static_cat,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=None,
            future_target=None,
        )
        # pass through encoder
        enc_out = self.encoder(inputs)
        return self.sampling_decoder(
            F=F,
            past_target=past_target,
            time_feat=future_time_feat,
            static_feat=static_feat,
            scale=scale,
            enc_out=enc_out,
        )
| 19,508 | 33.106643 | 116 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-weighted-fullloss-masked/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import StudentTOutput, DistributionOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import (
TimeFeature,
get_lags_for_frequency,
time_features_from_frequency_str,
)
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SetField,
Transformation,
VstackFeatures,
)
# Relative imports
from ._network import (
TransformerWeightedFullLossMaskedPredictionNetwork,
TransformerWeightedFullLossMaskedTrainingNetwork,
)
from gluonts.model.transformer.trans_encoder import TransformerEncoder
from gluonts.model.transformer.trans_decoder import TransformerDecoder
class TransformerWeightedFullLossMaskedEstimator(GluonEstimator):
    """
    Construct a Transformer estimator.
    This implements a Transformer model, close to the one described in
    [Vaswani2017]_.
    .. [Vaswani2017] Vaswani, Ashish, et al. "Attention is all you need."
        Advances in neural information processing systems. 2017.
    Parameters
    ----------
    freq
        Frequency of the data to train on and predict
    prediction_length
        Length of the prediction horizon
    context_length
        Number of steps to unroll the RNN for before computing predictions
        (default: None, in which case context_length = prediction_length)
    trainer
        Trainer object to be used (default: Trainer())
    dropout_rate
        Dropout regularization parameter (default: 0.1)
    cardinality
        Number of values of the each categorical feature (default: [1])
    embedding_dimension
        Dimension of the embeddings for categorical features (the same
        dimension is used for all embeddings, default: 5)
    distr_output
        Distribution to use to evaluate observations and sample predictions
        (default: StudentTOutput())
    model_dim
        Dimension of the transformer network, i.e., embedding dimension of the input
        (default: 32)
    inner_ff_dim_scale
        Dimension scale of the inner hidden layer of the transformer's
        feedforward network (default: 4)
    pre_seq
        Sequence that defined operations of the processing block before the main transformer
        network. Available operations: 'd' for dropout, 'r' for residual connections
        and 'n' for normalization (default: 'dn')
    post_seq
        seq
        Sequence that defined operations of the processing block in and after the main
        transformer network. Available operations: 'd' for dropout, 'r' for residual connections
        and 'n' for normalization (default: 'drn').
    act_type
        Activation type of the transformer network (default: 'softrelu')
    num_heads
        Number of heads in the multi-head attention (default: 8)
    scaling
        Whether to automatically scale the target values (default: true)
    lags_seq
        Indices of the lagged target values to use as inputs of the RNN
        (default: None, in which case these are automatically determined
        based on freq)
    time_features
        Time features to use as inputs of the RNN (default: None, in which
        case these are automatically determined based on freq)
    use_feat_dynamic_real
        Whether to use the ``feat_dynamic_real`` field from the data
        (default: False)
    use_feat_static_cat
        Whether to use the ``feat_static_cat`` field from the data
        (default: False)
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism during inference.
        This is a model optimization that does not affect the accuracy (default: 100)
    weight_coef
        Relative loss weight applied by the training network to time steps
        where the target value changes (default: 9)
    """

    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        context_length: Optional[int] = None,
        trainer: Trainer = Trainer(),
        dropout_rate: float = 0.1,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: int = 20,
        distr_output: DistributionOutput = StudentTOutput(),
        model_dim: int = 32,
        inner_ff_dim_scale: int = 4,
        pre_seq: str = "dn",
        post_seq: str = "drn",
        act_type: str = "softrelu",
        num_heads: int = 8,
        scaling: bool = True,
        lags_seq: Optional[List[int]] = None,
        time_features: Optional[List[TimeFeature]] = None,
        use_feat_dynamic_real: bool = False,
        use_feat_static_cat: bool = False,
        num_parallel_samples: int = 100,
        weight_coef: float = 9,
    ) -> None:
        super().__init__(trainer=trainer)

        # validate user-supplied hyper-parameters early, with clear messages
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
        assert (
            cardinality is not None or not use_feat_static_cat
        ), "You must set `cardinality` if `use_feat_static_cat=True`"
        assert cardinality is None or all(
            [c > 0 for c in cardinality]
        ), "Elements of `cardinality` should be > 0"
        assert (
            embedding_dimension > 0
        ), "The value of `embedding_dimension` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"

        self.freq = freq
        self.prediction_length = prediction_length
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.distr_output = distr_output
        self.dropout_rate = dropout_rate
        self.use_feat_dynamic_real = use_feat_dynamic_real
        self.use_feat_static_cat = use_feat_static_cat
        self.cardinality = cardinality if use_feat_static_cat else [1]
        self.embedding_dimension = embedding_dimension
        self.num_parallel_samples = num_parallel_samples
        self.weight_coef = weight_coef
        self.lags_seq = (
            lags_seq
            if lags_seq is not None
            else get_lags_for_frequency(freq_str=freq)
            #else get_lags_for_frequency(freq_str=freq, num_lags=1)
        )
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )
        # the history window must be long enough to extract the largest lag
        self.history_length = self.context_length + max(self.lags_seq)
        self.scaling = scaling
        self.config = {
            "model_dim": model_dim,
            "pre_seq": pre_seq,
            "post_seq": post_seq,
            "dropout_rate": dropout_rate,
            "inner_ff_dim_scale": inner_ff_dim_scale,
            "act_type": act_type,
            "num_heads": num_heads,
        }
        # encoder/decoder are shared between the training and prediction
        # networks (parameters are copied at predictor creation time)
        self.encoder = TransformerEncoder(
            self.context_length, self.config, prefix="enc_"
        )
        self.decoder = TransformerDecoder(
            self.prediction_length, self.config, prefix="dec_"
        )

    def create_transformation(self) -> Transformation:
        """Build the chain of data transformations applied before training/inference."""
        remove_field_names = [
            FieldName.FEAT_DYNAMIC_CAT,
            FieldName.FEAT_STATIC_REAL,
        ]
        if not self.use_feat_dynamic_real:
            remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
        return Chain(
            [RemoveFields(field_names=remove_field_names)]
            + (
                [SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
                if not self.use_feat_static_cat
                else []
            )
            + [
                AsNumpyArray(field=FieldName.FEAT_STATIC_CAT, expected_ndim=1),
                AsNumpyArray(
                    field=FieldName.TARGET,
                    # in the following line, we add 1 for the time dimension
                    expected_ndim=1 + len(self.distr_output.event_shape),
                ),
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                ),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=self.time_features,
                    pred_length=self.prediction_length,
                ),
                AddAgeFeature(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_AGE,
                    pred_length=self.prediction_length,
                    log_scale=True,
                ),
                VstackFeatures(
                    output_field=FieldName.FEAT_TIME,
                    input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
                    + (
                        [FieldName.FEAT_DYNAMIC_REAL]
                        if self.use_feat_dynamic_real
                        else []
                    ),
                ),
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                    past_length=self.history_length,
                    future_length=self.prediction_length,
                    time_series_fields=[
                        FieldName.FEAT_TIME,
                        FieldName.OBSERVED_VALUES,
                    ],
                ),
            ]
        )

    def create_training_network(self) -> TransformerWeightedFullLossMaskedTrainingNetwork:
        """Instantiate the training network with this estimator's configuration."""
        training_network = TransformerWeightedFullLossMaskedTrainingNetwork(
            encoder=self.encoder,
            decoder=self.decoder,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            # fix: forward the configured values instead of hard-coding
            # scaling=True and silently ignoring `weight_coef`
            scaling=self.scaling,
            weight_coef=self.weight_coef,
        )
        return training_network

    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """Build a prediction network sharing the trained parameters and wrap it in a predictor."""
        prediction_network = TransformerWeightedFullLossMaskedPredictionNetwork(
            encoder=self.encoder,
            decoder=self.decoder,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            # fix: respect the estimator's `scaling` flag (was hard-coded True)
            scaling=self.scaling,
            num_parallel_samples=self.num_parallel_samples,
        )
        copy_parameters(trained_network, prediction_network)
        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
        )
| 12,478 | 37.754658 | 100 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-weighted-masked/trans_encoder.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
from gluonts.model.transformer.layers import (
TransformerProcessBlock,
TransformerFeedForward,
MultiHeadSelfAttention,
InputLayer,
)
class TransformerEncoder(HybridBlock):
    """
    Single transformer encoder block: input embedding, self-attention and a
    position-wise feed-forward layer, each wrapped in the configured
    pre/post process blocks.
    """
    @validated()
    def __init__(self, encoder_length: int, config: Dict, **kwargs) -> None:
        super().__init__(**kwargs)
        # number of time steps this encoder is built for
        self.encoder_length = encoder_length
        with self.name_scope():
            # embeds raw inputs into the model dimension
            self.enc_input_layer = InputLayer(model_size=config["model_dim"])
            self.enc_pre_self_att = TransformerProcessBlock(
                sequence=config["pre_seq"],
                dropout=config["dropout_rate"],
                prefix="pretransformerprocessblock_",
            )
            self.enc_self_att = MultiHeadSelfAttention(
                att_dim_in=config["model_dim"],
                heads=config["num_heads"],
                att_dim_out=config["model_dim"],
                dropout=config["dropout_rate"],
                prefix="multiheadselfattention_",
            )
            self.enc_post_self_att = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postselfatttransformerprocessblock_",
            )
            self.enc_ff = TransformerFeedForward(
                inner_dim=config["model_dim"] * config["inner_ff_dim_scale"],
                out_dim=config["model_dim"],
                act_type=config["act_type"],
                dropout=config["dropout_rate"],
                prefix="transformerfeedforward_",
            )
            self.enc_post_ff = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postfftransformerprocessblock_",
            )
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, data: Tensor) -> Tensor:
        """
        A transformer encoder block consists of a self-attention and a feed-forward layer with pre/post process blocks
        in between.

        Parameters
        ----------
        data
            Encoder input sequence (NTC layout).
        """
        # input layer
        inputs = self.enc_input_layer(data)
        # self-attention (no mask: the encoder may attend to all positions)
        data_self_att, _ = self.enc_self_att(
            self.enc_pre_self_att(inputs, None)
        )
        data = self.enc_post_self_att(data_self_att, inputs)
        # feed-forward
        data_ff = self.enc_ff(data)
        data = self.enc_post_ff(data_ff, data)
        return data
| 3,242 | 33.5 | 118 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-weighted-masked/layers.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict, Optional, Tuple
# Third-party imports
import mxnet as mx
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.model.common import Tensor
def split_heads(F, x: Tensor, dim_per_head: int, heads: int) -> Tensor:
    r"""
    Fold the attention heads into the batch dimension.

    Parameters
    ----------
    x
        Tensor of shape (batch_size, time_length, dim).
    dim_per_head
        Dimension per head
    heads
        Number of heads

    Returns
    -------
    Tensor of shape (batch_size * heads, time_length, dim_per_head).
    """
    # split the feature axis: (batch_size, time_length, heads, dim_per_head)
    folded = F.reshape(data=x, shape=(0, -1, heads, dim_per_head))
    # move heads next to batch: (batch_size, heads, time_length, dim_per_head)
    folded = F.transpose(data=folded, axes=(0, 2, 1, 3))
    # merge batch and head axes (-3 collapses the first two dims)
    return F.reshape(data=folded, shape=(-3, -1, dim_per_head))
def dot_attention(
    F,
    queries: Tensor,
    keys: Tensor,
    values: Tensor,
    mask: Optional[Tensor] = None,
    dropout: float = 0.0,
) -> Tensor:
    r"""
    Dot-product attention core: ``softmax(Q K^T + mask) V``.

    Parameters
    ----------
    queries
        Attention queries of shape (n, lq, d)
    keys
        Attention keys of shape (n, lk, d)
    values
        Attention values of shape (n, lk, dv)
    mask
        Optional additive mask tensor (large negative values suppress positions)
    dropout
        Dropout rate applied to the attention probabilities

    Returns
    -------
    'Context' vectors for each query of shape (n, lq, dv)
    """
    # raw attention scores: (n, lq, lk)
    scores = F.batch_dot(lhs=queries, rhs=keys, transpose_b=True)
    if mask is not None:
        scores = F.broadcast_add(scores, mask)
    weights = F.softmax(scores, axis=-1)
    if dropout > 0.0:
        weights = F.Dropout(weights, p=dropout)
    # (n, lq, lk) x (n, lk, dv) -> (n, lq, dv)
    return F.batch_dot(lhs=weights, rhs=values)
def combine_heads(F, x: Tensor, dim_per_head: int, heads: int) -> Tensor:
    r"""
    Inverse of :func:`split_heads`: merge per-head outputs back into one
    feature axis.

    Parameters
    ----------
    x
        Tensor of shape (batch_size * heads, time_length, dim_per_head)
    dim_per_head
        Dimension per head
    heads
        Number of heads

    Returns
    -------
    Tensor of shape (batch_size, time_length, dim)
    """
    # unfold the batch axis: (batch_size, heads, time_length, dim_per_head)
    unfolded = F.reshape(data=x, shape=(-4, -1, heads, 0, dim_per_head))
    # (batch_size, time_length, heads, dim_per_head)
    unfolded = F.transpose(unfolded, axes=(0, 2, 1, 3))
    # concatenate heads along the feature axis
    return F.reshape(unfolded, shape=(-1, 0, dim_per_head * heads))
class LayerNormalization(HybridBlock):
    """
    Layer normalization as proposed in [BKH16]_, delegating to
    ``mx.gluon.nn.LayerNorm`` over the last axis.
    """

    def __init__(
        self,
        scale_init: str = "ones",
        shift_init: str = "zeros",
        eps: float = 1e-06,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.scale_init = scale_init
        self.shift_init = shift_init
        with self.name_scope():
            # gamma (scale) and beta (shift) use the requested initializers
            self.lnorm = mx.gluon.nn.LayerNorm(
                axis=-1,
                gamma_initializer=self.scale_init,
                beta_initializer=self.shift_init,
                epsilon=eps,
            )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, data: Tensor) -> Tensor:
        r"""
        Normalize ``data`` over its last dimension:

            data = scale * (data - mean) / sqrt(var + eps) + shift

        Input and output both have shape (d0, ..., dn, num_hidden).
        """
        return self.lnorm(data)
class InputLayer(HybridBlock):
    r"""
    Projects the input to ``model_size`` with a single dense layer, i.e.
    (batch_size, time_length, input_dim) -> (batch_size, time_length, model_size).
    """

    def __init__(self, model_size: int = 64, **kwargs) -> None:
        super().__init__(**kwargs)
        self.model_size = model_size
        with self.name_scope():
            # flatten=False keeps the time axis intact
            self.net = mx.gluon.nn.Dense(units=self.model_size, flatten=False)

    def hybrid_forward(self, F, data: Tensor, *args):
        # apply the projection position-wise
        return self.net(data)
class MultiHeadAttentionBase(HybridBlock):
    """
    Shared machinery for the multi-head attention variants: holds the head
    bookkeeping and the final output projection, and implements the
    head-split / attend / head-merge pipeline.

    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """

    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        assert (
            att_dim_in % heads == 0
        ), "Number of heads {} must divide attention att_dim_in {}".format(
            heads, att_dim_in
        )
        self.att_dim_in = att_dim_in
        self.heads = heads
        self.att_dim_out = att_dim_out
        self.dropout = dropout
        self.dim_per_head = self.att_dim_in // self.heads
        with self.name_scope():
            # output projection shared by all attention variants
            self.dense_att = mx.gluon.nn.Dense(
                units=self.att_dim_out, flatten=False
            )

    def _attend(
        self,
        F,
        queries: Tensor,
        keys: Tensor,
        values: Tensor,
        mask: Optional[Tensor] = None,
    ) -> Tensor:
        r"""
        Multi-head dot attention over pre-projected queries/keys/values.

        Parameters
        ----------
        queries
            Queries tensor of shape (batch_size, query_max_length, dim)
        keys
            Keys tensor of shape (batch_size, memory_max_length, dim)
        values
            Values tensor of shape (batch_size, memory_max_length, dim)
        mask
            Optional additive attention mask

        Returns
        -------
        Context vectors of shape (batch_size, query_max_length, att_dim_out)
        """
        # scale queries by 1/sqrt(dim_per_head)
        scaled_queries = queries * (self.dim_per_head ** -0.5)
        # fold heads into the batch axis: (batch * heads, len, dim_per_head)
        q = split_heads(F, scaled_queries, self.dim_per_head, self.heads)
        k = split_heads(F, keys, self.dim_per_head, self.heads)
        v = split_heads(F, values, self.dim_per_head, self.heads)
        # (batch * heads, query_max_length, dim_per_head)
        per_head_ctx = dot_attention(
            F, q, k, v, mask=mask, dropout=self.dropout
        )
        # merge heads back: (batch, query_max_length, att_dim_in)
        merged = combine_heads(F, per_head_ctx, self.dim_per_head, self.heads)
        # final projection to (batch, query_max_length, att_dim_out)
        return self.dense_att(merged)

    def hybrid_forward(self, F, *args, **kwargs):
        # subclasses define how Q/K/V are produced
        raise NotImplementedError
class MultiHeadSelfAttention(MultiHeadAttentionBase):
    r"""
    Multi-head self-attention. Independent linear projections of inputs serve as
    queries, keys, and values for the attention.
    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """
    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(att_dim_in, heads, att_dim_out, dropout, **kwargs)
        with self.name_scope():
            # one dense layer producing Q, K and V concatenated (3x att_dim_in)
            self.dense_pre_satt = mx.gluon.nn.Dense(
                units=self.att_dim_in * 3, flatten=False
            )
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        inputs: Tensor,
        mask: Optional[Tensor] = None,
        cache: Optional[Dict[str, Optional[Tensor]]] = None,
    ) -> Tuple[Tensor, Optional[Dict]]:
        r"""
        Computes multi-head attention on a set of inputs, serving as queries,
        keys, and values. A mask tensor may be used to mask the attention
        scores. May also use a cache of previously computed keys and values,
        which is updated in place (used for step-by-step decoding).
        Parameters
        ----------
        inputs
            Input data of shape (batch_size, max_length, att_dim_in)
        mask
            Optional tensor to mask attention scores
        cache
            Optional dictionary of previously computed keys and values
        Returns
        -------
        Tuple of (context tensor of shape (batch_size, max_length, att_dim_out),
        the possibly-updated cache dictionary).
        """
        # Q = K = V -> Q * W_q, K * W_k, V * W_v
        # combined: (batch_size, max_length, att_dim_in * 3)
        combined = self.dense_pre_satt(inputs)
        # split into queries, keys and values
        # (batch_size, max_length, att_dim_in)
        queries, keys, values = F.split(data=combined, num_outputs=3, axis=2)
        if cache is not None:
            # append new keys and values to cache, update the cache
            keys = cache["k"] = (
                keys
                if "k" not in cache.keys()
                else F.concat(cache["k"], keys, dim=1)
            )
            values = cache["v"] = (
                values
                if "v" not in cache.keys()
                else F.concat(cache["v"], values, dim=1)
            )
        return self._attend(F, queries, keys, values, mask), cache
class MultiHeadAttention(MultiHeadAttentionBase):
    r"""
    Multi-head cross-attention: queries come from one tensor while keys and
    values are linear projections of a separate memory tensor.

    Parameters
    ----------
    att_dim_in
        Attention dimension (number of hidden units)
    heads
        Number of attention heads
    att_dim_out
        Output dimension (number of output units)
    dropout
        Dropout rate on attention scores
    """

    def __init__(
        self,
        att_dim_in: int = 32,
        heads: int = 8,
        att_dim_out: int = 32,
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(att_dim_in, heads, att_dim_out, dropout, **kwargs)
        with self.name_scope():
            # separate input projections for queries, keys and values
            self.dense_pre_att_q = mx.gluon.nn.Dense(
                units=self.att_dim_in, flatten=False
            )
            self.dense_pre_att_k = mx.gluon.nn.Dense(
                units=self.att_dim_in, flatten=False
            )
            self.dense_pre_att_v = mx.gluon.nn.Dense(
                units=self.att_dim_in, flatten=False
            )

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self, F, queries: Tensor, memory: Tensor, mask: Optional[Tensor] = None
    ) -> Tensor:
        r"""
        Attend from ``queries`` to a ``memory`` tensor, optionally masking
        the attention scores.

        Parameters
        ----------
        queries
            Queries tensor of shape (batch_size, query_max_length, att_dim_in)
        memory
            Memory tensor to attend to of shape (batch_size, memory_max_length, att_dim_in)
        mask
            Optional tensor to mask attention scores

        Returns
        -------
        Tensor of shape (batch_size, query_seq_len, att_dim_out)
        """
        # Q -> Q * W_q ; K = V = memory -> K * W_k, V * W_v
        projected_q = self.dense_pre_att_q(queries)
        projected_k = self.dense_pre_att_k(memory)
        projected_v = self.dense_pre_att_v(memory)
        return self._attend(
            F, projected_q, projected_k, projected_v, mask=mask
        )
class TransformerFeedForward(HybridBlock):
    r"""
    Position-wise feed-forward network with activation:

    .. math::
        activation(XW_1 + b_1)W_2 + b_2

    where :math:`W_1` expands to ``inner_dim`` and :math:`W_2` projects
    back to ``out_dim``.
    """

    def __init__(
        self,
        inner_dim: int = 32,  # hidden width of the first dense layer
        out_dim: int = 32,  # output width of the second dense layer
        act_type: str = "softrelu",
        dropout: float = 0.0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.inner_dim = inner_dim
        self.out_dim = out_dim
        self.dropout = dropout
        self.act_type = act_type
        with self.name_scope():
            self.mlp = mx.gluon.nn.HybridSequential()
            # expansion layer with activation
            self.mlp.add(
                mx.gluon.nn.Dense(
                    units=self.inner_dim,
                    use_bias=True,
                    activation=self.act_type,
                    flatten=False,
                )
            )
            if self.dropout > 0.0:
                self.mlp.add(mx.gluon.nn.Dropout(self.dropout))
            # projection back to out_dim, deliberately without activation
            self.mlp.add(
                mx.gluon.nn.Dense(units=out_dim, use_bias=True, flatten=False)
            )

    def hybrid_forward(self, F, x: Tensor, *args) -> Tensor:
        r"""
        Apply the two-layer MLP position-wise.

        Parameters
        ----------
        x
            Tensor of shape (batch_size, d, in_dim)

        Returns
        -------
        Tensor of shape (batch_size, d1, out_dim)
        """
        return self.mlp(x)
class TransformerProcessBlock(HybridBlock):
    r"""
    Pre/post processing applied around transformer sub-layers.

    The ``sequence`` string lists the operations to apply in order:
        'n' layer normalization, 'r' residual add of ``prev``, 'd' dropout.
    """

    def __init__(self, sequence: str, dropout: float, **kwargs) -> None:
        super().__init__(**kwargs)
        self.sequence = sequence
        self.dropout = dropout
        # the layer norm is only instantiated when the sequence requests it
        self.layer_norm = None
        if "n" in sequence:
            self.layer_norm = LayerNormalization()

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self, F, data: Tensor, prev: Optional[Tensor] = None
    ) -> Tensor:
        r"""
        Apply the configured processing sequence to ``data``, optionally
        using ``prev`` for the residual connection.

        Parameters
        ----------
        data
            Input data of shape: (batch_size, length, num_hidden)
        prev
            Previous data of shape (batch_size, length, num_hidden)

        Returns
        -------
        Processed data of shape (batch_size, length, num_hidden).
        """
        if not self.sequence:
            return data
        if prev is None:
            assert (
                "r" not in self.sequence
            ), "Residual connection not allowed if no previous value given."
        for op in self.sequence:
            if op == "r":
                data = F.broadcast_add(data, prev)
            elif op == "n":
                data = self.layer_norm(data)
            elif op == "d":
                # dropout is a no-op when the rate is zero
                if self.dropout > 0.0:
                    data = F.Dropout(data, p=self.dropout)
            else:
                raise ValueError("Unknown step in sequence: %s" % op)
        return data
| 15,991 | 27.506239 | 112 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-weighted-masked/trans_decoder.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Dict, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
from gluonts.model.transformer.layers import (
TransformerProcessBlock,
TransformerFeedForward,
MultiHeadSelfAttention,
MultiHeadAttention,
InputLayer,
)
class TransformerDecoder(HybridBlock):
    """
    Transformer decoder block: masked self-attention over the decoder input,
    cross-attention over the encoder output, and a position-wise feed-forward
    layer, each wrapped in pre/post process blocks.

    ``self.cache`` stores keys/values from previous decoding steps so that
    autoregressive inference (``is_train=False``) can feed one new time step
    at a time.
    """
    @validated()
    def __init__(self, decoder_length: int, config: Dict, **kwargs) -> None:
        super().__init__(**kwargs)
        self.decoder_length = decoder_length
        # key/value cache used for step-by-step decoding at inference time
        self.cache = {}
        with self.name_scope():
            # projects raw decoder features to model_dim
            self.enc_input_layer = InputLayer(model_size=config["model_dim"])
            self.dec_pre_self_att = TransformerProcessBlock(
                sequence=config["pre_seq"],
                dropout=config["dropout_rate"],
                prefix="pretransformerprocessblock_",
            )
            self.dec_self_att = MultiHeadSelfAttention(
                att_dim_in=config["model_dim"],
                heads=config["num_heads"],
                att_dim_out=config["model_dim"],
                dropout=config["dropout_rate"],
                prefix="multiheadselfattention_",
            )
            self.dec_post_self_att = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postselfatttransformerprocessblock_",
            )
            # encoder-decoder (cross) attention
            self.dec_enc_att = MultiHeadAttention(
                att_dim_in=config["model_dim"],
                heads=config["num_heads"],
                att_dim_out=config["model_dim"],
                dropout=config["dropout_rate"],
                prefix="multiheadattention_",
            )
            self.dec_post_att = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postatttransformerprocessblock_",
            )
            self.dec_ff = TransformerFeedForward(
                inner_dim=config["model_dim"] * config["inner_ff_dim_scale"],
                out_dim=config["model_dim"],
                act_type=config["act_type"],
                dropout=config["dropout_rate"],
                prefix="transformerfeedforward_",
            )
            # NOTE(review): the prefix below looks like a typo
            # ("ffransformer"); it names trained parameters, so renaming it
            # would break previously-saved models — leave as-is.
            self.dec_post_ff = TransformerProcessBlock(
                sequence=config["post_seq"],
                dropout=config["dropout_rate"],
                prefix="postffransformerprocessblock_",
            )
    def cache_reset(self):
        # drop cached keys/values before decoding a new sequence
        self.cache = {}
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        data: Tensor,
        enc_out: Tensor,
        mask: Optional[Tensor] = None,
        is_train: bool = True,
    ) -> Tensor:
        """
        A transformer decoder block consists of a (masked) self-attention, an
        encoder attention, and a feed-forward layer with pre/post process
        blocks in between.

        Parameters
        ----------
        data
            Decoder input features.
        enc_out
            Encoder output attended to by the cross-attention layer.
        mask
            Optional additive attention mask (e.g. an upper-triangular causal
            mask during training).
        is_train
            When False, the self-attention key/value cache is consumed and
            updated so the decoder can be invoked one step at a time.
        """
        # embedding
        inputs = self.enc_input_layer(data)
        # self-attention (cache passed only at inference time)
        data_att, cache = self.dec_self_att(
            self.dec_pre_self_att(inputs, None),
            mask,
            self.cache.copy() if not is_train else None,
        )
        data = self.dec_post_self_att(data_att, inputs)
        # encoder attention
        data_att = self.dec_enc_att(data, enc_out)
        data = self.dec_post_att(data_att, data)
        # feed-forward
        data_ff = self.dec_ff(data)
        data = self.dec_post_ff(data_ff, data)
        if not is_train:
            # persist the updated key/value cache for the next decoding step
            self.cache = cache.copy()
        return data
| 4,259 | 32.543307 | 118 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-weighted-masked/_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Tuple, List, Optional
# Third-party imports
import mxnet as mx
# First-party imports
from gluonts.block.scaler import NOPScaler, MeanScaler
from gluonts.block.feature import FeatureEmbedder
from gluonts.core.component import validated
from gluonts.distribution import DistributionOutput
from gluonts.model.common import Tensor
from gluonts.model.transformer.trans_encoder import TransformerEncoder
from gluonts.model.transformer.trans_decoder import TransformerDecoder
from gluonts.support.util import weighted_average
LARGE_NEGATIVE_VALUE = -99999999
def prod(xs):
p = 1
for x in xs:
p *= x
return p
class TransformerWeightedMaskedNetwork(mx.gluon.HybridBlock):
    """
    Base class for the weighted/masked Transformer forecasting networks.

    Holds the encoder/decoder, categorical-feature embedder, scaler and
    distribution-parameter projection shared by the training and prediction
    subclasses; ``hybrid_forward`` is implemented by those subclasses.
    """
    @validated()
    def __init__(
        self,
        encoder: TransformerEncoder,
        decoder: TransformerDecoder,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        cardinality: List[int],
        embedding_dimension: int,
        lags_seq: List[int],
        scaling: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.scaling = scaling
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.distr_output = distr_output
        assert len(set(lags_seq)) == len(
            lags_seq
        ), "no duplicated lags allowed!"
        # NOTE: sorts the caller's list in place
        lags_seq.sort()
        self.lags_seq = lags_seq
        self.target_shape = distr_output.event_shape
        with self.name_scope():
            # projects decoder outputs to distribution parameters
            self.proj_dist_args = distr_output.get_args_proj()
            self.encoder = encoder
            self.decoder = decoder
            # one embedding per categorical feature, all of the same width
            self.embedder = FeatureEmbedder(
                cardinalities=cardinality,
                embedding_dims=[embedding_dimension for _ in cardinality],
            )
            if scaling:
                self.scaler = MeanScaler(keepdims=True)
            else:
                self.scaler = NOPScaler(keepdims=True)
    @staticmethod
    def get_lagged_subsequences(
        F,
        sequence: Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> Tensor:
        """
        Returns lagged subsequences of a given sequence.
        Parameters
        ----------
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted. Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.
        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and I = len(indices), containing lagged
            subsequences. Specifically, lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        # we must have: sequence_length - lag_index - subsequences_length >= 0
        # for all lag_index, hence the following assert
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, found lag {max(indices)} "
            f"while history length is only {sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)
        lagged_values = []
        for lag_index in indices:
            # window ending lag_index steps before the end of the sequence
            begin_index = -lag_index - subsequences_length
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(
                F.slice_axis(
                    sequence, axis=1, begin=begin_index, end=end_index
                )
            )
        # stack lags along a new trailing axis
        return F.stack(*lagged_values, axis=-1)
    def create_network_input(
        self,
        F,
        feat_static_cat: Tensor,  # (batch_size, num_features)
        past_time_feat: Tensor,  # (batch_size, num_features, history_length)
        past_target: Tensor,  # (batch_size, history_length, 1)
        past_observed_values: Tensor,  # (batch_size, history_length)
        future_time_feat: Optional[
            Tensor
        ],  # (batch_size, num_features, prediction_length)
        future_target: Optional[Tensor],  # (batch_size, prediction_length)
    ) -> Tuple[Tensor, Tensor, Tensor]:
        """
        Creates inputs for the transformer network.
        All tensor arguments should have NTC layout.

        Returns
        -------
        A tuple of (inputs, scale, static_feat): the concatenated network
        input of shape (batch_size, sub_seq_len, input_dim), the per-series
        scale of shape (batch_size, 1, *target_shape), and the static
        features (embeddings plus log-scale).
        """
        if future_time_feat is None or future_target is None:
            # inference mode: condition only on the context window
            time_feat = past_time_feat.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            )
            sequence = past_target
            sequence_length = self.history_length
            subsequences_length = self.context_length
        else:
            # training mode: context window plus the known prediction range
            time_feat = F.concat(
                past_time_feat.slice_axis(
                    axis=1,
                    begin=self.history_length - self.context_length,
                    end=None,
                ),
                future_time_feat,
                dim=1,
            )
            sequence = F.concat(past_target, future_target, dim=1)
            sequence_length = self.history_length + self.prediction_length
            subsequences_length = self.context_length + self.prediction_length
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags = self.get_lagged_subsequences(
            F=F,
            sequence=sequence,
            sequence_length=sequence_length,
            indices=self.lags_seq,
            subsequences_length=subsequences_length,
        )
        # scale is computed on the context length last units of the past target
        # scale shape is (batch_size, 1, *target_shape)
        _, scale = self.scaler(
            past_target.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
            past_observed_values.slice_axis(
                axis=1, begin=-self.context_length, end=None
            ),
        )
        embedded_cat = self.embedder(feat_static_cat)
        # in addition to embedding features, use the log scale as it can help prediction too
        # (batch_size, num_features + prod(target_shape))
        static_feat = F.concat(
            embedded_cat,
            F.log(scale)
            if len(self.target_shape) == 0
            else F.log(scale.squeeze(axis=1)),
            dim=1,
        )
        # broadcast static features along the time axis
        repeated_static_feat = static_feat.expand_dims(axis=1).repeat(
            axis=1, repeats=subsequences_length
        )
        # (batch_size, sub_seq_len, *target_shape, num_lags)
        lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))
        # from (batch_size, sub_seq_len, *target_shape, num_lags)
        # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
        input_lags = F.reshape(
            data=lags_scaled,
            shape=(
                -1,
                subsequences_length,
                len(self.lags_seq) * prod(self.target_shape),
            ),
        )
        # (batch_size, sub_seq_len, input_dim)
        inputs = F.concat(input_lags, time_feat, repeated_static_feat, dim=-1)
        return inputs, scale, static_feat
    @staticmethod
    def upper_triangular_mask(F, d):
        # additive causal mask of shape (d, d): strictly-upper-triangular
        # entries hold a large negative value so the softmax suppresses
        # attention to future positions
        mask = F.zeros_like(F.eye(d))
        for k in range(d - 1):
            mask = mask + F.eye(d, d, k + 1)
        return mask * LARGE_NEGATIVE_VALUE
    def hybrid_forward(self, F, x, *args, **kwargs):
        # implemented by the training/prediction subclasses
        raise NotImplementedError
class TransformerWeightedMaskedTrainingNetwork(TransformerWeightedMaskedNetwork):
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
        future_target: Tensor,
    ) -> Tensor:
        """
        Computes the weighted training loss for the Transformer; all input
        tensors representing time series have NTC layout.

        The loss on the prediction range is re-weighted to counter class
        imbalance: time steps whose target differs from the value two steps
        earlier ("change points") receive weight 9, unchanged steps weight 1.

        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        future_target : (batch_size, prediction_length, *target_shape)

        Returns
        -------
        Scalar tensor holding the mean weighted negative log-likelihood.
        """
        # create the inputs for the encoder/decoder
        inputs, scale, _ = self.create_network_input(
            F=F,
            feat_static_cat=feat_static_cat,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=future_time_feat,
            future_target=future_target,
        )
        enc_input = F.slice_axis(
            inputs, axis=1, begin=0, end=self.context_length
        )
        dec_input = F.slice_axis(
            inputs, axis=1, begin=self.context_length, end=None
        )
        # encode the conditioning range
        enc_out = self.encoder(enc_input)
        # decode the prediction range with a causal (upper-triangular) mask
        dec_output = self.decoder(
            dec_input,
            enc_out,
            self.upper_triangular_mask(F, self.prediction_length),
        )
        # project decoder outputs to distribution parameters
        distr_args = self.proj_dist_args(dec_output)
        distr = self.distr_output.distribution(distr_args, scale=scale)
        # full target sequence (context + prediction range), used only to
        # derive the change-point weights below
        # (batch_size, context_length + prediction_length, *target_shape)
        target = F.concat(
            past_target.slice_axis(
                axis=1,
                begin=self.history_length - self.context_length,
                end=None,
            ),
            future_target,
            dim=1,
        )
        # negative log-likelihood on the prediction range only
        loss = distr.loss(future_target)
        # change-point weights: compare the last prediction steps of the
        # target with the same steps shifted back by two positions.
        # NOTE(review): the slice widths (-2 and -4:-2) hard-code
        # prediction_length == 2 — confirm against the estimator config.
        recent = F.slice_axis(target, axis=1, begin=-2, end=None)
        earlier = F.slice_axis(target, axis=1, begin=-4, end=-2)
        unchanged_w = F.ones_like(recent)
        changed_w = F.ones_like(recent) * 9
        loss_weights = F.where(recent == earlier, unchanged_w, changed_w)
        weighted_loss = weighted_average(
            F=F, x=loss, weights=loss_weights, axis=1
        )
        return weighted_loss.mean()
class TransformerWeightedMaskedPredictionNetwork(TransformerWeightedMaskedNetwork):
    """
    Inference-time network: encodes the conditioning range once, then draws
    ``num_parallel_samples`` sample paths autoregressively from the decoder.
    """
    @validated()
    def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
        super().__init__(**kwargs)
        self.num_parallel_samples = num_parallel_samples
        # for decoding the lags are shifted by one,
        # at the first time-step of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]
    def sampling_decoder(
        self,
        F,
        static_feat: Tensor,
        past_target: Tensor,
        time_feat: Tensor,
        scale: Tensor,
        enc_out: Tensor,
    ) -> Tensor:
        """
        Computes sample paths by unrolling the decoder one time step at a
        time, feeding each drawn sample back as input for the next step.
        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length, 1).
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, ).
        enc_out: Tensor
            output of the encoder. Shape: (batch_size, num_cells)
        Returns
        --------
        sample_paths : Tensor
            a tensor containing sampled paths. Shape: (batch_size, num_sample_paths, prediction_length).
        """
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_time_feat = time_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        repeated_static_feat = static_feat.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_enc_out = enc_out.repeat(
            repeats=self.num_parallel_samples, axis=0
        ).expand_dims(axis=1)
        repeated_scale = scale.repeat(
            repeats=self.num_parallel_samples, axis=0
        )
        future_samples = []
        # for each future time-unit we draw new samples and append them to
        # the (growing) target history
        for k in range(self.prediction_length):
            lags = self.get_lagged_subsequences(
                F=F,
                sequence=repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags_scaled = F.broadcast_div(
                lags, repeated_scale.expand_dims(axis=-1)
            )
            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = F.reshape(
                data=lags_scaled,
                shape=(-1, 1, prod(self.target_shape) * len(self.lags_seq)),
            )
            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            dec_input = F.concat(
                input_lags,
                repeated_time_feat.slice_axis(axis=1, begin=k, end=k + 1),
                repeated_static_feat,
                dim=-1,
            )
            # is_train=False -> decoder uses and updates its key/value cache
            dec_output = self.decoder(dec_input, repeated_enc_out, None, False)
            distr_args = self.proj_dist_args(dec_output)
            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(
                distr_args, scale=repeated_scale
            )
            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample()
            # (batch_size * num_samples, seq_len, *target_shape)
            repeated_past_target = F.concat(
                repeated_past_target, new_samples, dim=1
            )
            future_samples.append(new_samples)
        # reset cache of the decoder before the next forward pass
        self.decoder.cache_reset()
        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = F.concat(*future_samples, dim=1)
        # (batch_size, num_samples, *target_shape, prediction_length)
        return samples.reshape(
            shape=(
                (-1, self.num_parallel_samples)
                + self.target_shape
                + (self.prediction_length,)
            )
        )
    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        past_time_feat: Tensor,
        past_target: Tensor,
        past_observed_values: Tensor,
        future_time_feat: Tensor,
    ) -> Tensor:
        """
        Predicts samples, all tensors should have NTC layout.
        Parameters
        ----------
        F
        feat_static_cat : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        Returns
        -------
        Predicted sample paths of shape
        (batch_size, num_parallel_samples, *target_shape, prediction_length).
        """
        # create the inputs for the encoder (inference mode: no future target)
        inputs, scale, static_feat = self.create_network_input(
            F=F,
            feat_static_cat=feat_static_cat,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_observed_values=past_observed_values,
            future_time_feat=None,
            future_target=None,
        )
        # pass through encoder
        enc_out = self.encoder(inputs)
        return self.sampling_decoder(
            F=F,
            past_target=past_target,
            time_feat=future_time_feat,
            static_feat=static_feat,
            scale=scale,
            enc_out=enc_out,
        )
| 19,290 | 33.325623 | 116 | py |
rankpredictor | rankpredictor-master/src/indycar/model/transformer-weighted-masked/_estimator.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional
# Third-party imports
from mxnet.gluon import HybridBlock
# First-party imports
from gluonts.core.component import validated
from gluonts.dataset.field_names import FieldName
from gluonts.distribution import StudentTOutput, DistributionOutput
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.support.util import copy_parameters
from gluonts.time_feature import (
TimeFeature,
get_lags_for_frequency,
time_features_from_frequency_str,
)
from gluonts.trainer import Trainer
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SetField,
Transformation,
VstackFeatures,
)
# Relative imports
from ._network import (
TransformerWeightedMaskedPredictionNetwork,
TransformerWeightedMaskedTrainingNetwork,
)
from gluonts.model.transformer.trans_encoder import TransformerEncoder
from gluonts.model.transformer.trans_decoder import TransformerDecoder
class TransformerWeightedMaskedEstimator(GluonEstimator):
    """
    Construct a Transformer estimator.

    This implements a Transformer model, close to the one described in
    [Vaswani2017]_.

    .. [Vaswani2017] Vaswani, Ashish, et al. "Attention is all you need."
        Advances in neural information processing systems. 2017.

    Parameters
    ----------
    freq
        Frequency of the data to train on and predict
    prediction_length
        Length of the prediction horizon
    context_length
        Number of steps to unroll the RNN for before computing predictions
        (default: None, in which case context_length = prediction_length)
    trainer
        Trainer object to be used (default: Trainer())
    dropout_rate
        Dropout regularization parameter (default: 0.1)
    cardinality
        Number of values of each categorical feature (default: [1])
    embedding_dimension
        Dimension of the embeddings for categorical features (the same
        dimension is used for all embeddings, default: 20)
    distr_output
        Distribution to use to evaluate observations and sample predictions
        (default: StudentTOutput())
    model_dim
        Dimension of the transformer network, i.e., embedding dimension of
        the input (default: 32)
    inner_ff_dim_scale
        Dimension scale of the inner hidden layer of the transformer's
        feedforward network (default: 4)
    pre_seq
        Sequence that defines operations of the processing block before the
        main transformer network. Available operations: 'd' for dropout,
        'r' for residual connections and 'n' for normalization
        (default: 'dn')
    post_seq
        Sequence that defines operations of the processing block in and
        after the main transformer network. Available operations: 'd' for
        dropout, 'r' for residual connections and 'n' for normalization
        (default: 'drn')
    act_type
        Activation type of the transformer network (default: 'softrelu')
    num_heads
        Number of heads in the multi-head attention (default: 8)
    scaling
        Whether to automatically scale the target values (default: true)
    lags_seq
        Indices of the lagged target values to use as inputs of the RNN
        (default: None, in which case these are automatically determined
        based on freq)
    time_features
        Time features to use as inputs of the RNN (default: None, in which
        case these are automatically determined based on freq)
    num_parallel_samples
        Number of evaluation samples per time series to increase parallelism
        during inference. This is a model optimization that does not affect
        the accuracy (default: 100)
    """

    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        context_length: Optional[int] = None,
        trainer: Trainer = Trainer(),
        dropout_rate: float = 0.1,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: int = 20,
        distr_output: DistributionOutput = StudentTOutput(),
        model_dim: int = 32,
        inner_ff_dim_scale: int = 4,
        pre_seq: str = "dn",
        post_seq: str = "drn",
        act_type: str = "softrelu",
        num_heads: int = 8,
        scaling: bool = True,
        lags_seq: Optional[List[int]] = None,
        time_features: Optional[List[TimeFeature]] = None,
        use_feat_dynamic_real: bool = False,
        use_feat_static_cat: bool = False,
        num_parallel_samples: int = 100,
    ) -> None:
        super().__init__(trainer=trainer)

        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert (
            context_length is None or context_length > 0
        ), "The value of `context_length` should be > 0"
        assert dropout_rate >= 0, "The value of `dropout_rate` should be >= 0"
        assert (
            cardinality is not None or not use_feat_static_cat
        ), "You must set `cardinality` if `use_feat_static_cat=True`"
        assert cardinality is None or all(
            [c > 0 for c in cardinality]
        ), "Elements of `cardinality` should be > 0"
        assert (
            embedding_dimension > 0
        ), "The value of `embedding_dimension` should be > 0"
        assert (
            num_parallel_samples > 0
        ), "The value of `num_parallel_samples` should be > 0"

        self.freq = freq
        self.prediction_length = prediction_length
        # Fall back to the forecast horizon when no context length is given.
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.distr_output = distr_output
        self.dropout_rate = dropout_rate
        self.use_feat_dynamic_real = use_feat_dynamic_real
        self.use_feat_static_cat = use_feat_static_cat
        # A dummy single-value category is used when static cats are disabled.
        self.cardinality = cardinality if use_feat_static_cat else [1]
        self.embedding_dimension = embedding_dimension
        self.num_parallel_samples = num_parallel_samples
        self.lags_seq = (
            lags_seq
            if lags_seq is not None
            else get_lags_for_frequency(freq_str=freq)
        )
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )
        # The networks need enough history to cover the largest lag.
        self.history_length = self.context_length + max(self.lags_seq)
        self.scaling = scaling

        # Shared hyper-parameter bundle for encoder and decoder.
        self.config = {
            "model_dim": model_dim,
            "pre_seq": pre_seq,
            "post_seq": post_seq,
            "dropout_rate": dropout_rate,
            "inner_ff_dim_scale": inner_ff_dim_scale,
            "act_type": act_type,
            "num_heads": num_heads,
        }

        self.encoder = TransformerEncoder(
            self.context_length, self.config, prefix="enc_"
        )
        self.decoder = TransformerDecoder(
            self.prediction_length, self.config, prefix="dec_"
        )

    def create_transformation(self) -> Transformation:
        """Build the input pipeline: field cleanup, time/age features, splitting."""
        remove_field_names = [
            FieldName.FEAT_DYNAMIC_CAT,
            FieldName.FEAT_STATIC_REAL,
        ]
        if not self.use_feat_dynamic_real:
            remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)

        return Chain(
            [RemoveFields(field_names=remove_field_names)]
            + (
                # Inject a constant dummy category when none are provided.
                [SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0.0])]
                if not self.use_feat_static_cat
                else []
            )
            + [
                AsNumpyArray(field=FieldName.FEAT_STATIC_CAT, expected_ndim=1),
                AsNumpyArray(
                    field=FieldName.TARGET,
                    # in the following line, we add 1 for the time dimension
                    expected_ndim=1 + len(self.distr_output.event_shape),
                ),
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                ),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=self.time_features,
                    pred_length=self.prediction_length,
                ),
                AddAgeFeature(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_AGE,
                    pred_length=self.prediction_length,
                    log_scale=True,
                ),
                VstackFeatures(
                    output_field=FieldName.FEAT_TIME,
                    input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
                    + (
                        [FieldName.FEAT_DYNAMIC_REAL]
                        if self.use_feat_dynamic_real
                        else []
                    ),
                ),
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                    past_length=self.history_length,
                    future_length=self.prediction_length,
                    time_series_fields=[
                        FieldName.FEAT_TIME,
                        FieldName.OBSERVED_VALUES,
                    ],
                ),
            ]
        )

    def create_training_network(self) -> TransformerWeightedMaskedTrainingNetwork:
        """Instantiate the training network sharing this estimator's en/decoder."""
        training_network = TransformerWeightedMaskedTrainingNetwork(
            encoder=self.encoder,
            decoder=self.decoder,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            # honor the user-supplied flag (was hard-coded True, which
            # silently ignored scaling=False)
            scaling=self.scaling,
        )

        return training_network

    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        """Copy trained weights into a prediction network and wrap it in a predictor."""
        prediction_network = TransformerWeightedMaskedPredictionNetwork(
            encoder=self.encoder,
            decoder=self.decoder,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            lags_seq=self.lags_seq,
            # honor the user-supplied flag (was hard-coded True)
            scaling=self.scaling,
            num_parallel_samples=self.num_parallel_samples,
        )

        copy_parameters(trained_network, prediction_network)

        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
        )
| 12,289 | 37.769716 | 100 | py |
rankpredictor | rankpredictor-master/run/9.DeepModels/experiment/src/deepar_simindy500.py | #!/usr/bin/env python
# coding: utf-8
# # DeepAR on simulation indy500 laptime dataset
#
# laptime dataset
# <eventid, carids, laptime (totalcars x totallaps)>
# Third-party imports
import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import logging
import os,sys
from optparse import OptionParser
import pickle
from pathlib import Path
from gluonts.dataset.common import ListDataset
from gluonts.model.deepar import DeepAREstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator
# Module logger; re-bound in __main__ to use the program name.
logger = logging.getLogger(__name__)

#global variables
prediction_length = 50  # number of laps to forecast / held out for testing
freq = "1H"  # nominal pandas frequency used only to give laps a time axis
def load_dataset(inputfile):
    """Build GluonTS train/test ``ListDataset``s from a pickled laptime file.

    The pickle holds a list of runs, each a tuple of
    (eventid, carids dict, laptime matrix of shape (cars, laps)).
    The training series drop the last ``prediction_length`` laps; the test
    series keep the full run.
    """
    with open(inputfile, 'rb') as f:
        # pickle protocol is detected automatically
        laptime_data = pickle.load(f, encoding='latin1')

    print(f"number of runs: {len(laptime_data)}")

    start = pd.Timestamp("01-01-2019", freq=freq)  # same fake start for every series
    train_set, test_set, cardinality = [], [], []

    for _data in laptime_data:
        carids = list(_data[1].values())
        cardinality.append(len(carids))

        laps = _data[2]
        for rowid in range(laps.shape[0]):
            # NOTE(review): gluonts examples usually pass feat_static_cat as a
            # list ([rowid]); confirm a bare scalar is accepted downstream.
            full = laps[rowid, :].astype(np.float32)
            train_set.append({'target': full[:-prediction_length], 'start': start,
                              'feat_static_cat': rowid})
            test_set.append({'target': full, 'start': start,
                             'feat_static_cat': rowid})

    # train: truncated windows; test: complete runs
    return (ListDataset(train_set, freq=freq),
            ListDataset(test_set, freq=freq))
def plot_prob_forecasts(ts_entry, forecast_entry, outputfile):
    """Plot the tail of one observed series with its probabilistic forecast
    and save the figure as ``<outputfile>.pdf``.
    """
    plot_length = 150
    prediction_intervals = (50.0, 90.0)
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]

    fig, ax = plt.subplots(1, 1, figsize=(10, 7))
    ts_entry[-plot_length:].plot(ax=ax)  # plot the time series
    forecast_entry.plot(prediction_intervals=prediction_intervals, color='g')
    plt.grid(which="both")
    plt.legend(legend, loc="upper left")
    plt.savefig(outputfile + '.pdf')
    # Close the figure: without this, repeated calls leave figures open and
    # leak memory (matplotlib keeps every figure alive until closed).
    plt.close(fig)
def evaluate_model(estimator, train_ds, test_ds, outputfile):
    """Train the estimator, serialize the predictor into the directory
    ``outputfile``, plot one sample series, and log aggregate metrics.
    """
    predictor = estimator.train(train_ds)
    # outputfile doubles as the directory name for the serialized predictor
    if not os.path.exists(outputfile):
        os.mkdir(outputfile)
    predictor.serialize(Path(outputfile))
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    # Indy500 Car 12 WillPower
    # NOTE(review): index 7 is hard-coded; depends on series ordering in the
    # pickle — confirm against the dataset builder.
    ts_entry = tss[7]
    forecast_entry = forecasts[7]
    plot_prob_forecasts(ts_entry, forecast_entry, outputfile)
    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
    logger.info(json.dumps(agg_metrics, indent=4))
def init_estimator(epochs=100):
    """Create the DeepAR estimator used by this script.

    Parameters
    ----------
    epochs
        Number of training epochs. optparse delivers command-line values as
        strings, so it is coerced to int here.
    """
    # Removed dead code: a SimpleFeedForwardEstimator was also constructed
    # here but never used or returned.
    estimator = DeepAREstimator(
        prediction_length=prediction_length,
        context_length=2*prediction_length,
        use_feat_static_cat=True,
        cardinality=[33],  # one category per car in a 33-car field
        freq=freq,
        trainer=Trainer(ctx="gpu",
                        epochs=int(epochs),  # optparse passes strings
                        learning_rate=1e-3,
                        num_batches_per_epoch=64
                        )
    )
    return estimator
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # cmd argument parser
    usage = 'deepar_simindy500.py --epochs epochs --input inputpicklefile --output outputfile'
    parser = OptionParser(usage)
    parser.add_option("--input", dest="inputfile", default='sim-indy500-laptime-2018.pickle')
    parser.add_option("--output", dest="outputfile")
    # type="int" so Trainer(epochs=...) always receives an int; without it
    # optparse yields a str when --epochs is given on the command line,
    # while the default stays an int.
    parser.add_option("--epochs", dest="epochs", default=100, type="int")
    opt, args = parser.parse_args()

    train_ds, test_ds = load_dataset(opt.inputfile)
    estimator = init_estimator(opt.epochs)
    evaluate_model(estimator, train_ds, test_ds, opt.outputfile)
| 5,656 | 30.603352 | 118 | py |
rankpredictor | rankpredictor-master/run/9.DeepModels/experiment/src/prophet_laptime.py | import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import pickle
# Load the simulated Indy500 runs; each entry is
# (eventid, carids, laptime array of shape (cars, laps)).
with open('sim-indy500-laptime-2018.pickle', 'rb') as f:
    # The protocol version used is detected automatically, so we do not
    # have to specify it.
    laptime_data = pickle.load(f, encoding='latin1')
print(f"number of runs: {len(laptime_data)}")
from gluonts.dataset.common import ListDataset
# Forecast horizon (laps) and the nominal pandas frequency for the fake time axis.
prediction_length = 50
freq = "5m"
start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
train_set = []
test_set = []
cardinality = []  # NOTE(review): never filled or used in this script
#_data: eventid, carids, laptime array
for _data in laptime_data:
    # One series per car: the training copy drops the final
    # prediction_length laps, the test copy keeps the full run.
    _train = [{'target': _data[2][rowid, :-prediction_length].astype(np.float32), 'start': start,
              'feat_static_cat': rowid}
             for rowid in range(_data[2].shape[0]) ]
    _test = [{'target': _data[2][rowid, :].astype(np.float32), 'start': start, 'feat_static_cat': rowid}
            for rowid in range(_data[2].shape[0]) ]
    train_set.extend(_train)
    test_set.extend(_test)
train_ds = ListDataset(train_set, freq=freq)
test_ds = ListDataset(test_set, freq=freq)
from gluonts.model.prophet import ProphetPredictor
# No training step here: the Prophet predictor is applied directly to the
# test dataset and the forecasts are materialized into a list.
predictor = ProphetPredictor(freq= freq, prediction_length = prediction_length)
predictions = list(predictor.predict(test_ds))
| 1,440 | 32.511628 | 109 | py |
rankpredictor | rankpredictor-master/run/9.DeepModels/experiment/src/deepmodels_indy.py | #!/usr/bin/env python
# coding: utf-8
"""
Deep Models on the Indy dataset
dataset:
laptime&rank dataset <eventid, carids, laptime (totalcars x totallaps), rank (totalcars x totallaps)>; filled with NaN
deep models:
deepAR, deepstate, deepFactor
"""
# # DeepAR on simulation indy500 laptime dataset
#
# laptime dataset
# <eventid, carids, laptime (totalcars x totallaps)>
# Third-party imports
import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import logging
import os,sys
from optparse import OptionParser
import pickle
from pathlib import Path
from gluonts.dataset.common import ListDataset
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator
# Module logger; re-bound in __main__ to use the program name.
logger = logging.getLogger(__name__)

#global variables
prediction_length = 50  # forecast horizon in laps (overridden by --predictionlen)
context_length = 100    # conditioning window in laps (overridden by --contextlen)
test_length = 50        # laps held out at the end of each training series
freq = "1H"  # nominal pandas frequency used only to give laps a time axis
events = ['Phoenix','Indy500','Texas','Iowa','Pocono','Gateway']
events_id={key:idx for idx, key in enumerate(events)}  # event name -> index
global_carids = {}   # car-number -> global category id; loaded from the pickle
cardinality = [0]    # [number of distinct cars]; set by load_dataset
# Indices into each laptime_data record selecting which matrix to model.
TS_LAPTIME=2
TS_RANK=3
def load_dataset(inputfile, run_ts = TS_LAPTIME):
    """Load the pickled (carid map, runs) file and build train/test datasets.

    Each run is (eventid, carids, laptime matrix, rank matrix); ``run_ts``
    selects which matrix to model (TS_LAPTIME or TS_RANK). Sets the module
    globals ``global_carids`` and ``cardinality`` as a side effect.
    """
    global global_carids, cardinality

    with open(inputfile, 'rb') as f:
        # pickle protocol is detected automatically
        global_carids, laptime_data = pickle.load(f, encoding='latin1')

    cardinality = [len(global_carids)]
    logger.info(f"number of cars: {cardinality}")
    logger.info(f"number of runs: {len(laptime_data)}")

    start = pd.Timestamp("01-01-2019", freq=freq)  # same fake start for every series
    train_set = []
    test_set = []

    for _data in laptime_data:
        series = _data[run_ts]
        for rowid in range(series.shape[0]):
            car_cat = global_carids[_data[1][rowid]]
            full = series[rowid, :].astype(np.float32)
            # train: drop the held-out tail; test: keep the whole run
            train_set.append({'target': full[:-test_length], 'start': start,
                              'feat_static_cat': car_cat})
            test_set.append({'target': full, 'start': start,
                             'feat_static_cat': car_cat})

    return ListDataset(train_set, freq=freq), ListDataset(test_set, freq=freq)
def plot_prob_forecasts(ts_entry, forecast_entry, outputfile):
    """Save one probabilistic-forecast plot per series as ``<outputfile>-<idx>.pdf``.

    ``ts_entry`` and ``forecast_entry`` are parallel lists of observed series
    and gluonts forecast objects.
    """
    plot_length = 150
    prediction_intervals = (50.0, 90.0)
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]

    for idx in range(len(ts_entry)):
        fig, axs = plt.subplots(1, 1, figsize=(10, 7))
        ts_entry[idx][-plot_length:].plot(ax=axs)  # plot the time series
        forecast_entry[idx].plot(prediction_intervals=prediction_intervals, color='g')
        plt.grid(which="both")
        plt.legend(legend, loc="upper left")
        plt.savefig(outputfile + '-%d.pdf'%idx)
        # Close each figure: the loop previously left every figure open,
        # leaking memory (matplotlib warns after 20 open figures).
        plt.close(fig)
def evaluate_model(estimator, train_ds, test_ds, outputfile):
    """Train the estimator, plot three sample cars, and log aggregate metrics."""
    predictor = estimator.train(train_ds)
    #if not os.path.exists(outputfile):
    #    os.mkdir(outputfile)
    #predictor.serialize(Path(outputfile))
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    logger.info(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    # car12@rank1, car1@rank16, car7@rank33, the index is 7,0,4 accordingly
    # Indy500 Car 12 WillPower
    # NOTE(review): 52-7 looks like the offset of the Indy500 block within the
    # concatenated per-event series list — confirm against load_dataset ordering.
    offset = 52-7
    ts_entry = [tss[7+offset],tss[0+offset],tss[4+offset]]
    forecast_entry = [forecasts[7+offset],forecasts[0+offset],forecasts[4+offset]]
    plot_prob_forecasts(ts_entry, forecast_entry, outputfile)
    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
    logger.info(json.dumps(agg_metrics, indent=4))
def init_estimator(model, gpuid, epochs=100):
    """Build the requested gluonts estimator.

    model: one of 'deepAR', 'simpleFF', 'deepFactor', 'deepState';
    any other value logs an error and exits the process.
    gpuid: index formatted into the mxnet context string "gpu(<id>)".
    epochs: number of training epochs.
    NOTE(review): epochs may arrive as a str from optparse; gluonts'
    validated Trainer is expected to coerce it — confirm.
    """
    if model == 'deepAR':
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=Trainer(ctx="gpu(%s)"%gpuid,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
        )
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            trainer=Trainer(ctx="gpu(%s)"%gpuid,
                            epochs=epochs,
                            learning_rate=1e-3,
                            hybridize=False,
                            num_batches_per_epoch=100
                           )
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            trainer=Trainer(ctx="gpu(%s)"%gpuid,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
        )
    elif model == 'deepState':
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=Trainer(ctx="gpu(%s)"%gpuid,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
        )
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)
    return estimator
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # cmd argument parser
    usage = 'deepar_simindy500.py --epochs epochs --input inputpicklefile --output outputfile'
    parser = OptionParser(usage)
    parser.add_option("--input", dest="inputfile", default='sim-indy500-laptime-2018.pickle')
    parser.add_option("--output", dest="outputfile")
    parser.add_option("--epochs", dest="epochs", default=100)
    parser.add_option("--model", dest="model", default="deepAR")
    parser.add_option("--gpuid", dest="gpuid", default=0)
    parser.add_option("--contextlen", dest="contextlen", default=100)
    parser.add_option("--predictionlen", dest="predictionlen", default=50)
    parser.add_option("--testlen", dest="testlen", default=50)
    parser.add_option("--ts", dest="ts_type", default=2)
    opt, args = parser.parse_args()

    #set the global length
    prediction_length = int(opt.predictionlen)
    context_length = int(opt.contextlen)
    test_length = int(opt.testlen)
    ts_type = int(opt.ts_type)
    # Consistency fix: every other numeric option is cast to int here, but
    # epochs was passed through as the raw optparse string.
    epochs = int(opt.epochs)

    runid = f'-i{opt.outputfile}-e{opt.epochs}-m{opt.model}-p{opt.predictionlen}-c{opt.contextlen}-t{opt.testlen}-ts{opt.ts_type}'
    logger.info("runid=%s", runid)

    train_ds, test_ds = load_dataset(opt.inputfile, ts_type)
    estimator = init_estimator(opt.model, opt.gpuid, epochs)
    evaluate_model(estimator, train_ds, test_ds, opt.outputfile)
| 8,731 | 32.328244 | 130 | py |
rankpredictor | rankpredictor-master/run/9.DeepModels/experiment/src/deepfactor_simindy500.py | #!/usr/bin/env python
# coding: utf-8
# # DeepAR on simulation indy500 laptime dataset
#
# laptime dataset
# <eventid, carids, laptime (totalcars x totallaps)>
# Third-party imports
import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import logging
import os,sys
from optparse import OptionParser
import pickle
from pathlib import Path
from gluonts.dataset.common import ListDataset
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator
# Module logger; re-bound in __main__ to use the program name.
logger = logging.getLogger(__name__)

#global variables
prediction_length = 50  # number of laps to forecast / held out for testing
freq = "1H"  # nominal pandas frequency used only to give laps a time axis
def load_dataset(inputfile):
    """Build GluonTS train/test ``ListDataset``s from a pickled laptime file.

    The pickle holds a list of runs, each a tuple of
    (eventid, carids dict, laptime matrix of shape (cars, laps)).
    The training series drop the last ``prediction_length`` laps; the test
    series keep the full run.
    """
    with open(inputfile, 'rb') as f:
        # pickle protocol is detected automatically
        laptime_data = pickle.load(f, encoding='latin1')

    print(f"number of runs: {len(laptime_data)}")

    start = pd.Timestamp("01-01-2019", freq=freq)  # same fake start for every series
    train_set, test_set, cardinality = [], [], []

    for _data in laptime_data:
        carids = list(_data[1].values())
        cardinality.append(len(carids))

        laps = _data[2]
        for rowid in range(laps.shape[0]):
            # NOTE(review): gluonts examples usually pass feat_static_cat as a
            # list ([rowid]); confirm a bare scalar is accepted downstream.
            full = laps[rowid, :].astype(np.float32)
            train_set.append({'target': full[:-prediction_length], 'start': start,
                              'feat_static_cat': rowid})
            test_set.append({'target': full, 'start': start,
                             'feat_static_cat': rowid})

    # train: truncated windows; test: complete runs
    return (ListDataset(train_set, freq=freq),
            ListDataset(test_set, freq=freq))
def plot_prob_forecasts(ts_entry, forecast_entry, outputfile):
    """Plot the tail of one observed series with its probabilistic forecast
    and save the figure as ``<outputfile>.pdf``.
    """
    plot_length = 150
    prediction_intervals = (50.0, 90.0)
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]

    fig, ax = plt.subplots(1, 1, figsize=(10, 7))
    ts_entry[-plot_length:].plot(ax=ax)  # plot the time series
    forecast_entry.plot(prediction_intervals=prediction_intervals, color='g')
    plt.grid(which="both")
    plt.legend(legend, loc="upper left")
    plt.savefig(outputfile + '.pdf')
    # Close the figure: without this, repeated calls leave figures open and
    # leak memory (matplotlib keeps every figure alive until closed).
    plt.close(fig)
def evaluate_model(estimator, train_ds, test_ds, outputfile):
    """Train the estimator, serialize the predictor into the directory
    ``outputfile``, plot one sample series, and log aggregate metrics.
    """
    predictor = estimator.train(train_ds)
    # outputfile doubles as the directory name for the serialized predictor
    if not os.path.exists(outputfile):
        os.mkdir(outputfile)
    predictor.serialize(Path(outputfile))
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    # Indy500 Car 12 WillPower
    # NOTE(review): index 7 is hard-coded; depends on series ordering in the
    # pickle — confirm against the dataset builder.
    ts_entry = tss[7]
    forecast_entry = forecasts[7]
    plot_prob_forecasts(ts_entry, forecast_entry, outputfile)
    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
    logger.info(json.dumps(agg_metrics, indent=4))
def init_estimator(model, gpuid, epochs=100):
    """Build the requested gluonts estimator.

    model: one of 'deepAR', 'simpleFF', 'deepFactor'; any other value logs
    an error and exits the process.
    gpuid: index formatted into the mxnet context string "gpu(<id>)".
    epochs: number of training epochs.
    NOTE(review): epochs may arrive as a str from optparse; gluonts'
    validated Trainer is expected to coerce it — confirm.
    """
    if model == 'deepAR':
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length=2*prediction_length,
            use_feat_static_cat=True,
            cardinality=[33],
            freq=freq,
            trainer=Trainer(ctx="gpu(%s)"%gpuid,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=64
                           )
        )
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length=2*prediction_length,
            freq=freq,
            trainer=Trainer(ctx="gpu(%s)"%gpuid,
                            epochs=epochs,
                            learning_rate=1e-3,
                            hybridize=False,
                            num_batches_per_epoch=64
                           )
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length=2*prediction_length,
            freq=freq,
            trainer=Trainer(ctx="gpu(%s)"%gpuid,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=64
                           )
        )
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)
    return estimator
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # cmd argument parser
    usage = 'deepar_simindy500.py --epochs epochs --input inputpicklefile --output outputfile'
    parser = OptionParser(usage)
    parser.add_option("--input", dest="inputfile", default='sim-indy500-laptime-2018.pickle')
    parser.add_option("--output", dest="outputfile")
    # type="int" so Trainer(epochs=...) always receives an int; without it
    # optparse yields a str when --epochs is given on the command line,
    # while the default stays an int.
    parser.add_option("--epochs", dest="epochs", default=100, type="int")
    parser.add_option("--model", dest="model", default="deepAR")
    parser.add_option("--gpuid", dest="gpuid", default=0)
    opt, args = parser.parse_args()

    train_ds, test_ds = load_dataset(opt.inputfile)
    estimator = init_estimator(opt.model, opt.gpuid, opt.epochs)
    evaluate_model(estimator, train_ds, test_ds, opt.outputfile)
| 6,554 | 31.939698 | 118 | py |
rankpredictor | rankpredictor-master/run/9.DeepModels/experiment/src/deepmodels_simindy500.py | #!/usr/bin/env python
# coding: utf-8
"""
Deep Models on the Indy500 simulation dataset
simulation dataset:
laptime&rank dataset <eventid, carids, laptime (totalcars x totallaps), rank (totalcars x totallaps)>; filled with NaN
deep models:
deepAR, deepstate, deepFactor
"""
# # DeepAR on simulation indy500 laptime dataset
#
# laptime dataset
# <eventid, carids, laptime (totalcars x totallaps)>
# Third-party imports
import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import logging
import os,sys
from optparse import OptionParser
import pickle
from pathlib import Path
from gluonts.dataset.common import ListDataset
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator
# Module logger; re-bound in __main__ to use the program name.
logger = logging.getLogger(__name__)

#global variables
prediction_length = 50  # forecast horizon in laps (overridden in __main__)
context_length = 100    # conditioning window in laps (overridden in __main__)
test_length = 50        # laps held out at the end of each training series
freq = "1H"  # nominal pandas frequency used only to give laps a time axis
def load_dataset(inputfile):
    """Build GluonTS train/test ``ListDataset``s from a pickled laptime file.

    The pickle holds a list of runs, each a tuple of
    (eventid, carids dict, laptime matrix of shape (cars, laps)).
    The training series drop the last ``test_length`` laps; the test series
    keep the full run.
    """
    with open(inputfile, 'rb') as f:
        # pickle protocol is detected automatically
        laptime_data = pickle.load(f, encoding='latin1')

    print(f"number of runs: {len(laptime_data)}")

    start = pd.Timestamp("01-01-2019", freq=freq)  # same fake start for every series
    train_set, test_set, cardinality = [], [], []

    for _data in laptime_data:
        carids = list(_data[1].values())
        cardinality.append(len(carids))

        laps = _data[2]
        for rowid in range(laps.shape[0]):
            # NOTE(review): gluonts examples usually pass feat_static_cat as a
            # list ([rowid]); confirm a bare scalar is accepted downstream.
            full = laps[rowid, :].astype(np.float32)
            train_set.append({'target': full[:-test_length], 'start': start,
                              'feat_static_cat': rowid})
            test_set.append({'target': full, 'start': start,
                             'feat_static_cat': rowid})

    # train: truncated windows; test: complete runs
    return (ListDataset(train_set, freq=freq),
            ListDataset(test_set, freq=freq))
def plot_prob_forecasts(ts_entry, forecast_entry, outputfile):
    """Save one probabilistic-forecast plot per series as ``<outputfile>-<idx>.pdf``.

    ``ts_entry`` and ``forecast_entry`` are parallel lists of observed series
    and gluonts forecast objects.
    """
    plot_length = 150
    prediction_intervals = (50.0, 90.0)
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]

    for idx in range(len(ts_entry)):
        fig, axs = plt.subplots(1, 1, figsize=(10, 7))
        ts_entry[idx][-plot_length:].plot(ax=axs)  # plot the time series
        forecast_entry[idx].plot(prediction_intervals=prediction_intervals, color='g')
        plt.grid(which="both")
        plt.legend(legend, loc="upper left")
        plt.savefig(outputfile + '-%d.pdf'%idx)
        # Close each figure: the loop previously left every figure open,
        # leaking memory (matplotlib warns after 20 open figures).
        plt.close(fig)
def evaluate_model(estimator, train_ds, test_ds, outputfile):
    """Train the estimator, plot three sample cars, and log aggregate metrics."""
    predictor = estimator.train(train_ds)
    #if not os.path.exists(outputfile):
    #    os.mkdir(outputfile)
    #predictor.serialize(Path(outputfile))
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    logger.info(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    # car12@rank1, car1@rank16, car7@rank33, the index is 7,0,4 accordingly
    # Indy500 Car 12 WillPower
    # NOTE(review): indices are hard-coded; depend on series ordering in the
    # pickle — confirm against the dataset builder.
    ts_entry = [tss[7],tss[0],tss[4]]
    forecast_entry = [forecasts[7],forecasts[0],forecasts[4]]
    plot_prob_forecasts(ts_entry, forecast_entry, outputfile)
    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
    logger.info(json.dumps(agg_metrics, indent=4))
def init_estimator(model, gpuid, epochs=100):
    """Build the GluonTS estimator selected by `model`.

    Reads the module-level `prediction_length`, `context_length` and `freq`.

    Parameters
    ----------
    model : str
        One of 'deepAR', 'simpleFF', 'deepFactor', 'deepState'.
    gpuid : int or str
        GPU id used to build the mxnet context string "gpu(<id>)".
    epochs : int or str
        Training epochs. OptionParser hands command-line values over as
        strings, so the value is coerced to int here.

    Returns
    -------
    A configured estimator; exits the process on an unknown model name.
    """
    # Command-line values arrive as strings; make sure Trainer sees an int.
    epochs = int(epochs)
    # Trainer settings shared by every model branch.
    trainer_kwargs = dict(ctx="gpu(%s)" % gpuid,
                          epochs=epochs,
                          learning_rate=1e-3,
                          num_batches_per_epoch=100)
    if model == 'deepAR':
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            use_feat_static_cat=True,
            cardinality=[33],  # assumes a fixed 33-car field — TODO confirm
            freq=freq,
            trainer=Trainer(**trainer_kwargs),
        )
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length=context_length,
            freq=freq,
            # hybridize disabled only for the feed-forward model, as before
            trainer=Trainer(hybridize=False, **trainer_kwargs),
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            freq=freq,
            trainer=Trainer(**trainer_kwargs),
        )
    elif model == 'deepState':
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            use_feat_static_cat=True,
            cardinality=[33],  # assumes a fixed 33-car field — TODO confirm
            freq=freq,
            trainer=Trainer(**trainer_kwargs),
        )
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)
    return estimator
if __name__ == '__main__':
    # Script entry point: parse CLI options, load the pickled dataset,
    # build the requested estimator, then train and evaluate it.
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # cmd argument parser
    usage = 'deepar_simindy500.py --epochs epochs --input inputpicklefile --output outputfile'
    parser = OptionParser(usage)
    parser.add_option("--input", dest="inputfile", default='sim-indy500-laptime-2018.pickle')
    parser.add_option("--output", dest="outputfile")
    # NOTE(review): OptionParser returns command-line values as strings even
    # when the default is an int; downstream code must coerce them.
    parser.add_option("--epochs", dest="epochs", default=100)
    parser.add_option("--model", dest="model", default="deepAR")
    parser.add_option("--gpuid", dest="gpuid", default=0)
    parser.add_option("--contextlen", dest="contextlen", default=100)
    parser.add_option("--predictionlen", dest="predictionlen", default=50)
    parser.add_option("--testlen", dest="testlen", default=50)
    opt, args = parser.parse_args()
    #set the global length
    prediction_length = int(opt.predictionlen)
    context_length = int(opt.contextlen)
    test_length = int(opt.testlen)
    # run identifier used only for logging
    runid = f'-i{opt.outputfile}-e{opt.epochs}-m{opt.model}-p{opt.predictionlen}-c{opt.contextlen}-t{opt.testlen}'
    logger.info("runid=%s", runid)
    train_ds, test_ds = load_dataset(opt.inputfile)
    estimator = init_estimator(opt.model, opt.gpuid, opt.epochs)
    evaluate_model(estimator, train_ds, test_ds, opt.outputfile)
| 8,467 | 32.470356 | 118 | py |
rankpredictor | rankpredictor-master/run/9.DeepModels/experiment/src/prophet_telemetry_tsgluon.py | #!/usr/bin/env python
# coding: utf-8
# # Prophet on telemetry ts dataset
#
# refer to telemetry_dataset_gluonts
# In[1]:
# Third-party imports
import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import os,sys
from optparse import OptionParser
import pickle
from pathlib import Path
# In[2]:
### test on one run
from gluonts.dataset.common import ListDataset
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator
from gluonts.model.prophet import ProphetPredictor
def evaluate_model(test_ds, predictor):
    """Backtest `predictor` over `test_ds` and print aggregate metrics.

    Returns
    -------
    (tss, forecasts)
        List of ground-truth series and the matching list of Forecast objects.
    """
    forecast_gen, truth_gen = make_evaluation_predictions(
        dataset=test_ds,
        predictor=predictor,
        num_samples=100,  # sample paths drawn per forecast
    )
    forecasts = list(forecast_gen)
    tss = list(truth_gen)
    # Aggregate accuracy metrics over all series, printed as JSON.
    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, _item_metrics = evaluator(iter(tss), iter(forecasts),
                                           num_series=len(test_ds))
    print(json.dumps(agg_metrics, indent=4))
    return tss, forecasts
def plot_prob_forecasts(ts_entry, forecast_entry, outputfile):
    """Plot the tail of one series with its forecast fan chart; save as PDF."""
    plot_length = 800
    prediction_intervals = (50.0, 90.0)
    interval_labels = [f"{k}% prediction interval" for k in prediction_intervals]
    legend = ["observations", "median prediction"] + interval_labels[::-1]
    fig, ax = plt.subplots(1, 1, figsize=(10, 7))
    tail = ts_entry[-plot_length:]
    # plot index/values directly instead of Series.plot (matches original)
    plt.plot(tail.index, tail.values)
    forecast_entry.plot(prediction_intervals=prediction_intervals, color='g')
    plt.grid(which="both")
    plt.legend(legend, loc="upper left")
    plt.savefig(outputfile + '.pdf')
# prophet
def evaluate_prophet(test_ds, prediction_length, freq):
    """Evaluate a Prophet predictor on `test_ds`; returns (tss, forecasts)."""
    prophet = ProphetPredictor(freq=freq, prediction_length=prediction_length)
    return evaluate_model(test_ds, prophet)
# NOTE(review): neither `runs` nor `make_dataset` is defined in this module —
# this helper looks like dead code carried over from a sibling script and will
# raise NameError if called; confirm before use.
def run_prophet(prediction_length,freq):
    train_ds, test_ds = make_dataset(runs, prediction_length,freq)
    evaluate_prophet(test_ds,prediction_length,freq)
# NOTE(review): `make_dataset_nonan` is not defined in this module — this
# helper appears to be dead code from another script and will raise NameError
# if called; confirm before use.
def run_prophet_nonan(prediction_length,freq):
    train_ds, test_ds = make_dataset_nonan(prediction_length,freq)
    evaluate_prophet(test_ds,prediction_length,freq)
# ## Datasets
#
# In[13]:
import pickle
### load indy
# Load the prepared GluonTS telemetry dataset; the pickle also carries the
# sampling frequency, horizon and cardinality used below.
with open('telemetry-gluonts-all-2018.pickle', 'rb') as f:
    # The protocol version used is detected automatically, so we do not
    # have to specify it.
    freq, prediction_length, cardinality,train_ds, test_ds = pickle.load(f, encoding='latin1')
    #freq, train_set, test_set = pickle.load(f, encoding='latin1')
    #train_ds = ListDataset(train_set, freq=freq)
    #test_ds = ListDataset(test_set, freq=freq)
# In[4]:
events = ['Phoenix','Indy500','Texas','Iowa','Pocono','Gateway']
events_id={key:idx for idx, key in enumerate(events)}
# In[5]:
print(f"events: {events}")
# In[14]:
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# global configuration
TS_VSPEED=1
TS_DISTANCE=2
run_ts = TS_VSPEED
# In[16]:
# Prophet baseline: evaluate and plot test-series index 7 as an example.
tss, forecast =evaluate_prophet(test_ds,prediction_length,freq)
ts_entry = tss[7]
forecast_entry = forecast[7]
plot_prob_forecasts(ts_entry, forecast_entry, 'prophet-tele-00')
# In[12]:
# test all
#run_prophet_nonan(-1, 50, '1D')
# ### R-predictor
# In[17]:
# R-backed baselines (ets / arima) via gluonts' RForecastPredictor.
from gluonts.model.r_forecast import RForecastPredictor
est = RForecastPredictor(method_name='ets',freq= freq, prediction_length = prediction_length)
arima = RForecastPredictor(method_name='arima',freq= freq, prediction_length = prediction_length)
#
##train_ds, test_ds = make_dataset_nonan(1, prediction_length,freq)
##train_ds, test_ds = make_dataset(prediction_length,freq)
#
tss, forecast = evaluate_model(test_ds, est)
#
#
## In[18]:
#
#
ts_entry = tss[7]
forecast_entry = forecast[7]
plot_prob_forecasts(ts_entry, forecast_entry, 'ets-tele-00')
#
#
## In[19]:
#
#
tss, forecast = evaluate_model(test_ds, arima)
ts_entry = tss[7]
forecast_entry = forecast[7]
plot_prob_forecasts(ts_entry, forecast_entry,'arima-tele-00')
#
#
## ### DeepAR
#
## In[21]:
#
#
#with open('telemetry-2018.pickle', 'rb') as f:
    # The protocol version used is detected automatically, so we do not
    # have to specify it.
#    global_carids, telemetry_data_indy = pickle.load(f, encoding='latin1')
# NOTE(review): exit(0) below makes the whole DeepAR section unreachable —
# the script terminates here; the rest is effectively disabled code.
exit(0)
from gluonts.model.deepar import DeepAREstimator
from gluonts.trainer import Trainer
#cardinality = [len(global_carids)]
estimator = DeepAREstimator(
    prediction_length=prediction_length,
    context_length = 3*prediction_length,
    use_feat_static_cat=True,
    cardinality=cardinality,
    freq=freq,
    trainer=Trainer(ctx="gpu(2)",
                    epochs=500,
                    learning_rate=1e-3,
                    num_batches_per_epoch=64
                    )
)
# In[ ]:
#train_ds, test_ds, train_set, test_set = make_dataset_interpolate(prediction_length,freq)
#train_ds, test_ds, train_set, test_set = make_dataset_interpolate(prediction_length,'1S')
# In[23]:
# Train DeepAR and persist the fitted predictor for later reuse.
predictor = estimator.train(train_ds)
modeldir = 'deepar-tele'
if not os.path.exists(modeldir):
    os.mkdir(modeldir)
predictor.serialize(Path(modeldir))
# In[24]:
from gluonts.evaluation.backtest import make_evaluation_predictions
forecast_it, ts_it = make_evaluation_predictions(
    dataset=test_ds,  # test dataset
    predictor=predictor,  # predictor
    num_samples=100,  # number of sample paths we want for evaluation
)
# In[25]:
forecasts = list(forecast_it)
# In[26]:
tss = list(ts_it)
# In[ ]:
ts_entry = tss[7]
forecast_entry = forecasts[7]
plot_prob_forecasts(ts_entry, forecast_entry, 'deepar-tele-00')
evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
print(json.dumps(agg_metrics, indent=4))
| 6,146 | 22.551724 | 118 | py |
rankpredictor | rankpredictor-master/run/9.DeepModels/experiment/src/deepar_laptime-rank.py | #!/usr/bin/env python
# coding: utf-8
# # DeepAR on laptime&rank dataset
#
# laptime&rank dataset
# <eventid, carids, laptime (totalcars x totallaps), rank (totalcars x totallaps)>; filled with NaN
# In[1]:
# Third-party imports
import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import os
from pathlib import Path
# CLI: deepar.py <ts_type> <epochs> <gpuid>
print("deepar.py <ts_type> <epochs> <gpuid>")
import sys
if len(sys.argv)!=4:
    exit(-1)
ts_type = int(sys.argv[1])
epochs = int(sys.argv[2])
gpudevice = int(sys.argv[3])
runid='deepar_indy_e%d_ts%d'%(epochs, ts_type)
# In[2]:
import pickle
# Pickle holds the car-id mapping and per-event laptime/rank matrices.
with open('laptime_rank-2018.pickle', 'rb') as f:
    # The protocol version used is detected automatically, so we do not
    # have to specify it.
    global_carids, laptime_data = pickle.load(f, encoding='latin1')
# In[3]:
events = ['Phoenix','Indy500','Texas','Iowa','Pocono','Gateway']
events_id={key:idx for idx, key in enumerate(events)}
# In[4]:
print(f"events: {events}")
# To download one of the built-in datasets, simply call get_dataset with one of the above names. GluonTS can re-use the saved dataset so that it does not need to be downloaded again: simply set `regenerate=False`.
# In[5]:
# NOTE(review): leftover notebook inspection cell — the result is discarded.
laptime_data[2][2].astype(np.float32)
# In[6]:
# global configuration
prediction_length = 50
freq = "1H"
cardinality = [len(global_carids)]
TS_LAPTIME=2
TS_RANK=3
# ts_type selects the time-series column: 2 = laptime, 3 = rank
run_ts = ts_type
# In[7]:
from gluonts.dataset.common import ListDataset
start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
train_set = []
test_set = []
#_data: eventid, carids, laptime array
# Build one training record (last `prediction_length` points held out) and one
# full-length test record per car per event.
for _data in laptime_data:
    #_train = [{'target': x.astype(np.float32), 'start': start}
    #          for x in _data[2][:, :-prediction_length]]
    #_test = [{'target': x.astype(np.float32), 'start': start}
    #         for x in _data[2]]
    #map rowid -> carno -> global_carid
    #carids = list(_data[1].values())
    #global_carid = global_carids[_data[1][rowid]]
    _train = [{'target': _data[run_ts][rowid, :-prediction_length].astype(np.float32), 'start': start,
               'feat_static_cat': global_carids[_data[1][rowid]]}
              for rowid in range(_data[run_ts].shape[0]) ]
    _test = [{'target': _data[run_ts][rowid, :].astype(np.float32), 'start': start,
              'feat_static_cat': global_carids[_data[1][rowid]]}
             for rowid in range(_data[run_ts].shape[0]) ]
    train_set.extend(_train)
    test_set.extend(_test)
# In[8]:
# train dataset: cut the last window of length "prediction_length", add "target" and "start" fields
train_ds = ListDataset(train_set, freq=freq)
# test dataset: use the whole dataset, add "target" and "start" fields
test_ds = ListDataset(test_set, freq=freq)
# In general, the datasets provided by GluonTS are objects that consists of three main members:
#
# - `dataset.train` is an iterable collection of data entries used for training. Each entry corresponds to one time series
# - `dataset.test` is an iterable collection of data entries used for inference. The test dataset is an extended version of the train dataset that contains a window in the end of each time series that was not seen during training. This window has length equal to the recommended prediction length.
# - `dataset.metadata` contains metadata of the dataset such as the frequency of the time series, a recommended prediction horizon, associated features, etc.
# In[9]:
from gluonts.dataset.util import to_pandas
# ## Training an existing model (`Estimator`)
#
# GluonTS comes with a number of pre-built models. All the user needs to do is configure some hyperparameters. The existing models focus on (but are not limited to) probabilistic forecasting. Probabilistic forecasts are predictions in the form of a probability distribution, rather than simply a single point estimate.
#
# We will begin with GulonTS's pre-built feedforward neural network estimator, a simple but powerful forecasting model. We will use this model to demonstrate the process of training a model, producing forecasts, and evaluating the results.
#
# GluonTS's built-in feedforward neural network (`SimpleFeedForwardEstimator`) accepts an input window of length `context_length` and predicts the distribution of the values of the subsequent `prediction_length` values. In GluonTS parlance, the feedforward neural network model is an example of `Estimator`. In GluonTS, `Estimator` objects represent a forecasting model as well as details such as its coefficients, weights, etc.
#
# In general, each estimator (pre-built or custom) is configured by a number of hyperparameters that can be either common (but not binding) among all estimators (e.g., the `prediction_length`) or specific for the particular estimator (e.g., number of layers for a neural network or the stride in a CNN).
#
# Finally, each estimator is configured by a `Trainer`, which defines how the model will be trained i.e., the number of epochs, the learning rate, etc.
# In[12]:
from gluonts.model.deepar import DeepAREstimator
from gluonts.trainer import Trainer
# In[13]:
# DeepAR estimator over the per-car laptime/rank series.
# Bug fix: `epochs` was passed as the string "%d" % epochs even though it was
# already parsed to int from argv; Trainer expects an integer epoch count.
estimator = DeepAREstimator(
    prediction_length=prediction_length,
    context_length=2*prediction_length,
    use_feat_static_cat=True,
    cardinality=cardinality,
    freq=freq,
    trainer=Trainer(ctx="gpu(%d)"%gpudevice,
                    epochs=epochs,
                    learning_rate=1e-3,
                    num_batches_per_epoch=64
                    )
)
# After specifying our estimator with all the necessary hyperparameters we can train it using our training dataset `dataset.train` by invoking the `train` method of the estimator. The training algorithm returns a fitted model (or a `Predictor` in GluonTS parlance) that can be used to construct forecasts.
# In[14]:
# Train the estimator, then persist the fitted predictor under ./<runid>/.
predictor = estimator.train(train_ds)
outputfile=runid
if not os.path.exists(outputfile):
    os.mkdir(outputfile)
predictor.serialize(Path(outputfile))
# With a predictor in hand, we can now predict the last window of the `dataset.test` and evaluate our model's performance.
#
# GluonTS comes with the `make_evaluation_predictions` function that automates the process of prediction and model evaluation. Roughly, this function performs the following steps:
#
# - Removes the final window of length `prediction_length` of the `dataset.test` that we want to predict
# - The estimator uses the remaining data to predict (in the form of sample paths) the "future" window that was just removed
# - The module outputs the forecast sample paths and the `dataset.test` (as python generator objects)
# In[15]:
from gluonts.evaluation.backtest import make_evaluation_predictions
# In[16]:
# Produce forecasts for the held-out windows of the test dataset.
forecast_it, ts_it = make_evaluation_predictions(
    dataset=test_ds,  # test dataset
    predictor=predictor,  # predictor
    num_samples=100,  # number of sample paths we want for evaluation
)
# First, we can convert these generators to lists to ease the subsequent computations.
# In[17]:
forecasts = list(forecast_it)
tss = list(ts_it)
# Indy500 Car 12 WillPower
# NOTE(review): index 52 is hand-picked — presumably car 12 at Indy500 in the
# flattened per-event ordering; verify against the dataset-building loop.
ts_entry = tss[52]
# first entry of the forecast list
forecast_entry = forecasts[52]
# In[28]:
print(f"Number of sample paths: {forecast_entry.num_samples}")
print(f"Dimension of samples: {forecast_entry.samples.shape}")
print(f"Start date of the forecast window: {forecast_entry.start_date}")
print(f"Frequency of the time series: {forecast_entry.freq}")
# We can also do calculations to summarize the sample paths, such computing the mean or a quantile for each of the 48 time steps in the forecast window.
# In[29]:
print(f"Mean of the future window:\n {forecast_entry.mean}")
print(f"0.5-quantile (median) of the future window:\n {forecast_entry.quantile(0.5)}")
# `Forecast` objects have a `plot` method that can summarize the forecast paths as the mean, prediction intervals, etc. The prediction intervals are shaded in different colors as a "fan chart".
# In[30]:
def plot_prob_forecasts(ts_entry, forecast_entry):
    """Plot the series tail plus forecast fan chart and save as '<runid>.pdf'.

    Relies on the module-level `runid` for the output filename.
    """
    plot_length = 150
    prediction_intervals = (50.0, 90.0)
    interval_labels = [f"{k}% prediction interval" for k in prediction_intervals]
    legend = ["observations", "median prediction"] + interval_labels[::-1]
    fig, ax = plt.subplots(1, 1, figsize=(10, 7))
    # observed tail of the time series
    ts_entry[-plot_length:].plot(ax=ax)
    forecast_entry.plot(prediction_intervals=prediction_intervals, color='g')
    plt.grid(which="both")
    plt.legend(legend, loc="upper left")
    plt.savefig(runid + '.pdf')
# In[31]:
# Plot the hand-picked example, then compute accuracy metrics.
plot_prob_forecasts(ts_entry, forecast_entry)
# We can also evaluate the quality of our forecasts numerically. In GluonTS, the `Evaluator` class can compute aggregate performance metrics, as well as metrics per time series (which can be useful for analyzing performance across heterogeneous time series).
# In[32]:
from gluonts.evaluation import Evaluator
# In[33]:
evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
# Aggregate metrics aggregate both across time-steps and across time series.
# In[34]:
print(json.dumps(agg_metrics, indent=4))
# Individual metrics are aggregated only across time-steps.
# In[35]:
# NOTE(review): leftover notebook cell — the head() result is discarded when
# run as a script.
item_metrics.head()
| 9,323 | 30.714286 | 428 | py |
rankpredictor | rankpredictor-master/run/9.DeepModels/experiment/src/prophet_laptime-rank-v2.py | #!/usr/bin/env python
# coding: utf-8
# # Prophet on laptime&rank dataset
#
# https://gluon-ts.mxnet.io/api/gluonts/gluonts.model.prophet.html
#
# laptime&rank dataset
# <eventid, carids, laptime (totalcars x totallaps), rank (totalcars x totallaps)>; filled with NaN
# In[1]:
# Third-party imports
# NOTE(review): IPython-only magic from the notebook export — this line fails
# with NameError under a plain Python interpreter.
get_ipython().run_line_magic('matplotlib', 'inline')
import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
# In[2]:
### test on one run
from gluonts.dataset.common import ListDataset
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
def nan_helper(y):
    """Locate NaNs in a 1-D array and provide a logical-to-positional converter.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, boolean mask marking the NaN positions
        - index, a function mapping a logical (boolean) index array to the
          equivalent integer positions
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x= nan_helper(y)
        >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)

    def to_positions(logical):
        # positions of the True entries in the logical index array
        return logical.nonzero()[0]

    return mask, to_positions
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
def make_dataset(runs, prediction_length, freq,
                 run_ts=2, train_ratio = 0.8,
                 use_global_dict = True):
    """
    split the ts to train and test part by the ratio

    Parameters:
        runs          -- event index to use; a negative value selects all events
        prediction_length -- forecast horizon in time steps
        freq          -- pandas frequency string for the synthetic timestamps
        run_ts        -- column selector into the per-event tuple (2 = laptime,
                         3 = rank — presumably; confirm against the pickle layout)
        train_ratio   -- fraction of the longest series used for training
        use_global_dict -- map row ids through `global_carids`; when False the
                         row id itself is used (simulation data)

    Returns:
        (train_ds, test_ds, train_set, test_set) — GluonTS ListDatasets plus
        the raw record lists they were built from.

    Relies on module globals: laptime_data, events, global_carids, ListDataset.
    """
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []
    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()
    #_data: eventid, carids, laptime array
    for _data in _laptime_data:
        _train = []
        _test = []
        #statistics on the ts length
        ts_len = [ x.shape[0] for x in _data[run_ts]]
        # train length is derived from the LONGEST series of the event
        train_len = int(np.max(ts_len) * train_ratio)
        print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
        for rowid in range(_data[run_ts].shape[0]):
            rec = _data[run_ts][rowid, :].copy()
            #remove nan
            nans, x= nan_helper(rec)
            nan_count = np.sum(nans)
            rec = rec[~np.isnan(rec)]
            # remove short ts: series shorter than train window + horizon
            totallen = rec.shape[0]
            if ( totallen < train_len + prediction_length):
                print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            # split and add to dataset record
            _train.append({'target': rec[:train_len].astype(np.float32),
                           'start': start,
                           'feat_static_cat': carid}
                          )
            # multiple test ts(rolling window as half of the prediction_length)
            # endpos walks backwards from the full length in steps of
            # prediction_length/2, stopping above train_len+prediction_length
            test_rec_cnt = 0
            for endpos in range(totallen, train_len+prediction_length, -int(prediction_length/2)):
                _test.append({'target': rec[:endpos].astype(np.float32),
                              'start': start,
                              'feat_static_cat': carid}
                             )
                test_rec_cnt += 1
            #add one ts
            print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
        train_set.extend(_train)
        test_set.extend(_test)
    # train dataset: cut the last window of length "prediction_length", add "target" and "start" fields
    train_ds = ListDataset(train_set, freq=freq)
    # test dataset: use the whole dataset, add "target" and "start" fields
    test_ds = ListDataset(test_set, freq=freq)
    return train_ds, test_ds, train_set, test_set
def evaluate_model(test_ds, predictor, output=''):
    """Backtest `predictor` on `test_ds`, print metrics and plot one example.

    Prints the aggregate accuracy metrics as JSON and plots the forecast for
    test-series index 7, saving it when `output` is non-empty.
    """
    forecast_gen, truth_gen = make_evaluation_predictions(
        dataset=test_ds,
        predictor=predictor,
        num_samples=100,  # sample paths drawn per forecast
    )
    forecasts = list(forecast_gen)
    tss = list(truth_gen)
    # aggregate accuracy metrics across all test series
    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, _item_metrics = evaluator(iter(tss), iter(forecasts),
                                           num_series=len(test_ds))
    print(json.dumps(agg_metrics, indent=4))
    # plot one example (index 7)
    plot_prob_forecasts(tss[7], forecasts[7], output)
def plot_prob_forecasts(ts_entry, forecast_entry, output):
    """Show the last 50 points of a series with its forecast fan chart.

    Saves to '<output>.pdf' when `output` is non-empty, then displays it.
    """
    plot_length = 50
    prediction_intervals = (50.0, 90.0)
    interval_labels = [f"{k}% prediction interval" for k in prediction_intervals]
    legend = ["observations", "median prediction"] + interval_labels[::-1]
    fig, ax = plt.subplots(1, 1, figsize=(10, 7))
    ts_entry[-plot_length:].plot(ax=ax)  # observed tail
    forecast_entry.plot(prediction_intervals=prediction_intervals, color='g')
    plt.grid(which="both")
    plt.legend(legend, loc="upper left")
    if output:
        plt.savefig(output + '.pdf')
    plt.show()
# prophet
def run_prophet(dataset, prediction_length, freq, output=''):
    """Evaluate a Prophet predictor on `dataset` and plot/save one example."""
    prophet = ProphetPredictor(freq=freq, prediction_length=prediction_length)
    evaluate_model(dataset, prophet, output)
# ets
def run_ets(dataset, prediction_length, freq, output=''):
    """Evaluate an R 'ets' forecaster on `dataset` and plot/save one example."""
    ets = RForecastPredictor(method_name='ets',
                             freq=freq,
                             prediction_length=prediction_length)
    evaluate_model(dataset, ets, output)
# arima
def run_arima(dataset, prediction_length, freq, output=''):
    """Evaluate an R 'arima' forecaster on `dataset` and plot/save one example.

    Bug fix: this function was mistakenly named `run_ets`, which both shadowed
    the real ETS helper defined earlier in this file and left the `run_arima`
    calls later in the script raising NameError.
    """
    predictor = RForecastPredictor(method_name='arima', freq=freq,
                                   prediction_length=prediction_length)
    evaluate_model(dataset, predictor, output)
# ## Indy Dataset
#
# In[3]:
import pickle
### load indy
# Load the laptime/rank pickle: car-id mapping plus per-event matrices.
with open('laptime_rank-2018.pickle', 'rb') as f:
    # The protocol version used is detected automatically, so we do not
    # have to specify it.
    global_carids, laptime_data_indy = pickle.load(f, encoding='latin1')
# In[4]:
events = ['Phoenix','Indy500','Texas','Iowa','Pocono','Gateway']
events_id={key:idx for idx, key in enumerate(events)}
# In[5]:
print(f"events: {events}")
# In[6]:
laptime_data = laptime_data_indy
# NOTE(review): leftover notebook inspection cell — result is discarded.
laptime_data[2][2].astype(np.float32)
# In[7]:
# global configuration
prediction_length = 5
freq = "1min"
cardinality = [len(global_carids)]
TS_LAPTIME=2
TS_RANK=3
run_ts = TS_LAPTIME
# In[8]:
#run on indy500 dataset
train_ds, test_ds,_,_ = make_dataset(1, prediction_length,freq)
# In[9]:
# NOTE(review): IPython-only magic from the notebook export — raises
# NameError when run as a plain script.
get_ipython().run_line_magic('debug', '')
# In[ ]:
# Baselines on the Indy500 event only.
output = f'Prophet-indy-indy500'
run_prophet(test_ds, prediction_length, freq, output)
output = f'ETS-indy-indy500'
run_ets(test_ds, prediction_length, freq, output)
output = f'ARIMA-indy-indy500'
run_arima(test_ds, prediction_length, freq, output)
# In[ ]:
# In[ ]:
# test all
train_ds, test_ds,_,_ = make_dataset(-1, prediction_length,freq)
output = f'Prophet-indy-all'
run_prophet(test_ds, prediction_length, freq, output)
output = f'ETS-indy-all'
run_ets(test_ds, prediction_length, freq, output)
output = f'ARIMA-indy-all'
run_arima(test_ds, prediction_length, freq, output)
# In[ ]:
# Visualize one train/test split boundary.
entry = next(iter(train_ds))
train_series = to_pandas(entry)
entry = next(iter(test_ds))
test_series = to_pandas(entry)
test_series.plot()
plt.axvline(train_series.index[-1], color='r') # end of train dataset
plt.grid(which="both")
plt.legend(["test series", "end of train series"], loc="upper left")
plt.show()
# Individual metrics are aggregated only across time-steps.
# In[ ]:
# NOTE(review): `item_metrics` is never assigned at module level
# (evaluate_model keeps it local) — these cells raise NameError as a script.
item_metrics.head()
# In[ ]:
item_metrics.plot(x='MSIS', y='MASE', kind='scatter')
plt.grid(which="both")
plt.show()
# In[ ]:
# ### test on sim-indy dataset
# In[ ]:
import pickle
with open('sim-indy500-laptime-2018.pickle', 'rb') as f:
    # The protocol version used is detected automatically, so we do not
    # have to specify it.
    laptime_data_simindy = pickle.load(f, encoding='latin1')
print(f"number of runs: {len(laptime_data)}")
# In[ ]:
laptime_data = laptime_data_simindy
#run on indy500 dataset
train_ds, test_ds,_,_ = make_dataset(1, prediction_length,freq, use_global_dict=False)
output = f'Prophet-simindy-indy500'
run_prophet(test_ds, prediction_length, freq, output)
# In[ ]:
# NOTE(review): IPython-only magic — see note above in this same cell style;
# raises NameError when run as a plain script.
get_ipython().run_line_magic('debug', '')
# In[ ]:
output = f'ETS-simindy-indy500'
run_ets(test_ds, prediction_length, freq, output)
output = f'ARIMA-simindy-indy500'
run_arima(test_ds, prediction_length, freq, output)
# In[ ]:
# test all
#train_ds, test_ds,_,_ = make_dataset(-1, prediction_length,freq)
#output = f'Prophet-simindy-all'
#run_prophet(test_ds, prediction_length, freq, output)
#output = f'ETS-simindy-all'
#run_ets(test_ds, prediction_length, freq, output)
#output = f'ARIMA-simindy-all'
#run_arima(test_ds, prediction_length, freq, output)
# In[ ]:
rankpredictor | rankpredictor-master/run/9.DeepModels/experiment/src/deepmodels_indy_gluontsdb.py | #!/usr/bin/env python
# coding: utf-8
"""
Deep Models on the Indy dataset
dataset:
freq, prediction_length, cardinality,train_ds, test_ds
deep models:
deepAR, deepstate, deepFactor
"""
# # DeepAR on simulation indy500 laptime dataset
#
# laptime dataset
# <eventid, carids, laptime (totalcars x totallaps)>
# Third-party imports
import mxnet as mx
from mxnet import gluon
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import logging
import os,sys
from optparse import OptionParser
import pickle
from pathlib import Path
from gluonts.dataset.common import ListDataset
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator
logger = logging.getLogger(__name__)
#global variables
# Forecast horizon and conditioning window (laps); context_length may be
# overridden from the command line in __main__.
prediction_length = 50
context_length = 100
# synthetic pandas frequency for the lap-indexed series
freq = "1H"
events = ['Phoenix','Indy500','Texas','Iowa','Pocono','Gateway']
events_id={key:idx for idx, key in enumerate(events)}
# placeholder; replaced by the value stored in the dataset pickle
cardinality = [0]
# column selectors into the per-event data tuple
TS_LAPTIME=2
TS_RANK=3
def load_dataset(inputfile, run_ts = TS_LAPTIME):
    """Load a pickled GluonTS dataset and update module-level globals.

    Overwrites the globals `freq`, `prediction_length` and `cardinality`
    with the values stored in the pickle.

    NOTE(review): `run_ts` is accepted but never used — the pickle already
    contains fully-built datasets; confirm whether the caller's ts_type is
    meant to have any effect here.

    Returns (train_ds, test_ds).
    """
    global freq, prediction_length, cardinality
    with open(inputfile, 'rb') as f:
        # The pickle protocol version is detected automatically.
        freq, prediction_length, cardinality,train_ds, test_ds = pickle.load(f, encoding='latin1')
    logger.info(f"number of cars: {cardinality}")
    return train_ds, test_ds
def plot_prob_forecasts(ts_entry, forecast_entry, outputfile):
    """Plot each observed series with its forecast fan chart.

    One figure per entry, saved to '<outputfile>-<idx>.pdf'. The plotted tail
    length follows the module-level `context_length`.
    """
    plot_length = context_length
    prediction_intervals = (50.0, 90.0)
    interval_labels = [f"{k}% prediction interval" for k in prediction_intervals]
    legend = ["observations", "median prediction"] + interval_labels[::-1]
    for idx in range(len(ts_entry)):
        fig, ax = plt.subplots(1, 1, figsize=(10, 7))
        # observed tail of the time series
        ts_entry[idx][-plot_length:].plot(ax=ax)
        forecast_entry[idx].plot(prediction_intervals=prediction_intervals, color='g')
        plt.grid(which="both")
        plt.legend(legend, loc="upper left")
        plt.savefig(outputfile + '-%d.pdf' % idx)
def evaluate_model(estimator, train_ds, test_ds, outputfile):
    """Train `estimator`, plot three example forecasts and log aggregate metrics.

    Trains on `train_ds`, backtests on `test_ds` (100 sample paths per
    forecast), saves example plots under `outputfile`, and logs the
    aggregate accuracy metrics as JSON.
    """
    predictor = estimator.train(train_ds)
    #if not os.path.exists(outputfile):
    #    os.mkdir(outputfile)
    #predictor.serialize(Path(outputfile))
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    logger.info(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    # car12@rank1, car1@rank16, car7@rank33, the index is 7,0,4 accordingly
    # Indy500 Car 12 WillPower
    #offset = 52-7
    # offset allows shifting the hand-picked indices when the test set is a
    # concatenation over several events; 0 means single-event layout.
    offset = 0
    ts_entry = [tss[7+offset],tss[0+offset],tss[4+offset]]
    forecast_entry = [forecasts[7+offset],forecasts[0+offset],forecasts[4+offset]]
    plot_prob_forecasts(ts_entry, forecast_entry, outputfile)
    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(test_ds))
    logger.info(json.dumps(agg_metrics, indent=4))
def init_estimator(model, gpuid, epochs=100):
    """Build the GluonTS estimator selected by `model`.

    Reads the module-level `prediction_length`, `context_length`, `freq`
    and `cardinality`.

    Parameters
    ----------
    model : str
        One of 'deepAR', 'simpleFF', 'deepFactor', 'deepState'.
    gpuid : int or str
        GPU id used to build the mxnet context string "gpu(<id>)".
    epochs : int or str
        Training epochs. OptionParser hands command-line values over as
        strings, so the value is coerced to int here.

    Returns
    -------
    A configured estimator; exits the process on an unknown model name.
    """
    # Command-line values arrive as strings; make sure Trainer sees an int.
    epochs = int(epochs)
    # Trainer settings shared by every model branch.
    trainer_kwargs = dict(ctx="gpu(%s)" % gpuid,
                          epochs=epochs,
                          learning_rate=1e-3,
                          num_batches_per_epoch=100)
    if model == 'deepAR':
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=Trainer(**trainer_kwargs),
        )
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length=context_length,
            freq=freq,
            # hybridize disabled only for the feed-forward model, as before
            trainer=Trainer(hybridize=False, **trainer_kwargs),
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            freq=freq,
            trainer=Trainer(**trainer_kwargs),
        )
    elif model == 'deepState':
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=Trainer(**trainer_kwargs),
        )
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)
    return estimator
if __name__ == '__main__':
    # Script entry point: parse CLI options, load the pickled GluonTS
    # dataset, build the requested estimator, then train and evaluate it.
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    # logging configure
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # cmd argument parser
    usage = 'deepar_simindy500.py --epochs epochs --input inputpicklefile --output outputfile'
    parser = OptionParser(usage)
    parser.add_option("--input", dest="inputfile", default='sim-indy500-laptime-2018.pickle')
    parser.add_option("--output", dest="outputfile")
    # NOTE(review): OptionParser returns command-line values as strings even
    # when the default is an int; downstream code must coerce them.
    parser.add_option("--epochs", dest="epochs", default=100)
    parser.add_option("--model", dest="model", default="deepAR")
    parser.add_option("--gpuid", dest="gpuid", default=0)
    parser.add_option("--contextlen", dest="contextlen", default=100)
    #parser.add_option("--predictionlen", dest="predictionlen", default=50)
    #parser.add_option("--testlen", dest="testlen", default=50)
    parser.add_option("--ts", dest="ts_type", default=2)
    opt, args = parser.parse_args()
    #set the global length
    #prediction_length = int(opt.predictionlen)
    context_length = int(opt.contextlen)
    #test_length = int(opt.testlen)
    ts_type = int(opt.ts_type)
    # load_dataset also refreshes the globals freq/prediction_length/cardinality
    train_ds, test_ds = load_dataset(opt.inputfile, ts_type)
    runid = f'-i{opt.outputfile}-e{opt.epochs}-m{opt.model}-p{prediction_length}-c{opt.contextlen}-f{freq}-ts{opt.ts_type}'
    logger.info("runid=%s", runid)
    estimator = init_estimator(opt.model, opt.gpuid, opt.epochs)
    evaluate_model(estimator, train_ds, test_ds, opt.outputfile)
| 7,519 | 30.864407 | 123 | py |
rankpredictor | rankpredictor-master/run/17.StintSimulator/notebook/stint-test-strategy.py | #!/usr/bin/env python
# coding: utf-8
# In[3]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
#from indycar.model.stint_predictor_fastrun import *
import indycar.model.stint_simulator as stint
# In[4]:
def simulation(datasetid, testevent, taskid, runts, expid, predictionlen, datamode, loopcnt, featuremode = stint.FEATURE_STATUS):
    """Run the stint simulator `loopcnt` times and aggregate the evaluation.

    Configures the global state of the `stint` module (dataset id, test
    event, task/ts/exp ids, feature mode), loads the pretrained 'oracle'
    model, runs `loopcnt` independent simulations, and prints the mean of
    the per-run evaluation metrics.

    Returns:
        (metrics, predictions): `metrics` is a numpy array holding one row
        of stint.get_evalret() output per run; `predictions` maps run
        index -> that run's result dataframe.
    """
    # model path layout: <_dataset_id>/<_task_id>-<trainid>/
    stint.init()
    stint._dataset_id = datasetid
    stint._test_event = testevent
    stint._feature_mode = featuremode
    stint._context_ratio = 0.
    stint._task_id = taskid    # the trained model's task (rank, laptime, ...)
    stint._run_ts = runts      # COL_LAPTIME, COL_RANK, ...
    stint._exp_id = expid      # rank, laptime, laptime2rank, timediff2rank, ...
    stint._train_len = 40

    predictor = stint.load_model(predictionlen, 'oracle', trainid='2018')

    # run every simulation first, then evaluate each run (same phase order
    # as before: all predictions, then all evaluations)
    predictions = {
        run: stint.run_simulation_pred(predictor, predictionlen, stint.freq, datamode=datamode)
        for run in range(loopcnt)
    }
    metrics = np.array([stint.get_evalret(predictions[run]) for run in predictions])
    print(np.mean(metrics, axis=0))
    return metrics, predictions
# ### test low mode
# In[5]:
# CLI: [testcar [runid [loopcnt]]]
testcar = 12
runid = 0
loopcnt = 50
if len(sys.argv) > 1:
    testcar = int(sys.argv[1])
if len(sys.argv) > 2:
    runid = sys.argv[2]
if len(sys.argv) > 3:
    loopcnt = int(sys.argv[3])
print('testcar:', testcar, 'runid:', runid, 'loopcnt:', loopcnt)

# run the simulation with the "low" pit strategy applied to the test car
stint._pitstrategy_lowmode = True
stint._pitstrategy_testcar = testcar
acc, ret = simulation('indy2013-2018', 'Indy500-2018',
                      'timediff',stint.COL_TIMEDIFF,'timediff2rank',
                      2, stint.MODE_ORACLE_LAPONLY,loopcnt)
df = pd.concat(ret)
dftestcar_low = df[df['carno']==testcar]
df.to_csv(f'test-strategy-df-lowmode-c{testcar}-r{runid}.csv')

# repeat with the "high" pit strategy
stint._pitstrategy_lowmode = False
stint._pitstrategy_testcar = testcar
acc, ret_high = simulation('indy2013-2018', 'Indy500-2018',
                           'timediff',stint.COL_TIMEDIFF,'timediff2rank',
                           2, stint.MODE_ORACLE_LAPONLY,loopcnt)
df = pd.concat(ret_high)
dftestcar_high = df[df['carno']==testcar]
df.to_csv(f'test-strategy-df-highmode-c{testcar}-r{runid}.csv')

stint.get_evalret(dftestcar_low)
stint.get_evalret(dftestcar_high)
print('sample cnt:', len(dftestcar_low))

# check the difference between the two distributions of pred_sign;
# contingency table: mode (low/high) x predicted sign category
f_obs = np.zeros((2, 3))
predsign = dftestcar_low.pred_sign
for idx, sign in enumerate([-1,0,1]):
    f_obs[0, idx] = np.sum(predsign == sign)
predsign = dftestcar_high.pred_sign
for idx, sign in enumerate([-1,0,1]):
    f_obs[1, idx] = np.sum(predsign == sign)

from scipy import stats  # NOTE: re-import of the module-level import; harmless
chi, pval, freedom = stats.chi2_contingency(f_obs)[0:3]
print('chi2 test:', chi, pval, freedom)
| 4,098 | 27.866197 | 129 | py |
rankpredictor | rankpredictor-master/run/17.StintSimulator/notebook/stint-test.py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
#from indycar.model.stint_predictor_fastrun import *
import indycar.model.stint_simulator as stint
#
# configurataion
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
#_dataset_id = 'indy2013-2018-nocarid'
stint.init()
stint._dataset_id = 'indy2013-2018-nocarid-context40'
stint._test_event = 'Indy500-2019'
#_test_event = 'Indy500-2019'
stint._feature_mode = stint.FEATURE_STATUS
stint._context_ratio = 0.
stint._task_id = 'timediff' # rank,laptime, the trained model's task
stint._run_ts = stint.COL_TIMEDIFF #COL_LAPTIME,COL_RANK
stint._exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
stint._train_len = 40
predictor = stint.load_model(2, 'oracle',trainid='2018')
# In[155]:
df = stint.run_simulation_pred(predictor, 2, stint.freq, datamode=stint.MODE_ORACLE)
| 2,075 | 28.657143 | 84 | py |
rankpredictor | rankpredictor-master/run/18.FinalTest/notebook/stint-test-strategy.py | #!/usr/bin/env python
# coding: utf-8
# In[3]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
#from indycar.model.stint_predictor_fastrun import *
import indycar.model.stint_simulator as stint
# In[4]:
def simulation(datasetid, testevent, taskid, runts, expid, predictionlen, datamode, loopcnt, featuremode = stint.FEATURE_STATUS):
    """Run the stint simulator `loopcnt` times and aggregate the evaluation.

    Configures the global state of the `stint` module (dataset id, test
    event, task/ts/exp ids, feature mode), loads the pretrained 'oracle'
    model, runs `loopcnt` independent simulations, and prints the mean of
    the per-run evaluation metrics.

    Returns:
        (metrics, predictions): `metrics` is a numpy array holding one row
        of stint.get_evalret() output per run; `predictions` maps run
        index -> that run's result dataframe.
    """
    # model path layout: <_dataset_id>/<_task_id>-<trainid>/
    stint.init()
    stint._dataset_id = datasetid
    stint._test_event = testevent
    stint._feature_mode = featuremode
    stint._context_ratio = 0.
    stint._task_id = taskid    # the trained model's task (rank, laptime, ...)
    stint._run_ts = runts      # COL_LAPTIME, COL_RANK, ...
    stint._exp_id = expid      # rank, laptime, laptime2rank, timediff2rank, ...
    stint._train_len = 40

    predictor = stint.load_model(predictionlen, 'oracle', trainid='2018')

    # run every simulation first, then evaluate each run (same phase order
    # as before: all predictions, then all evaluations)
    predictions = {
        run: stint.run_simulation_pred(predictor, predictionlen, stint.freq, datamode=datamode)
        for run in range(loopcnt)
    }
    metrics = np.array([stint.get_evalret(predictions[run]) for run in predictions])
    print(np.mean(metrics, axis=0))
    return metrics, predictions
# ### test low mode
# In[5]:
# CLI: [testcar [runid [loopcnt]]]
testcar = 12
runid = 0
loopcnt = 50
if len(sys.argv) > 1:
    testcar = int(sys.argv[1])
if len(sys.argv) > 2:
    runid = sys.argv[2]
if len(sys.argv) > 3:
    loopcnt = int(sys.argv[3])
print('testcar:', testcar, 'runid:', runid, 'loopcnt:', loopcnt)

# run the simulation with the "low" pit strategy applied to the test car
stint._pitstrategy_lowmode = True
stint._pitstrategy_testcar = testcar
acc, ret = simulation('indy2013-2018', 'Indy500-2018',
                      'timediff',stint.COL_TIMEDIFF,'timediff2rank',
                      2, stint.MODE_ORACLE_LAPONLY,loopcnt)
df = pd.concat(ret)
dftestcar_low = df[df['carno']==testcar]
df.to_csv(f'test-strategy-df-lowmode-c{testcar}-r{runid}.csv')

# repeat with the "high" pit strategy
stint._pitstrategy_lowmode = False
stint._pitstrategy_testcar = testcar
acc, ret_high = simulation('indy2013-2018', 'Indy500-2018',
                           'timediff',stint.COL_TIMEDIFF,'timediff2rank',
                           2, stint.MODE_ORACLE_LAPONLY,loopcnt)
df = pd.concat(ret_high)
dftestcar_high = df[df['carno']==testcar]
df.to_csv(f'test-strategy-df-highmode-c{testcar}-r{runid}.csv')

stint.get_evalret(dftestcar_low)
stint.get_evalret(dftestcar_high)
print('sample cnt:', len(dftestcar_low))

# check the difference between the two distributions of pred_sign;
# contingency table: mode (low/high) x predicted sign category
f_obs = np.zeros((2, 3))
predsign = dftestcar_low.pred_sign
for idx, sign in enumerate([-1,0,1]):
    f_obs[0, idx] = np.sum(predsign == sign)
predsign = dftestcar_high.pred_sign
for idx, sign in enumerate([-1,0,1]):
    f_obs[1, idx] = np.sum(predsign == sign)

from scipy import stats  # NOTE: re-import of the module-level import; harmless
chi, pval, freedom = stats.chi2_contingency(f_obs)[0:3]
print('chi2 test:', chi, pval, freedom)
| 4,098 | 27.866197 | 129 | py |
rankpredictor | rankpredictor-master/run/18.FinalTest/notebook/stint-test.py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import ipdb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random
import inspect
from scipy import stats
from sklearn.metrics import mean_squared_error
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.ZeroPredictor import ZeroPredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
#from indycar.model.stint_predictor_fastrun import *
import indycar.model.stint_simulator as stint
#
# configuration
#
# model path: <_dataset_id>/<_task_id>-<trainid>/
#_dataset_id = 'indy2013-2018-nocarid'
stint.init()
stint._dataset_id = 'indy2013-2018-nocarid-context40'
stint._test_event = 'Indy500-2019'
#_test_event = 'Indy500-2019'
stint._feature_mode = stint.FEATURE_STATUS
stint._context_ratio = 0.
stint._task_id = 'timediff' # rank,laptime, the trained model's task
stint._run_ts = stint.COL_TIMEDIFF #COL_LAPTIME,COL_RANK
stint._exp_id='timediff2rank' #rank, laptime, laptim2rank, timediff2rank...
stint._train_len = 40

# load the pretrained oracle model (prediction length = 2) and run a single
# oracle-mode simulation
predictor = stint.load_model(2, 'oracle',trainid='2018')

df = stint.run_simulation_pred(predictor, 2, stint.freq, datamode=stint.MODE_ORACLE)
| 2,075 | 28.657143 | 84 | py |
rankpredictor | rankpredictor-master/run/23.experiments/notebook/RankNet-QuickTest-Slim.py | #!/usr/bin/env python
# coding: utf-8
# ## QuickTest Slim
#
# based on : RankNet-QuickTest-Joint
#
# makedb laptime
# makedb gluonts
# train model
# evaluate model
#
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import random
import mxnet as mx
from mxnet import gluon
import pickle
import json
import copy
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from pathlib import Path
import configparser
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWeightEstimator
#import indycar.model.stint_simulator_shortterm_pitmodel as stint
import indycar.model.quicktest_simulator as stint
# import all functions
#from indycar.model.global_variables import _hi
import indycar.model.global_variables as gvar
from indycar.model.quicktest_modules import *
# ## run
# In[2]:
### run

# Script preamble: configure logging and parse command-line options. The
# single positional argument is the experiment .ini config file.
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
# logging configure
import logging.config
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))

# cmd argument parser; numeric options default to -1 / '' meaning
# "not given, keep the config-file value" (overrides applied later)
usage = 'RankNet-QuickTest.py <configfile> [options]'
parser = OptionParser(usage)
parser.add_option("--forecast_mode", dest="forecast_mode", default="")
parser.add_option("--trainmodel", default='', dest="trainmodel")
parser.add_option("--testmodel", default='', dest="testmodel")
parser.add_option("--joint_train", action="store_true", default=False, dest="joint_train")
parser.add_option("--loopcnt", default=-1,type='int', dest="loopcnt")
parser.add_option("--gpuid", default=-1,type='int', dest="gpuid")
parser.add_option("--pitmodel_bias", default=-1, type='int', dest="pitmodel_bias")
parser.add_option("--trainrace", default='Indy500', dest="trainrace")
parser.add_option("--test_event", default='', dest="test_event")
parser.add_option("--suffix", default='', dest="suffix")
parser.add_option("--dataroot", default='data/', dest="dataroot")
parser.add_option("--prediction_length", default=-1,type='int', dest="prediction_length")
parser.add_option("--context_length", default=-1,type='int', dest="context_length")
parser.add_option("--weight_coef", default=-1,type='float', dest="weight_coef")
parser.add_option("--lr", default=1e-3,type='float', dest="learning_rate")
parser.add_option("--patience", default=10,type='int', dest="patience")
parser.add_option("--use_validation", action="store_true", default=False, dest="use_validation")
opt, args = parser.parse_args()
print(len(args), opt.joint_train)

#check validation
if len(args) != 1:
    logger.error(globals()['__doc__'] % locals())
    sys.exit(-1)

configfile = args[0]
base=os.path.basename(configfile)
configname = os.path.splitext(base)[0]
WorkRootDir = 'QuickTestOutput'
#configname = 'weighted-noinlap-nopitage-nocate-c60-drank'
#configfile = f'{configname}.ini'
if not os.path.exists(configfile):
    print('config file not exists error:', configfile)
    sys.exit(-1)
if configfile != '':
    # read every experiment parameter from the [RankNet-QuickTest] section;
    # trailing comments show typical example values
    config = configparser.RawConfigParser()
    #config.read(WorkRootDir + '/' + configfile)
    config.read(configfile)
    #set them back
    section = "RankNet-QuickTest"
    _savedata = config.getboolean(section, "_savedata")
    _skip_overwrite = config.getboolean(section, "_skip_overwrite")
    _inlap_status = config.getint(section, "_inlap_status") #0
    _feature_mode = config.getint(section, "_feature_mode") #FEATURE_STATUS
    _featureCnt = config.getint(section, "_featureCnt") #9
    freq = config.get(section, "freq") #"1min"
    _train_len = config.getint(section, "_train_len") #40
    prediction_length = config.getint(section, "prediction_length") #2
    context_ratio = config.getfloat(section, "context_ratio") #0.
    context_length = config.getint(section, "context_length") #40
    dataset= config.get(section, "dataset") #'rank'
    epochs = config.getint(section, "epochs") #1000
    gpuid = config.getint(section, "gpuid") #5
    _use_weighted_model = config.getboolean(section, "_use_weighted_model")
    trainmodel = config.get(section, "trainmodel") #'deepARW-Oracle' if _use_weighted_model else 'deepAR-Oracle'
    _use_cate_feature = config.getboolean(section, "_use_cate_feature")
    distroutput = config.get(section, "distroutput") #'student'
    batch_size = config.getint(section, "batch_size") #32
    loopcnt = config.getint(section, "loopcnt") #2
    _test_event = config.get(section, "_test_event") #'Indy500-2018'
    testmodel = config.get(section, "testmodel") #'oracle'
    pitmodel = config.get(section, "pitmodel") #'oracle'
    year = config.get(section, "year") #'2018'
    contextlen = context_length
    use_feat_static = _use_cate_feature
    #config1 = get_config()
    train_years = config.get(section, "train_years", fallback='2013,2014,2015,2016,2017')
    _train_years = train_years.split(',')
else:
    # unreachable in practice (configfile existence is validated above);
    # kept as a guard
    print('Warning, please use config file')
    sys.exit(0)
# In[3]:
# new added parameters
_draw_figs = False
_test_train_len = 40
_joint_train = False
_pitmodel_bias = 0
#shortterm, stint
#_forecast_mode = 'stint'
_forecast_mode = 'shortterm'
_weight_coef = 9

# command-line options (when given) override the config-file values
if opt.forecast_mode != '':
    _forecast_mode = opt.forecast_mode
if opt.trainmodel != '':
    trainmodel = opt.trainmodel
if opt.testmodel != '':
    testmodel = opt.testmodel
if opt.joint_train != False:
    _joint_train = True
if opt.gpuid >= 0:
    gpuid = opt.gpuid
if opt.loopcnt > 0:
    loopcnt = opt.loopcnt
if opt.prediction_length > 0:
    prediction_length = opt.prediction_length
if opt.context_length > 0:
    # NOTE(review): this does not update `contextlen` (copied from the
    # config above), so gvar.contextlen keeps the config value even when
    # --context_length is given — confirm this is intended
    context_length = opt.context_length
if opt.weight_coef > 0:
    _weight_coef = opt.weight_coef
if opt.pitmodel_bias >= 0:
    _pitmodel_bias = opt.pitmodel_bias
if opt.test_event != '':
    _test_event = opt.test_event
if opt.suffix:
    _debugstr = f'-{opt.suffix}'
else:
    _debugstr = ''
if opt.learning_rate > 0:
    gvar.learning_rate = opt.learning_rate
if opt.patience > 0:
    gvar.patience = opt.patience
gvar.use_validation = opt.use_validation
dataroot = opt.dataroot
trainrace = opt.trainrace

#discard year
year = _test_event
if testmodel == 'pitmodel':
    # encode a non-zero bias into the test-model name, e.g. 'pitmodel2'
    testmodel = 'pitmodel%s'%(_pitmodel_bias if _pitmodel_bias!=0 else '')
cur_featurestr = decode_feature_mode(_feature_mode)
# In[4]:
#
# string maps used to build experiment/file names
#
inlapstr = {0:'noinlap',1:'inlap',2:'outlap'}
weightstr = {True:'weighted',False:'noweighted'}
catestr = {True:'cate',False:'nocate'}

#
# input data parameters
#
# event -> car#, maxlap (three values per event; exact semantics of the
# middle float look like track length in miles — confirm)
events_info = {
    'Phoenix':(256, 1.022, 250),'Indy500':(500,2.5,200),'Texas':(372,1.5,248),
    'Iowa':(268,0.894,300),'Pocono':(500,2.5,200),'Gateway':(310,1.25,248)
}
_race_info = {}
# the races have 7 years data
races = ['Indy500', 'Texas','Iowa','Pocono']
years = ['2013','2014','2015','2016','2017','2018','2019']
events = []
for race in races:
    events.extend([f'{race}-{x}' for x in years])
events.extend(['Phoenix-2018','Gateway-2018','Gateway-2019'])
events_id={key:idx for idx, key in enumerate(events)}

# dataset shared
dataOutputRoot = "data/"
covergap = 1
dbid = f'IndyCar_d{len(events)}_v{_featureCnt}_p{_inlap_status}'
LAPTIME_DATASET = f'{dataOutputRoot}/laptime_rank_timediff_pit-oracle-{dbid}.pickle'
STAGE_DATASET = f'{dataOutputRoot}/stagedata-{dbid}.pickle'
PITCOVERED_DATASET = f'{dataOutputRoot}/pitcoveredlaps-{dbid}-g{covergap}.pickle'
#dbid = f'Indy500_{years[0]}_{years[-1]}_v{_featureCnt}_p{_inlap_status}'
dbid = f'IndyCar_d{len(events)}_v{_featureCnt}_p{_inlap_status}'
_dataset_id = '%s-%s'%(inlapstr[_inlap_status], cur_featurestr)

#trainrace = 'Indy500'
#_train_events = [events_id[x] for x in [f'{trainrace}-{x}' for x in ['2013','2014','2015','2016','2017']]]
#patch
if trainrace == 'Pocono':
    _train_years = ['2013','2015','2016','2017']
_train_events = [events_id[x] for x in [f'{trainrace}-{x}' for x in _train_years]]

#replace TRAINRACE in pitmodel
# NOTE(review): `> 0` skips replacement when 'TRAINRACE' is at index 0;
# use `>= 0` or `'TRAINRACE' in pitmodel` if that case can occur
if pitmodel.find('TRAINRACE') > 0:
    pitmodel = pitmodel.replace('TRAINRACE', trainrace)

#
# internal parameters
#
distr_outputs ={'student':StudentTOutput(),
                'negbin':NegativeBinomialOutput()
                }
distr_output = distr_outputs[distroutput]

#
# experiment id and output locations
#
experimentid = f'{weightstr[_use_weighted_model]}-{inlapstr[_inlap_status]}-{cur_featurestr}-{catestr[_use_cate_feature]}-c{context_length}{_debugstr}'

outputRoot = f"{WorkRootDir}/{experimentid}/"
#version = f'IndyCar-d{len(events)}-endlap'
version = f'IndyCar-d{trainrace}-endlap'

# standard output file names
SIMULATION_OUTFILE = f'{outputRoot}/{_test_event}/{_forecast_mode}-dfout-{trainmodel}-indy500-{dataset}-{inlapstr[_inlap_status]}-{cur_featurestr}-{testmodel}-l{loopcnt}-alldata.pickle'
EVALUATION_RESULT_DF = f'{outputRoot}/{_test_event}/{_forecast_mode}-evaluation_result_d{dataset}_m{testmodel}.csv'
LONG_FORECASTING_DFS = f'{outputRoot}/{_test_event}/{_forecast_mode}-long_forecasting_dfs_d{dataset}_m{testmodel}.pickle'
FORECAST_FIGS_DIR = f'{outputRoot}/{_test_event}/{_forecast_mode}-forecast-figs-d{dataset}_m{testmodel}/'
# In[5]:
# set global vars: push the resolved configuration into the shared
# global_variables module so the imported quicktest modules see the
# same settings
gvar._savedata = _savedata
gvar._skip_overwrite = _skip_overwrite
gvar._inlap_status = _inlap_status
gvar._feature_mode = _feature_mode
gvar._featureCnt = _featureCnt
gvar.freq = freq
gvar._train_len = _train_len
gvar.prediction_length = prediction_length
gvar.context_ratio = context_ratio
gvar.context_length = context_length
gvar.contextlen = contextlen
gvar.dataset = dataset
gvar.epochs = epochs
gvar.gpuid = gpuid
gvar._use_weighted_model = _use_weighted_model
gvar.trainmodel = trainmodel
gvar._use_cate_feature = _use_cate_feature
gvar.use_feat_static = use_feat_static
gvar.distroutput = distroutput
gvar.batch_size = batch_size
gvar.loopcnt = loopcnt
gvar._test_event = _test_event
gvar.testmodel = testmodel
gvar.pitmodel = pitmodel
gvar.year = year
gvar._forecast_mode = _forecast_mode
gvar._test_train_len = _test_train_len
gvar._joint_train = _joint_train
gvar._pitmodel_bias = _pitmodel_bias
gvar._train_events = _train_events
gvar._weight_coef = _weight_coef
gvar.dbid = dbid
gvar.LAPTIME_DATASET = LAPTIME_DATASET
# ### 1. make laptime dataset
# Load the cached laptime/stage/pit datasets when they exist (and
# _skip_overwrite is set); otherwise build them from the raw event data
# and optionally persist them.

stagedata = {}
global_carids = {}
os.makedirs(outputRoot, exist_ok=True)
os.makedirs(f'{outputRoot}/{_test_event}', exist_ok=True)

#check the dest files first
if _skip_overwrite and os.path.exists(LAPTIME_DATASET) and os.path.exists(STAGE_DATASET):
    #
    # load data
    #
    print('Load laptime and stage dataset:',LAPTIME_DATASET, STAGE_DATASET)
    with open(LAPTIME_DATASET, 'rb') as f:
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    with open(STAGE_DATASET, 'rb') as f:
        #stagedata = pickle.load(f, encoding='latin1')
        stagedata, _race_info, _events, _events_id = pickle.load(f, encoding='latin1')
    with open(PITCOVERED_DATASET, 'rb') as f:
        pitdata = pickle.load(f, encoding='latin1')
    # check it: the cached event list must match the current configuration
    if not _events == events:
        print('Error, events mismatch at:', STAGE_DATASET)
        sys.exit(-1)
else:
    # build from scratch; assign a globally unique id to each car number
    cur_carid = 0
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        print('%s: carno=%d, lapnum=%d'%(event, len(carlist), len(laplist)))
        # (car count, lap count, max lap)
        _race_info[event] = (len(carlist), len(laplist)-1, max(laplist))
        #build the carid map
        for car in carlist:
            if car not in global_carids:
                global_carids[car] = cur_carid
                cur_carid += 1
    laptime_data = get_laptime_dataset(stagedata, inlap_status = _inlap_status)

    ### check the inlap: collect pit laps and the laps within `covergap`
    ### of a pit stop for every event
    pitdata = {}
    for event in events:
        alldata, rankdata, acldata, flagdata = stagedata[event]
        totallaps = np.max(rankdata.completed_laps.to_numpy())
        #pitlaps = rankdata[rankdata['lap_status']=='P'][['completed_laps']].to_numpy()
        pitlaps = rankdata[rankdata['lap_status']=='P'].completed_laps.to_numpy()
        pitlaps = set(sorted(pitlaps))
        pitcoveredlaps = []
        for lap in pitlaps:
            gap = range(lap - covergap, lap + covergap+1)
            #pitcoveredlaps.extend([lap -2,lap-1,lap,lap+1,lap+2])
            pitcoveredlaps.extend(gap)
        pitcoveredlaps = set(sorted(pitcoveredlaps))
        print(event, 'total:', totallaps, 'pitlaps:', len(pitlaps), 'pitcoveredlaps:', len(pitcoveredlaps))
        #save
        pitdata[event] = [pitlaps, pitcoveredlaps]

    if _savedata:
        import pickle
        #stintdf.to_csv('laptime-%s.csv'%year)
        #savefile = outputRoot + f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
        savefile = LAPTIME_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            #pack [global_carids, laptime_data]
            savedata = [global_carids, laptime_data]
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        #savefile = outputRoot + f'stagedata-{dbid}.pickle'
        savefile = STAGE_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            #pack [stagedata, race info, events]
            savedata = [stagedata, _race_info, events, events_id]
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        with open(PITCOVERED_DATASET, 'wb') as f:
            savedata = pitdata
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)

#update global var
gvar.global_carids = global_carids
gvar._race_info = _race_info
gvar.events = events
gvar.events_id = events_id
gvar.maxlap = get_event_info(_test_event)[2]
gvar.events_info = events_info
gvar.trainrace = trainrace
# ### 2. make gluonts db

#featurestr = {FEATURE_STATUS:'nopitage',FEATURE_PITAGE:'pitage',FEATURE_LEADERPITCNT:'leaderpitcnt'}
#cur_featurestr = featurestr[_feature_mode]

# echo the effective configuration before the (long) db/train steps
print('current configfile:', configfile)
print('trainrace:', trainrace)
print('train_years:', _train_years)
print('trainevents:', _train_events)
print('feature_mode:', _feature_mode, cur_featurestr)
print('trainmodel:', trainmodel)
print('testmodel:', testmodel)
print('pitmodel:', pitmodel)
print('test_event:', _test_event)
print('prediction_length:', prediction_length)
print('context_length:', context_length)
print('weight_coef:', _weight_coef)
sys.stdout.flush()
# In[7]:
# Build (or load from cache) the GluonTS train/test datasets plus the
# preprocessed laptime data for the configured target series.
outdir = outputRoot + _dataset_id
os.makedirs(outdir, exist_ok=True)
if dataset == 'laptime':
    subdir = 'laptime-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_LAPTIME
elif dataset == 'timediff':
    subdir = 'timediff-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_TIMEDIFF
elif dataset == 'rank':
    subdir = 'rank-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_RANK
else:
    # NOTE(review): falls through with subdir/_run_ts unset -> NameError
    # below; consider sys.exit here
    print('error, dataset not support: ', dataset)
_task_dir = f'{outdir}/{subdir}/'

#
#dbname, train_ds, test_ds = makedbs()
#
useeid = False
interpolate = False
#ipstr = '-ip' if interpolate else '-noip'
ipstr = '%s-%s'%('ip' if interpolate else 'noip', 'eid' if useeid else 'noeid')
jointstr = '-joint' if _joint_train else ''
dbname = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}{jointstr}.pickle'
laptimedb = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}-newlaptimedata.pickle'

#check the dest files first
if _skip_overwrite and os.path.exists(dbname) and os.path.exists(laptimedb):
    print('Load Gluonts Dataset:',dbname)
    with open(dbname, 'rb') as f:
        freq, prediction_length, cardinality, train_ds, test_ds = pickle.load(f, encoding='latin1')
    print('.......loaded data, freq=', freq, 'prediction_length=', prediction_length)
    print('Load New Laptime Dataset:',laptimedb)
    with open(laptimedb, 'rb') as f:
        prepared_laptimedata = pickle.load(f, encoding='latin1')
else:
    # cardinality of the static categorical features (car id, optionally event id)
    if useeid:
        cardinality = [len(global_carids), len(laptime_data)]
    else:
        cardinality = [len(global_carids)]
    prepared_laptimedata = prepare_laptimedata(laptime_data,
            prediction_length, freq, test_event = _test_event,
            train_ratio=0, context_ratio = 0.,shift_len = prediction_length)
    train_ds, test_ds,_,_ = make_dataset_byevent(prepared_laptimedata,
            prediction_length,freq,
            useeid=useeid, run_ts=_run_ts,
            test_event=_test_event, log_transform =False,
            context_ratio=0, train_ratio = 0, joint_train = _joint_train)
    if _savedata:
        print('Save Gluonts Dataset:',dbname)
        with open(dbname, 'wb') as f:
            savedata = [freq, prediction_length, cardinality, train_ds, test_ds]
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        print('Save preprocessed laptime Dataset:',laptimedb)
        with open(laptimedb, 'wb') as f:
            pickle.dump(prepared_laptimedata, f, pickle.HIGHEST_PROTOCOL)
# ### 3. train the model
# Skip training when a checkpoint already exists (or for arima, which has
# no trainable model); otherwise train and optionally serialize.

id='oracle'  # NOTE(review): shadows the builtin `id`
run=1
runid=f'{trainmodel}-{dataset}-all-indy-f1min-t{prediction_length}-e{epochs}-r{run}_{id}_t{prediction_length}'
modelfile = _task_dir + runid
if trainmodel == 'arima':
    print('Skip train arima model')
elif _skip_overwrite and os.path.exists(modelfile):
    print('Model checkpoint found at:',modelfile)
else:
    # get target dim from the first training entry (multivariate targets
    # are 2-D; univariate are 1-D)
    entry = next(iter(train_ds))
    target_dim = entry['target'].shape
    target_dim = target_dim[0] if len(target_dim) > 1 else 1
    print('target_dim:%s', target_dim)  # NOTE(review): logger-style format in print
    estimator = init_estimator(trainmodel, gpuid,
            epochs, batch_size,target_dim,
            distr_output = distr_output,use_feat_static = use_feat_static)
    if gvar.use_validation:
        predictor = estimator.train(train_ds, test_ds)
    else:
        predictor = estimator.train(train_ds)
    if _savedata:
        os.makedirs(modelfile, exist_ok=True)
        print('Start to save the model to %s', modelfile)  # NOTE(review): logger-style format in print
        predictor.serialize(Path(modelfile))
        print('End of saving the model.')
#
# ### 4. evaluate the model
# Load cached simulation results when present; otherwise run the
# simulations (rank or timediff task), compute the risk metrics, and
# persist everything.

lapmode = _inlap_status
fmode = _feature_mode
runts = dataset
mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], cur_featurestr)
datasetid = outputRoot + _dataset_id

if _skip_overwrite and os.path.exists(SIMULATION_OUTFILE):
    print('Load Simulation Results:',SIMULATION_OUTFILE)
    with open(SIMULATION_OUTFILE, 'rb') as f:
        dfs,acc,ret,pret = pickle.load(f, encoding='latin1')
    print('.......loaded data, ret keys=', ret.keys())
    # init the stint module
    #
    # in test mode, set all train_len = 40 to unify the evaluation results
    #
    init_simulation(datasetid, _test_event, 'rank',stint.COL_RANK,'rank',prediction_length,
            pitmodel=pitmodel, inlapmode=lapmode,featuremode =fmode,
            train_len = _test_train_len, pitmodel_bias= _pitmodel_bias, prepared_laptimedata = prepared_laptimedata)
else:
    #run simulation
    acc, ret, pret = {}, {}, {}
    #lapmode = _inlap_status
    #fmode = _feature_mode
    #runts = dataset
    #mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], featurestr[fmode])
    if runts == 'rank':
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                'rank',stint.COL_RANK,'rank',
                prediction_length, stint.MODE_ORACLE,loopcnt,
                pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
                train_len = _test_train_len, forecastmode = _forecast_mode, joint_train = _joint_train,
                pitmodel_bias= _pitmodel_bias, prepared_laptimedata = prepared_laptimedata,
                epochs = epochs)
    else:
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                'timediff',stint.COL_TIMEDIFF,'timediff2rank',
                prediction_length, stint.MODE_ORACLE,loopcnt,
                pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
                train_len = _test_train_len, forecastmode = _forecast_mode, joint_train = _joint_train,
                pitmodel_bias= _pitmodel_bias, prepared_laptimedata = prepared_laptimedata,
                epochs = epochs)
    if _forecast_mode == 'shortterm':
        # prediction-risk metrics computed directly from the samples
        allsamples, alltss = get_allsamples(ret[mid], year=year)
        _, pret[mid]= prisk_direct_bysamples(allsamples, alltss)
        print(pret[mid])
    dfs={}
    mode=1
    df = get_alldf_mode(ret[mid], year=year,mode=mode, forecast_mode = _forecast_mode)
    name = '%s_%s'%(testmodel, 'mean' if mode==1 else ('mode' if mode==0 else 'median'))
    if year not in dfs:
        dfs[year] = {}
    dfs[year][name] = df
    _trim = 0
    _include_final = True
    _include_stintlen = True
    include_str = '1' if _include_final else '0'
    stint_str = '1' if _include_stintlen else ''
    #simulation_outfile=outputRoot + f'shortterm-dfout-oracle-indy500-{dataset}-{inlapstr[_inlap_status]}-{featurestr[_feature_mode]}-2018-oracle-l{loopcnt}-alldata-weighted.pickle'
    with open(SIMULATION_OUTFILE, 'wb') as f:
        savedata = [dfs,acc,ret,pret]
        pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)

#alias
ranknetdf = dfs
ranknet_ret = ret
# In[10]:
# ### 5. final evaluation
# In[11]:
# Build (or reload) the final evaluation table `oracle_eval_result`.
# Short-term mode reports Top1Acc/SignAcc/MAE/50-Risk/90-Risk overall and
# split by normal vs. pit-covered laps; stint mode compares against the
# classic ML baselines (CurRank/RandomForest/SVM/XGBoost).
if _skip_overwrite and os.path.exists(EVALUATION_RESULT_DF):
    print('Load Evaluation Results:',EVALUATION_RESULT_DF)
    oracle_eval_result = pd.read_csv(EVALUATION_RESULT_DF)
else:
    # get pit laps, pit-covered-laps
    # pitdata[event] = [pitlaps, pitcoveredlaps]
    #with open(PITCOVERED_DATASET, 'rb') as f:
    #    pitdata = pickle.load(f, encoding='latin1')
    #with open(STAGE_DATASET, 'rb') as f:
    #    stagedata, _race_info, _events, _events_id = pickle.load(f, encoding='latin1')
    # _alldata, rankdata, _acldata, _flagdata = stagedata[_test_event]
    # NOTE(review): `pitdata` is not loaded here (loading is commented out);
    # it is presumably populated by an imported module — confirm before use.
    _alldata, rankdata, _acldata, _flagdata = stagedata[_test_event]
    ##-------------------------------------------------------------------------------
    if _forecast_mode == 'shortterm':
        #
        # Model,SignAcc,MAE,50-Risk,90-Risk
        #
        cols = ['Year','Model','ExpID','laptype','Top1Acc','SignAcc', 'MAE','50-Risk','90-Risk']
        plen = prediction_length
        usemeanstr='mean'
        #load data
        # dfs,acc,ret,pret
        retdata = []
        #oracle
        # --- overall ('all') metrics over every lap ---
        dfx = ret[mid]
        allsamples, alltss = get_allsamples(dfx, year=year)
        #_, pret[mid]= prisk_direct_bysamples(ret[mid][0][1], ret[mid][0][2])
        _, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
        dfout = do_rerank(ranknetdf[year][f'{testmodel}_mean'])
        accret = stint.get_evalret_shortterm(dfout)[0]
        #fsamples, ftss = runs2samples_ex(ranknet_ret[f'oracle-RANK-{year}-inlap-nopitage'],[])
        #_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
        retdata.append([year,f'{testmodel}',configname,'all', accret[0], accret[4], accret[1], prisk_vals[1], prisk_vals[2]])
        # --- split metrics: normal laps vs. pit-covered laps ---
        for laptype in ['normal','pit']:
            # select the set
            pitcoveredlaps = pitdata[_test_event][1]
            gvar.maxlap = get_event_info(_test_event)[2]
            normallaps = set([x for x in range(1, gvar.maxlap + 1)]) - pitcoveredlaps
            if laptype == 'normal':
                sellaps = normallaps
                clearlaps = pitcoveredlaps
            else:
                sellaps = pitcoveredlaps
                clearlaps = normallaps
            # pitcoveredlaps start idx = 1
            # A forecast whose horizon ends on a selected lap starts
            # plen+1 laps earlier (lap numbers are 1-based, indices 0-based).
            startlaps = [x-plen-1 for x in sellaps]
            #sellapidx = np.array([x-1 for x in sellaps])
            clearidx = np.array([x-1 for x in clearlaps])
            print('sellaps:', len(sellaps), 'clearlaps:',len(clearlaps))
            #oracle
            #outfile=f'shortterm-dfout-ranknet-indy500-rank-inlap-nopitage-20182019-oracle-l10-alldata-weighted.pickle'
            #_all = load_dfout_all(outfile)[0]
            #ranknetdf, acc, ret, pret = _all[0],_all[1],_all[2],_all[3]
            dfout = do_rerank(ranknetdf[year][f'{testmodel}_mean'])
            # Recompute the risk metrics with the non-selected laps masked out.
            allsamples, alltss = get_allsamples(dfx, year=year)
            allsamples, alltss = clear_samples(allsamples, alltss,clearidx)
            _, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
            dfout = dfout[dfout['startlap'].isin(startlaps)]
            accret = stint.get_evalret_shortterm(dfout)[0]
            print(year, laptype,f'RankNet-{testmodel}',accret[0], accret[4], accret[1], prisk_vals[1], prisk_vals[2])
            retdata.append([year, f'{testmodel}',configname,laptype, accret[0], accret[4], accret[1], prisk_vals[1], prisk_vals[2]])
    ##-------------------------------------------------------------------------------
    elif _forecast_mode == 'stint':
        # Load the pre-tuned ML-baseline predictions matching the test mode.
        if testmodel == 'oracle':
            #datafile=f'stint-dfout-mlmodels-indy500-tr2013_2017-te2018_2019-end1-oracle-t0-tuned.pickle'
            datafile=f'{dataroot}/stint-dfout-mlmodels-{version}-end1-oracle-t0-tuned.pickle'
        else:
            #datafile=f'stint-dfout-mlmodels-indy500-tr2013_2017-te2018_2019-end1-normal-t0-tuned.pickle'
            datafile=f'{dataroot}/stint-dfout-mlmodels-{version}-end1-normal-t0-tuned.pickle'
        #preddf = load_dfout(outfile)
        with open(datafile, 'rb') as f:
            preddf = pickle.load(f, encoding='latin1')[0]
        #preddf_oracle = load_dfout(outfile)
        ranknet_ret = ret
        #discard old year
        #year <- _test_event
        # Align RankNet and baseline outputs; errlist records rows that
        # disagree so both sides are evaluated on the same set.
        errlist = {}
        errcnt, errlist[year] = cmp_df(ranknetdf[year][f'{testmodel}_mean'], preddf[_test_event]['lasso'])
        pitlaps, cautionlaps = get_racestatus_all(rankdata)
        retdata = []
        #
        # Model,SignAcc,MAE,50-Risk,90-Risk
        #
        cols = ['Year','Model','ExpID','laptype','SignAcc','MAE','50-Risk','90-Risk']
        models = {'currank':'CurRank','rf':'RandomForest','svr_lin':'SVM','xgb':'XGBoost'}
        # --- overall metrics for each ML baseline ---
        for clf in ['currank','rf','svr_lin','xgb']:
            print('year:',year,'clf:',clf)
            dfout, accret = eval_sync(preddf[_test_event][clf],errlist[year])
            fsamples, ftss = df2samples_ex(dfout)
            _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
            retdata.append([_test_event,models[clf],configname,'all', accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
        # --- overall metrics for RankNet ---
        dfout, accret = eval_sync(ranknetdf[year][f'{testmodel}_mean'], errlist[year],force2int=True)
        #fsamples, ftss = df2samples(dfout)
        fsamples, ftss = runs2samples(ranknet_ret[mid],errlist[f'{year}'])
        _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
        retdata.append([_test_event,f'{testmodel}',configname,'all',accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
        # split evaluation
        # Split by race status: laps under green vs. laps under caution.
        if True:
            for laptype in ['normalpit','cautionpit']:
                # select the set
                gvar.maxlap = get_event_info(_test_event)[2]
                normallaps = set([x for x in range(1, gvar.maxlap + 1)]) - set(cautionlaps)
                if laptype == 'normalpit':
                    sellaps = normallaps
                    clearlaps = cautionlaps
                else:
                    sellaps = cautionlaps
                    clearlaps = normallaps
                # pitcoveredlaps start idx = 1
                startlaps = [x-1 for x in sellaps]
                clearidx = np.array([x-1 for x in clearlaps])
                print('sellaps:', len(sellaps), 'clearlaps:',len(clearlaps))
                # evaluation start
                for clf in ['currank','rf','svr_lin','xgb']:
                    dfout, accret = eval_sync(preddf[_test_event][clf],errlist[year])
                    #debug
                    if clf == 'currank':
                        print('currank min startlap:', np.min(dfout.startlap.values))
                        print('currank startlaps:', dfout.startlap.values)
                        print('currank endlaps:', dfout.endlap.values)
                    #dfout = dfout[dfout['endlap'].isin(startlaps)]
                    dfout = dfout[dfout['startlap'].isin(startlaps)]
                    accret = stint.get_evalret(dfout)[0]
                    fsamples, ftss = df2samples_ex(dfout)
                    #fsamples, ftss = clear_samples(fsamples, ftss, clearidx)
                    _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
                    #dfout = dfout[dfout['endlap'].isin(startlaps)]
                    #accret = stint.get_evalret(dfout)[0]
                    retdata.append([_test_event,models[clf],configname,laptype, accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
                # RankNet on the same lap subset (verbose debug prints kept).
                dfout, accret = eval_sync(ranknetdf[year][f'{testmodel}_mean'], errlist[year],force2int=True)
                print('ranknet min startlap:', np.min(dfout.startlap.values))
                print('ranknet startlaps:', dfout.startlap.values)
                print('ranknet endlaps:', sorted(set(list((dfout.endlap.values)))))
                print('sel laps::', startlaps)
                print('clear laps::', clearidx)
                print('cautionlaps:', cautionlaps)
                dfoutx = dfout[dfout['startlap'].isin(clearidx)]
                #dfoutx = dfout[dfout['endlap'].isin(clearidx)]
                print('matched cleared endlaps::', sorted(set(list((dfoutx.endlap.values)))))
                dfout = dfout[dfout['startlap'].isin(startlaps)]
                #dfout = dfout[dfout['endlap'].isin(startlaps)]
                print('matched endlaps::', sorted(set(list((dfout.endlap.values)))))
                accret = stint.get_evalret(dfout)[0]
                #fsamples, ftss = df2samples(dfout)
                fsamples, ftss = runs2samples(ranknet_ret[mid],errlist[f'{year}'])
                fsamples, ftss = clear_samples(fsamples, ftss, clearidx)
                _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
                #dfout = dfout[dfout['endlap'].isin(startlaps)]
                #accret = stint.get_evalret(dfout)[0]
                retdata.append([_test_event,f'{testmodel}',configname,laptype,accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
    # end of evaluation
    oracle_eval_result = pd.DataFrame(data=retdata, columns=cols)
    if _savedata:
        oracle_eval_result.to_csv(EVALUATION_RESULT_DF)
# ### 6. Draw forecasting results
# In[12]:
# Build (or reload) per-car long-horizon forecast traces for plotting.
# For each car, forecasts are produced four ways from the same simulation
# output (single-run samples, single-run df, multi-run mean with/without
# rerank) plus a multi-run ensemble, so the plots can compare them.
if _forecast_mode == 'shortterm' and _joint_train == False:
    if _skip_overwrite and os.path.exists(LONG_FORECASTING_DFS):
        fname = LONG_FORECASTING_DFS
        print('Load Long Forecasting Data:',fname)
        with open(fname, 'rb') as f:
            alldata = pickle.load(f, encoding='latin1')
            print('.......loaded data, alldata keys=', alldata.keys())
    else:
        oracle_ret = ret
        mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], cur_featurestr)
        print('eval mid:', mid, f'{testmodel}_ret keys:', ret.keys())
        ## init predictor
        # NaivePredictor replays provided targets; used to stitch per-step
        # forecasts into full-race trajectories.
        _predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
        oracle_dfout = do_rerank(dfs[year][f'{testmodel}_mean'])
        carlist = set(list(oracle_dfout.carno.values))
        carlist = [int(x) for x in carlist]
        print('carlist:', carlist,'len:',len(carlist))
        #carlist = [13, 7, 3, 12]
        #carlist = [13]
        retdata = {}
        for carno in carlist:
            print("*"*40)
            print('Run models for carno=', carno)
            # create the test_ds first
            test_cars = [carno]
            #train_ds, test_ds, trainset, testset = stint.make_dataset_byevent(events_id[_test_event],
            #                         prediction_length,freq,
            #                         oracle_mode=stint.MODE_ORACLE,
            #                         run_ts = _run_ts,
            #                         test_event = _test_event,
            #                         test_cars=test_cars,
            #                         half_moving_win = 0,
            #                         train_ratio = 0.01)
            # Single-car dataset restricted to this carno.
            train_ds, test_ds, trainset, testset = make_dataset_byevent(prepared_laptimedata, prediction_length,freq,
                            useeid=useeid, run_ts=_run_ts,
                            test_event=_test_event, log_transform =False,
                            context_ratio=0, train_ratio = 0,
                            joint_train = _joint_train,
                            test_cars = test_cars)
            # Skip cars that retired too early to forecast meaningfully.
            if (len(testset) <= 10 + prediction_length):
                print('ts too short, skip ', len(testset))
                continue
            #by first run samples
            samples = oracle_ret[mid][0][1][test_cars[0]]
            tss = oracle_ret[mid][0][2][test_cars[0]]
            target_oracle1, tss_oracle1 = long_predict_bysamples('1run-samples', samples, tss, test_ds, _predictor)
            #by first run output df(_use_mean = true, already reranked)
            df = oracle_ret[mid][0][0]
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle2, tss_oracle2 = long_predict_bydf(f'{testmodel}-1run-dfout', dfin_oracle, test_ds, _predictor)
            #by multi-run mean at oracle_dfout
            df = oracle_dfout
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle3, tss_oracle3 = long_predict_bydf(f'{testmodel}-multimean', dfin_oracle, test_ds, _predictor)
            #no rerank
            df = ranknetdf[year][f'{testmodel}_mean']
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle4, tss_oracle4 = long_predict_bydf(f'{testmodel}-norerank-multimean', dfin_oracle, test_ds, _predictor)
            #by multiple runs
            target_oracle_multirun, tss_oracle_multirun = get_ranknet_multirun(
                            oracle_ret[mid],
                            test_cars[0], test_ds, _predictor,sampleCnt=loopcnt)
            # retdata[carno] = [[time series list], [forecast target list]]
            retdata[carno] = [[tss_oracle1,tss_oracle2,tss_oracle3,tss_oracle4,tss_oracle_multirun],
                      [target_oracle1,target_oracle2,target_oracle3,target_oracle4,target_oracle_multirun]]
        alldata = retdata
        if _savedata:
            with open(LONG_FORECASTING_DFS, 'wb') as f:
                pickle.dump(alldata, f, pickle.HIGHEST_PROTOCOL)
# In[13]:
# Render per-car forecast figures and a combined summary plot, then print
# the final evaluation table to stdout.
if _draw_figs:
    if _forecast_mode == 'shortterm' and _joint_train == False:
        destdir = FORECAST_FIGS_DIR
        if _skip_overwrite and os.path.exists(destdir):
            print('Long Forecasting Figures at:',destdir)
        else:
            #with open(STAGE_DATASET, 'rb') as f:
            #    stagedata = pickle.load(f, encoding='latin1')
            # _alldata, rankdata, _acldata, _flagdata = stagedata[_test_event]
            #set gobal variable
            # plotoracle reads race status from gvar.rankdata.
            gvar.rankdata = rankdata
            #destdir = outputRoot + 'oracle-forecast-figs/'
            os.makedirs(destdir, exist_ok=True)
            for carno in alldata:
                plotoracle(alldata, carno, destdir)
            #draw summary result
            outputfile = destdir + f'{configname}'
            plotallcars(alldata, outputfile, drawid = 0)
# final output
# Disable pandas truncation so the whole result table is printed.
pd.set_option("display.max_rows", None, "display.max_columns", None)
print(oracle_eval_result)
| 38,207 | 36.203505 | 185 | py |
rankpredictor | rankpredictor-master/run/22.PaperFinal/notebook/RankNet-QuickTest-Slim-beforemultidataset.py | #!/usr/bin/env python
# coding: utf-8
# ## QuickTest Slim
#
# based on : RankNet-QuickTest-Joint
#
# makedb laptime
# makedb gluonts
# train model
# evaluate model
#
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import random
import mxnet as mx
from mxnet import gluon
import pickle
import json
import copy
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from pathlib import Path
import configparser
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWeightEstimator
#import indycar.model.stint_simulator_shortterm_pitmodel as stint
import indycar.model.quicktest_simulator as stint
# import all functions
#from indycar.model.global_variables import _hi
import indycar.model.global_variables as gvar
from indycar.model.quicktest_modules import *
# ## run
# In[2]:
### run — configure a script-level logger named after the executable.
import logging.config

program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)

# Root logger: timestamped "time : LEVEL : message" format at INFO level.
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(logging.INFO)

# Record the full command line for reproducibility.
logger.info("running %s", ' '.join(sys.argv))
# cmd argument parser
# Command-line overrides for the config file; every option defaults to a
# sentinel ('' or -1) meaning "do not override the config value".
usage = 'RankNet-QuickTest.py <configfile> [options]'
parser = OptionParser(usage)
parser.add_option("--forecast_mode", dest="forecast_mode", default="")
parser.add_option("--trainmodel", default='', dest="trainmodel")
parser.add_option("--testmodel", default='', dest="testmodel")
parser.add_option("--joint_train", action="store_true", default=False, dest="joint_train")
parser.add_option("--debug", action="store_true", default=False, dest="debug")
parser.add_option("--loopcnt", default=-1,type='int', dest="loopcnt")
parser.add_option("--gpuid", default=-1,type='int', dest="gpuid")
parser.add_option("--pitmodel_bias", default=-1, type='int', dest="pitmodel_bias")
parser.add_option("--year", default='', dest="year")
parser.add_option("--test_event", default='', dest="test_event")
opt, args = parser.parse_args()
print(len(args), opt.joint_train)
#check validation
if len(args) != 1:
    # BUGFIX: the original logged globals()['__doc__'] % locals(), but this
    # script has no module docstring, so __doc__ is None and the error path
    # itself raised TypeError. Log the usage string instead.
    logger.error('expected exactly one <configfile> argument; usage: %s', usage)
    sys.exit(-1)
# Load all experiment parameters from the .ini config file. Every name
# assigned here is a module-level setting read by the later sections.
configfile = args[0]
base=os.path.basename(configfile)
configname = os.path.splitext(base)[0]
WorkRootDir = 'QuickTestOutput'
#configname = 'weighted-noinlap-nopitage-nocate-c60-drank'
#configfile = f'{configname}.ini'
if not os.path.exists(configfile):
    print('config file not exists error:', configfile)
    sys.exit(-1)
# configfile is non-empty here (args[0] passed the existence check), so
# this branch always runs; the else is dead-code guard kept for safety.
if configfile != '':
    config = configparser.RawConfigParser()
    #config.read(WorkRootDir + '/' + configfile)
    config.read(configfile)
    #set them back
    section = "RankNet-QuickTest"
    _savedata = config.getboolean(section, "_savedata")
    _skip_overwrite = config.getboolean(section, "_skip_overwrite")
    _inlap_status = config.getint(section, "_inlap_status") #0
    _feature_mode = config.getint(section, "_feature_mode") #FEATURE_STATUS
    _featureCnt = config.getint(section, "_featureCnt") #9
    freq = config.get(section, "freq") #"1min"
    _train_len = config.getint(section, "_train_len") #40
    prediction_length = config.getint(section, "prediction_length") #2
    context_ratio = config.getfloat(section, "context_ratio") #0.
    context_length = config.getint(section, "context_length") #40
    dataset= config.get(section, "dataset") #'rank'
    epochs = config.getint(section, "epochs") #1000
    gpuid = config.getint(section, "gpuid") #5
    _use_weighted_model = config.getboolean(section, "_use_weighted_model")
    trainmodel = config.get(section, "trainmodel") #'deepARW-Oracle' if _use_weighted_model else 'deepAR-Oracle'
    _use_cate_feature = config.getboolean(section, "_use_cate_feature")
    distroutput = config.get(section, "distroutput") #'student'
    batch_size = config.getint(section, "batch_size") #32
    loopcnt = config.getint(section, "loopcnt") #2
    _test_event = config.get(section, "_test_event") #'Indy500-2018'
    testmodel = config.get(section, "testmodel") #'oracle'
    pitmodel = config.get(section, "pitmodel") #'oracle'
    year = config.get(section, "year") #'2018'
    # Aliases used by downstream helpers.
    contextlen = context_length
    use_feat_static = _use_cate_feature
    #config1 = get_config()
else:
    print('Warning, please use config file')
    sys.exit(0)
# In[3]:
# debug test
#_skip_overwrite = False
# Apply hard-coded defaults for parameters not present in the config file,
# then apply any command-line overrides on top. The commented-out blocks
# document alternative train/test model combinations used during research.
if opt.debug:
    _debugstr = '-debug'
else:
    _debugstr = ''
#gpuid = 5
#epochs = 1000
# new added parameters
# Test-time defaults: fixed train_len unifies evaluation across configs.
_test_train_len = 40
_joint_train = False
_pitmodel_bias = 0
#_test_event = 'Indy500-2019'
#year = '2019'
#shortterm, stint
#_forecast_mode = 'stint'
_forecast_mode = 'shortterm'
# bias of the pitmodel
#_pitmodel_bias = 4
#train model: [deepARW-Oracle, deepAR]
# test the standard deepAR model training and testing
# DeepAR
#trainmodel = 'deepAR'
#testmodel = 'standard'
# Joint
#trainmodel = 'deepAR-multi'
#testmodel = 'joint'
#_joint_train = True
#loopcnt = 2
# transformer
#trainmodel = 'Transformer-Oracle'
#testmodel = 'Transformer-Oracle'
#trainmodel = 'Transformer'
#testmodel = 'Transformer'
#_joint_train = False
#loopcnt = 2
#load arguments overwites
# Command-line options override config values only when explicitly set
# (non-empty string / positive int sentinels).
if opt.forecast_mode != '':
    _forecast_mode = opt.forecast_mode
if opt.trainmodel != '':
    trainmodel = opt.trainmodel
if opt.testmodel != '':
    testmodel = opt.testmodel
if opt.joint_train != False:
    _joint_train = True
if opt.gpuid > 0:
    gpuid = opt.gpuid
if opt.loopcnt > 0:
    loopcnt = opt.loopcnt
if opt.pitmodel_bias >= 0:
    _pitmodel_bias = opt.pitmodel_bias
if opt.year != '':
    year = opt.year
if opt.test_event != '':
    _test_event = opt.test_event
# Encode a non-zero pitmodel bias into the test-model name, e.g. 'pitmodel4'.
if testmodel == 'pitmodel':
    testmodel = 'pitmodel%s'%(_pitmodel_bias if _pitmodel_bias!=0 else '')
#featurestr = {FEATURE_STATUS:'nopitage',FEATURE_PITAGE:'pitage',FEATURE_LEADERPITCNT:'leaderpitcnt'}
#cur_featurestr = featurestr[_feature_mode]
print('current configfile:', configfile)
cur_featurestr = decode_feature_mode(_feature_mode)
print('feature_mode:', _feature_mode, cur_featurestr)
print('testmodel:', testmodel)
print('pitmodel:', pitmodel)
print('year:', year)
print('test_event:', _test_event)
# In[4]:
#
# string map
#
# Human-readable tokens used to compose experiment/output identifiers.
inlapstr = {0:'noinlap',1:'inlap',2:'outlap'}
weightstr = {True:'weighted',False:'noweighted'}
catestr = {True:'cate',False:'nocate'}
#
# input data parameters
#
# All Indy500 events 2013-2019 form the dataset; events_id maps name->index.
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}_v{_featureCnt}_p{_inlap_status}'
_dataset_id = '%s-%s'%(inlapstr[_inlap_status], cur_featurestr)
#
# internal parameters
#
distr_outputs ={'student':StudentTOutput(),
                'negbin':NegativeBinomialOutput()
               }
distr_output = distr_outputs[distroutput]
#
#
#
# experimentid names the output directory for this parameter combination.
experimentid = f'{weightstr[_use_weighted_model]}-{inlapstr[_inlap_status]}-{cur_featurestr}-{catestr[_use_cate_feature]}-c{context_length}{_debugstr}'
#
#
#
outputRoot = f"{WorkRootDir}/{experimentid}/"
# standard output file names
LAPTIME_DATASET = f'{outputRoot}/laptime_rank_timediff_pit-oracle-{dbid}.pickle'
STAGE_DATASET = f'{outputRoot}/stagedata-{dbid}.pickle'
# year related
SIMULATION_OUTFILE = f'{outputRoot}/{_test_event}/{_forecast_mode}-dfout-{trainmodel}-indy500-{dataset}-{inlapstr[_inlap_status]}-{cur_featurestr}-{testmodel}-l{loopcnt}-alldata.pickle'
EVALUATION_RESULT_DF = f'{outputRoot}/{_test_event}/{_forecast_mode}-evaluation_result_d{dataset}_m{testmodel}.csv'
LONG_FORECASTING_DFS = f'{outputRoot}/{_test_event}/{_forecast_mode}-long_forecasting_dfs_d{dataset}_m{testmodel}.pickle'
FORECAST_FIGS_DIR = f'{outputRoot}/{_test_event}/{_forecast_mode}-forecast-figs-d{dataset}_m{testmodel}/'
# In[5]:
# set global vars
# Mirror every resolved setting into the shared global-variables module so
# that imported helpers (quicktest_modules, simulator) see the same state.
gvar._savedata = _savedata
gvar._skip_overwrite = _skip_overwrite
gvar._inlap_status = _inlap_status
gvar._feature_mode = _feature_mode
gvar._featureCnt = _featureCnt
gvar.freq = freq
gvar._train_len = _train_len
gvar.prediction_length = prediction_length
gvar.context_ratio = context_ratio
gvar.context_length = context_length
gvar.contextlen = contextlen
gvar.dataset = dataset
gvar.epochs = epochs
gvar.gpuid = gpuid
gvar._use_weighted_model = _use_weighted_model
gvar.trainmodel = trainmodel
gvar._use_cate_feature = _use_cate_feature
gvar.use_feat_static = use_feat_static
gvar.distroutput = distroutput
gvar.batch_size = batch_size
gvar.loopcnt = loopcnt
gvar._test_event = _test_event
gvar.testmodel = testmodel
gvar.pitmodel = pitmodel
gvar.year = year
gvar._forecast_mode = _forecast_mode
gvar._test_train_len = _test_train_len
gvar._joint_train = _joint_train
gvar._pitmodel_bias = _pitmodel_bias
gvar.events = events
gvar.events_id = events_id
# ### 1. make laptime dataset
# In[6]:
# Build (or reload from cache) the laptime dataset and per-event stage data,
# and assign every car number a globally unique id across all events.
stagedata = {}
global_carids = {}
os.makedirs(outputRoot, exist_ok=True)
os.makedirs(f'{outputRoot}/{_test_event}', exist_ok=True)
#check the dest files first
if _skip_overwrite and os.path.exists(LAPTIME_DATASET) and os.path.exists(STAGE_DATASET):
    #
    # load data
    #
    print('Load laptime and stage dataset:',LAPTIME_DATASET, STAGE_DATASET)
    with open(LAPTIME_DATASET, 'rb') as f:
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    with open(STAGE_DATASET, 'rb') as f:
        stagedata = pickle.load(f, encoding='latin1')
else:
    cur_carid = 0
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        print('%s: carno=%d, lapnum=%d'%(event, len(carlist), len(laplist)))
        #build the carid map
        # Assign ids in first-seen order so they are stable across runs
        # over the same event list.
        for car in carlist:
            if car not in global_carids:
                global_carids[car] = cur_carid
                cur_carid += 1
    laptime_data = get_laptime_dataset(stagedata, inlap_status = _inlap_status)
    if _savedata:
        # NOTE: removed a redundant local `import pickle` here; pickle is
        # already imported at the top of the file.
        #stintdf.to_csv('laptime-%s.csv'%year)
        #savefile = outputRoot + f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
        savefile = LAPTIME_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            #pack [global_carids, laptime_data]
            savedata = [global_carids, laptime_data]
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        #savefile = outputRoot + f'stagedata-{dbid}.pickle'
        savefile = STAGE_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            # save the per-event stage data dict
            savedata = stagedata
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
#update global var
gvar.global_carids = global_carids
# ### 2. make gluonts db
# In[7]:
# Build (or reload from cache) the GluonTS train/test datasets and the
# preprocessed laptime data for the configured target series type.
outdir = outputRoot + _dataset_id
os.makedirs(outdir, exist_ok=True)
# Select which column of the laptime data is the forecast target.
if dataset == 'laptime':
    subdir = 'laptime-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_LAPTIME
elif dataset == 'timediff':
    subdir = 'timediff-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_TIMEDIFF
elif dataset == 'rank':
    subdir = 'rank-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_RANK
else:
    # NOTE(review): execution continues after this message with _run_ts
    # undefined; an unsupported dataset will fail later, not here.
    print('error, dataset not support: ', dataset)
_task_dir = f'{outdir}/{subdir}/'
#
#dbname, train_ds, test_ds = makedbs()
#
useeid = False
interpolate = False
#ipstr = '-ip' if interpolate else '-noip'
ipstr = '%s-%s'%('ip' if interpolate else 'noip', 'eid' if useeid else 'noeid')
jointstr = '-joint' if _joint_train else ''
dbname = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}{jointstr}.pickle'
laptimedb = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}-newlaptimedata.pickle'
#check the dest files first
if _skip_overwrite and os.path.exists(dbname) and os.path.exists(laptimedb):
    print('Load Gluonts Dataset:',dbname)
    with open(dbname, 'rb') as f:
        freq, prediction_length, cardinality, train_ds, test_ds = pickle.load(f, encoding='latin1')
        print('.......loaded data, freq=', freq, 'prediction_length=', prediction_length)
    print('Load New Laptime Dataset:',laptimedb)
    with open(laptimedb, 'rb') as f:
        prepared_laptimedata = pickle.load(f, encoding='latin1')
else:
    # Cardinality of the static categorical features: car id (+ event id).
    if useeid:
        cardinality = [len(global_carids), len(laptime_data)]
    else:
        cardinality = [len(global_carids)]
    prepared_laptimedata = prepare_laptimedata(laptime_data,
                    prediction_length, freq, test_event = _test_event,
                    train_ratio=0, context_ratio = 0.,shift_len = prediction_length)
    train_ds, test_ds,_,_ = make_dataset_byevent(prepared_laptimedata,
                    prediction_length,freq,
                    useeid=useeid, run_ts=_run_ts,
                    test_event=_test_event, log_transform =False,
                    context_ratio=0, train_ratio = 0, joint_train = _joint_train)
    if _savedata:
        print('Save Gluonts Dataset:',dbname)
        with open(dbname, 'wb') as f:
            savedata = [freq, prediction_length, cardinality, train_ds, test_ds]
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        print('Save preprocessed laptime Dataset:',laptimedb)
        with open(laptimedb, 'wb') as f:
            pickle.dump(prepared_laptimedata, f, pickle.HIGHEST_PROTOCOL)
# ### 3. train the model
# In[8]:
# Train the forecasting model, or reuse an existing checkpoint whose
# directory name encodes the full run configuration.
id='oracle'
run=1
runid=f'{trainmodel}-{dataset}-all-indy-f1min-t{prediction_length}-e{epochs}-r{run}_{id}_t{prediction_length}'
modelfile = _task_dir + runid
if _skip_overwrite and os.path.exists(modelfile):
    print('Model checkpoint found at:',modelfile)
else:
    # Infer target dimensionality from the first training entry:
    # multivariate targets have shape (dim, T), univariate (T,).
    entry = next(iter(train_ds))
    target_dim = entry['target'].shape
    target_dim = target_dim[0] if len(target_dim) > 1 else 1
    # NOTE: fixed from print('target_dim:%s', target_dim), which printed a
    # literal '%s' (print does not interpolate like logging does).
    print('target_dim:', target_dim)
    estimator = init_estimator(trainmodel, gpuid,
                epochs, batch_size,target_dim, distr_output = distr_output,use_feat_static = use_feat_static)
    predictor = estimator.train(train_ds)
    if _savedata:
        os.makedirs(modelfile, exist_ok=True)
        # NOTE: fixed from print('Start to save ... %s', modelfile), which
        # printed a literal '%s' instead of the path.
        print('Start to save the model to', modelfile)
        predictor.serialize(Path(modelfile))
        print('End of saving the model.')
#
# ### 4. evaluate the model
# In[9]:
# Run (or reload from cache) the race simulation for the test event.
# `mid` is the key used to index results per model/dataset/year/feature combo.
lapmode = _inlap_status
fmode = _feature_mode
runts = dataset
mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], cur_featurestr)
datasetid = outputRoot + _dataset_id
if _skip_overwrite and os.path.exists(SIMULATION_OUTFILE):
    # Cached results exist: load dfs/acc/ret/pret instead of re-simulating.
    print('Load Simulation Results:',SIMULATION_OUTFILE)
    with open(SIMULATION_OUTFILE, 'rb') as f:
        dfs,acc,ret,pret = pickle.load(f, encoding='latin1')
        print('.......loaded data, ret keys=', ret.keys())
    # init the stint module
    #
    # in test mode, set all train_len = 40 to unify the evaluation results
    #
    # Even when loading cached results, the stint module state must be
    # (re)initialized because later sections call into it.
    init_simulation(datasetid, _test_event, 'rank',stint.COL_RANK,'rank',prediction_length,
                pitmodel=pitmodel, inlapmode=lapmode,featuremode =fmode,
                train_len = _test_train_len, pitmodel_bias= _pitmodel_bias)
else:
    #run simulation
    acc, ret, pret = {}, {}, {}
    #lapmode = _inlap_status
    #fmode = _feature_mode
    #runts = dataset
    #mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], featurestr[fmode])
    # The 'rank' dataset predicts rank directly; any other dataset predicts
    # time differences and converts them to ranks ('timediff2rank').
    if runts == 'rank':
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                 'rank',stint.COL_RANK,'rank',
                 prediction_length, stint.MODE_ORACLE,loopcnt,
                 pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
                 train_len = _test_train_len, forecastmode = _forecast_mode, joint_train = _joint_train,
                 pitmodel_bias= _pitmodel_bias, prepared_laptimedata = prepared_laptimedata,
                 epochs = epochs)
    else:
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                 'timediff',stint.COL_TIMEDIFF,'timediff2rank',
                 prediction_length, stint.MODE_ORACLE,loopcnt,
                 pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
                 train_len = _test_train_len, forecastmode = _forecast_mode, joint_train = _joint_train,
                 pitmodel_bias= _pitmodel_bias, prepared_laptimedata = prepared_laptimedata,
                 epochs = epochs)
    if _forecast_mode == 'shortterm':
        # Quantile-risk metrics only apply to short-term forecasts.
        allsamples, alltss = get_allsamples(ret[mid], year=year)
        _, pret[mid]= prisk_direct_bysamples(allsamples, alltss)
        print(pret[mid])
    # Aggregate the per-run outputs into one dataframe; mode=1 selects the
    # mean aggregation (0 would be mode, otherwise median).
    dfs={}
    mode=1
    df = get_alldf_mode(ret[mid], year=year,mode=mode, forecast_mode = _forecast_mode)
    name = '%s_%s'%(testmodel, 'mean' if mode==1 else ('mode' if mode==0 else 'median'))
    if year not in dfs:
        dfs[year] = {}
    dfs[year][name] = df
    # The flags below appear unused in this section — presumably kept for
    # compatibility with older output-naming code; verify before removing.
    _trim = 0
    _include_final = True
    _include_stintlen = True
    include_str = '1' if _include_final else '0'
    stint_str = '1' if _include_stintlen else ''
    #simulation_outfile=outputRoot + f'shortterm-dfout-oracle-indy500-{dataset}-{inlapstr[_inlap_status]}-{featurestr[_feature_mode]}-2018-oracle-l{loopcnt}-alldata-weighted.pickle'
    with open(SIMULATION_OUTFILE, 'wb') as f:
        savedata = [dfs,acc,ret,pret]
        pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
#alias
# Aliases used by the evaluation/plotting sections below.
ranknetdf = dfs
ranknet_ret = ret
# ### 5. final evaluation
# In[10]:
if _skip_overwrite and os.path.exists(EVALUATION_RESULT_DF):
print('Load Evaluation Results:',EVALUATION_RESULT_DF)
oracle_eval_result = pd.read_csv(EVALUATION_RESULT_DF)
else:
##-------------------------------------------------------------------------------
if _forecast_mode == 'shortterm':
# get pit laps, pit-covered-laps
# pitdata[year] = [pitlaps, pitcoveredlaps]
with open('pitcoveredlaps-g1.pickle', 'rb') as f:
# The protocol version used is detected automatically, so we do not
# have to specify it.
pitdata = pickle.load(f, encoding='latin1')
#
# Model,SignAcc,MAE,50-Risk,90-Risk
#
cols = ['Year','Model','ExpID','laptype','Top1Acc','MAE','50-Risk','90-Risk']
plen = prediction_length
usemeanstr='mean'
#load data
# dfs,acc,ret,pret
retdata = []
#oracle
dfx = ret[mid]
allsamples, alltss = get_allsamples(dfx, year=year)
#_, pret[mid]= prisk_direct_bysamples(ret[mid][0][1], ret[mid][0][2])
_, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
dfout = do_rerank(ranknetdf[year][f'{testmodel}_mean'])
accret = stint.get_evalret_shortterm(dfout)[0]
#fsamples, ftss = runs2samples_ex(ranknet_ret[f'oracle-RANK-{year}-inlap-nopitage'],[])
#_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
retdata.append([year,f'{testmodel}',configname,'all', accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
for laptype in ['normal','pit']:
# select the set
pitcoveredlaps = pitdata[year][1]
normallaps = set([x for x in range(1,201)]) - pitcoveredlaps
if laptype == 'normal':
sellaps = normallaps
clearlaps = pitcoveredlaps
else:
sellaps = pitcoveredlaps
clearlaps = normallaps
# pitcoveredlaps start idx = 1
startlaps = [x-plen-1 for x in sellaps]
#sellapidx = np.array([x-1 for x in sellaps])
clearidx = np.array([x-1 for x in clearlaps])
print('sellaps:', len(sellaps), 'clearlaps:',len(clearlaps))
#oracle
#outfile=f'shortterm-dfout-ranknet-indy500-rank-inlap-nopitage-20182019-oracle-l10-alldata-weighted.pickle'
#_all = load_dfout_all(outfile)[0]
#ranknetdf, acc, ret, pret = _all[0],_all[1],_all[2],_all[3]
dfout = do_rerank(ranknetdf[year][f'{testmodel}_mean'])
allsamples, alltss = get_allsamples(dfx, year=year)
allsamples, alltss = clear_samples(allsamples, alltss,clearidx)
_, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
dfout = dfout[dfout['startlap'].isin(startlaps)]
accret = stint.get_evalret_shortterm(dfout)[0]
print(year, laptype,f'RankNet-{testmodel}',accret[0], accret[1], prisk_vals[1], prisk_vals[2])
retdata.append([year, f'{testmodel}',configname,laptype, accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
##-------------------------------------------------------------------------------
elif _forecast_mode == 'stint':
if testmodel == 'oracle':
datafile=f'stint-dfout-mlmodels-indy500-tr2013_2017-te2018_2019-end1-oracle-t0-tuned.pickle'
else:
datafile=f'stint-dfout-mlmodels-indy500-tr2013_2017-te2018_2019-end1-normal-t0-tuned.pickle'
#preddf = load_dfout(outfile)
with open(datafile, 'rb') as f:
preddf = pickle.load(f, encoding='latin1')[0]
#preddf_oracle = load_dfout(outfile)
ranknet_ret = ret
errlist = {}
errcnt, errlist[year] = cmp_df(ranknetdf[year][f'{testmodel}_mean'], preddf[year]['lasso'])
retdata = []
#
# Model,SignAcc,MAE,50-Risk,90-Risk
#
cols = ['Year','Model','ExpID','laptype','SignAcc','MAE','50-Risk','90-Risk']
models = {'currank':'CurRank','rf':'RandomForest','svr_lin':'SVM','xgb':'XGBoost'}
for clf in ['currank','rf','svr_lin','xgb']:
print('year:',year,'clf:',clf)
dfout, accret = eval_sync(preddf[year][clf],errlist[year])
fsamples, ftss = df2samples_ex(dfout)
_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
retdata.append([year,models[clf],configname,'all', accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
#ml models -oracle
#for clf in ['rf','svr_lin','xgb']:
# print('year:',year,'clf:',clf)
# dfout, accret = eval_sync(preddf_oracle[year][clf],errlist[year])
# fsamples, ftss = df2samples(dfout)
# _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
# retdata.append([year,models[clf]+'-Oracle',configname,'all',accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
dfout, accret = eval_sync(ranknetdf[year][f'{testmodel}_mean'], errlist[year],force2int=True)
#fsamples, ftss = df2samples(dfout)
fsamples, ftss = runs2samples(ranknet_ret[mid],errlist[f'{year}'])
_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
retdata.append([year,f'{testmodel}',configname,'all',accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
#dfout, accret = eval_sync(ranknetdf[year]['oracle_mean'], errlist[year],force2int=True)
##fsamples, ftss = df2samples(dfout)
#fsamples, ftss = runs2samples(ranknet_ret[f'oracle-TIMEDIFF-{year}-noinlap-nopitage'],errlist[f'{year}'])
#_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
#retdata.append([year,'RankNet-Oracle',accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
oracle_eval_result = pd.DataFrame(data=retdata, columns=cols)
if _savedata:
oracle_eval_result.to_csv(EVALUATION_RESULT_DF)
# ### 6. Draw forecasting results
# In[11]:
# Build (or reload from cache) per-car long-horizon forecasting curves for
# plotting. Only meaningful for shortterm forecasts with a single target.
if _forecast_mode == 'shortterm' and _joint_train == False:
    if _skip_overwrite and os.path.exists(LONG_FORECASTING_DFS):
        fname = LONG_FORECASTING_DFS
        print('Load Long Forecasting Data:',fname)
        with open(fname, 'rb') as f:
            alldata = pickle.load(f, encoding='latin1')
        print('.......loaded data, alldata keys=', alldata.keys())
    else:
        oracle_ret = ret
        # key identifying the current run inside the result dict
        mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], cur_featurestr)
        print('eval mid:', mid, f'{testmodel}_ret keys:', ret.keys())

        ## init predictor
        _predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
        oracle_dfout = do_rerank(dfs[year][f'{testmodel}_mean'])

        carlist = set(list(oracle_dfout.carno.values))
        carlist = [int(x) for x in carlist]
        print('carlist:', carlist,'len:',len(carlist))
        #carlist = [13, 7, 3, 12]
        #carlist = [13]
        retdata = {}
        for carno in carlist:
            print("*"*40)
            print('Run models for carno=', carno)
            # create the test_ds first
            test_cars = [carno]
            #train_ds, test_ds, trainset, testset = stint.make_dataset_byevent(events_id[_test_event],
            #                    prediction_length,freq,
            #                    oracle_mode=stint.MODE_ORACLE,
            #                    run_ts = _run_ts,
            #                    test_event = _test_event,
            #                    test_cars=test_cars,
            #                    half_moving_win = 0,
            #                    train_ratio = 0.01)
            train_ds, test_ds, trainset, testset = make_dataset_byevent(prepared_laptimedata, prediction_length,freq,
                                 useeid=useeid, run_ts=_run_ts,
                                 test_event=_test_event, log_transform =False,
                                 context_ratio=0, train_ratio = 0,
                                 joint_train = _joint_train,
                                 test_cars = test_cars)
            # skip cars whose time series is too short to forecast
            if (len(testset) <= 10 + prediction_length):
                print('ts too short, skip ', len(testset))
                continue

            #by first run samples
            samples = oracle_ret[mid][0][1][test_cars[0]]
            tss = oracle_ret[mid][0][2][test_cars[0]]
            target_oracle1, tss_oracle1 = long_predict_bysamples('1run-samples', samples, tss, test_ds, _predictor)

            #by first run output df(_use_mean = true, already reranked)
            df = oracle_ret[mid][0][0]
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle2, tss_oracle2 = long_predict_bydf(f'{testmodel}-1run-dfout', dfin_oracle, test_ds, _predictor)

            #by multi-run mean at oracle_dfout
            df = oracle_dfout
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle3, tss_oracle3 = long_predict_bydf(f'{testmodel}-multimean', dfin_oracle, test_ds, _predictor)

            #no rerank
            df = ranknetdf[year][f'{testmodel}_mean']
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle4, tss_oracle4 = long_predict_bydf(f'{testmodel}-norerank-multimean', dfin_oracle, test_ds, _predictor)

            #by multiple runs
            target_oracle_multirun, tss_oracle_multirun = get_ranknet_multirun(
                    oracle_ret[mid],
                    test_cars[0], test_ds, _predictor,sampleCnt=loopcnt)

            # per car: [list of observed ts, list of forecast targets],
            # one entry per prediction variant above
            retdata[carno] = [[tss_oracle1,tss_oracle2,tss_oracle3,tss_oracle4,tss_oracle_multirun],
                              [target_oracle1,target_oracle2,target_oracle3,target_oracle4,target_oracle_multirun]]
        alldata = retdata
        if _savedata:
            with open(LONG_FORECASTING_DFS, 'wb') as f:
                pickle.dump(alldata, f, pickle.HIGHEST_PROTOCOL)
# In[12]:
# Figure-drawing section — currently disabled (guarded by `if False`),
# kept for reference.
if False:
    if _forecast_mode == 'shortterm' and _joint_train == False:
        destdir = FORECAST_FIGS_DIR
        if _skip_overwrite and os.path.exists(destdir):
            print('Long Forecasting Figures at:',destdir)
        else:
            with open('stagedata-Indy500_2013_2019_v9_p0.pickle', 'rb') as f:
                stagedata = pickle.load(f, encoding='latin1')
            _alldata, rankdata, _acldata, _flagdata = stagedata[_test_event]
            #destdir = outputRoot + 'oracle-forecast-figs/'
            os.makedirs(destdir, exist_ok=True)
            # one figure per car
            for carno in alldata:
                plotoracle(alldata, carno, destdir)
            #draw summary result
            outputfile = destdir + f'{configname}'
            plotallcars(alldata, outputfile, drawid = 0)

# final output
# disable pandas truncation so the whole evaluation table is printed
pd.set_option("display.max_rows", None, "display.max_columns", None)
print(oracle_eval_result)
| 30,803 | 35.282686 | 185 | py |
rankpredictor | rankpredictor-master/run/22.PaperFinal/notebook/RankNet-QuickTest-BeforeSlim.py | #!/usr/bin/env python
# coding: utf-8
"""
RankNet QuickTest goes through the following steps
makedb laptime
makedb gluonts
train model
evaluate model
draw figures
version 0.4
supported features:
forecast_mode: shortterm, stint
trainmodel : deepAR , deepARW-Oracle, deepAR-multi
testmodel : standard, oracle,pitmodel, joint
Usage: RankNet-QuickTest.py <configfile> [options]
    options overwrite the configurations for quick-experiment needs; they include:
_forecast_mode ;
trainmodel ;
testmodel ;
_joint_train ; False/True
loopcnt ; 100/2
_pitmodel_bias ; 0/2,4
year ; 2018/2019
_test_event ; Indy500-2018, Indy500-2019
"""
import logging
from optparse import OptionParser
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import random
import mxnet as mx
from mxnet import gluon
import pickle
import json
import copy
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from pathlib import Path
import configparser
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWeightEstimator
#import indycar.model.stint_simulator_shortterm_pitmodel as stint
import indycar.model.quicktest_simulator as stint
logger = logging.getLogger(__name__)
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-lap completed-laps dataset for one race.

    The raw records are de-duplicated (one record per car per lap; the
    record with the smallest elapsed_time wins), re-sorted per car, and
    per-car first-difference columns are derived.

    Args:
        dataset: raw race dataframe; must contain car_number,
            completed_laps, rank, elapsed_time, track_status, lap_status
            plus the bookkeeping columns dropped below.

    Returns:
        DataFrame with columns ['car_number','completed_laps','time_diff',
        'rank','track_status','lap_status','elapsed_time'], where time_diff
        is the elapsed-time delta between consecutive laps of the same car
        (forced to 0 on each car's first lap).
    """
    # pick up data with valid rank: order globally by elapsed time (ties
    # broken by original row order) and keep the earliest record per lap
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
                          "last_pitted_lap","start_position","laps_led"], axis=1)

    carnumber = set(uni_ds['car_number'])
    print('cars:', carnumber)
    print('#cars=', len(carnumber))

    # faster solution , uni_ds already sorted by car_number and lap
    # BUGFIX: use .loc instead of chained indexing; the previous
    # `uni_ds['rank_diff'][mask] = 0` was unreliable (SettingWithCopy
    # warning, and a silent no-op under pandas copy-on-write).
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds.loc[mask, 'rank_diff'] = 0

    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds.loc[mask, 'time_diff'] = 0

    df = uni_ds[['car_number','completed_laps','time_diff','rank','track_status', 'lap_status','elapsed_time']]
    return df
def make_lapstatus_data(dataset):
    """Return the per-lap track status, taken from one car that finished.

    Picks the first car whose completed_laps reaches the race's final lap
    (such a car has a record for every lap) and returns its
    ['completed_laps', 'track_status'] columns, one row per lap.
    """
    last_lap = max(dataset.completed_laps)

    # cars that reached the final lap completed the race
    finishers = dataset[dataset.completed_laps == last_lap].car_number.values
    print('count of completed cars:', len(finishers))
    print('completed cars:', finishers)

    # keep only the first finisher, with one record per lap
    sample = dataset[dataset['car_number'] == finishers[0]]
    sample = sample.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    return sample[['completed_laps', 'track_status']]
def load_data(event, year=0):
    """Load one race's final CSV and derive the working datasets.

    Args:
        event: event name, e.g. 'Indy500-2018' (used in the CSV file name).
        year: optional year suffix; 0 (default) means the event name already
            carries the year.

    Returns:
        (alldata, rankdata, acldata, flagdata) where alldata is the raw
        dataframe, rankdata the de-duplicated records ordered by elapsed
        time, acldata the per-lap dataset for all cars (make_cl_data), and
        flagdata the per-lap track status (make_lapstatus_data).
    """
    #inputfile = '../data/final/C_'+ event +'-' + year + '-final.csv'
    if year>0:
        # BUGFIX: `year` is an int here; concatenating it directly to a str
        # raised TypeError. Convert explicitly.
        inputfile = '../data/final/C_'+ event +'-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_'+ event +'.csv'
    #outputprefix = year +'-' + event + '-'
    dataset = pd.read_csv(inputfile)
    #dataset.info(verbose=True)

    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1

    # get records for the cars that finish the race
    completed_car_numbers= dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)

    print('count of completed cars:', completed_car_count)
    print('completed cars:', completed_car_numbers)

    #make a copy
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    cldata = make_cl_data(dataset)
    flagdata = make_lapstatus_data(dataset)
    acldata = make_cl_data(alldata)

    return alldata, rankdata, acldata, flagdata
def nan_helper(y):
    """Helper to handle indices and logical indices of NaNs.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, logical indices of NaNs
        - index, a function, with signature indices= index(logical_indices),
          to convert logical indices of NaNs to 'equivalent' indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x= nan_helper(y)
        >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
    """
    nan_mask = np.isnan(y)

    def to_indices(logical_mask):
        # convert a boolean mask into positional indices
        return logical_mask.nonzero()[0]

    return nan_mask, to_indices
def get_lap2nextpit(lap_status, maxlap=200):
    """
    For every lap, compute how many laps remain until the next pit stop.

    input:
        lap_status ; float array of 0/1 pit flags per lap; NaN entries mark
                     an incomplete race
        maxlap     ; the max lap number of the race
    output:
        lap2nextpit ; float array, same length as lap_status; entry i is the
                      gap (in laps) to the next pit stop, NaN where unknown
                      (after the last pit of an incomplete race)
    """
    stops = list(np.where(lap_status == 1)[0])

    # cars that completed the race: treat the race end as one final
    # virtual stop so the last stint also gets a defined gap
    if np.sum(np.isnan(lap_status)) == 0:
        stops.append(maxlap)

    gap = np.zeros_like(lap_status)
    gap[:] = np.nan
    # guard: no pit at all -> everything stays unknown
    if not stops:
        return gap

    ptr = 0
    for lap in range(len(lap_status)):
        # advance to the first stop strictly after this lap
        while ptr < len(stops) and lap >= stops[ptr]:
            ptr += 1
        if ptr >= len(stops):
            break
        gap[lap] = stops[ptr] - lap
    return gap
def get_lapdata(acldata):
    """Flatten the per-lap records and derive the time-behind-leader column.

    input:
        acldata['car_number','completed_laps','time_diff','rank','track_status', 'lap_status','elapsed_time']
            timediff: [car_number, completed_laps] -> elapsed time diff to leader
    output:
        numpy array with one row per (car, lap):
        [car_number, lap, time_diff, rank, track_status, lap_status,
         time_behind, elapsed_time]
        NOTE: rows mix ints, floats and status strings, so numpy stores the
        array as strings; callers cast fields back (e.g. float(row[2])).
    """
    # column offsets inside the per-lap selection below
    COL_COMPLETED_LAPS = 1
    COL_ELAPSED_TIME = 6

    maxlap = np.max(acldata['completed_laps'].values)

    #'car_number','completed_laps','time_diff','rank','track_status', 'lap_status','time_behind'
    time_behind = []
    for lap in range(1, maxlap+1):
        this_lap = acldata[acldata['completed_laps']==lap][
                ['car_number','completed_laps','time_diff','rank',
                 'track_status', 'lap_status','elapsed_time']].values

        # leader's elapsed time on this lap
        # BUGFIX: np.float was removed in NumPy 1.24+; use builtin float
        min_elapsed_time = np.nanmin(this_lap[:,COL_ELAPSED_TIME].astype(float))
        #print(f'lap:{lap}, min_elapsed_time:{min_elapsed_time}')

        for row in this_lap:
            car_number = int(row[0])
            time_diff = row[2]
            rank = row[3]
            track_status = row[4]
            lap_status = row[5]
            timebehind = float(row[COL_ELAPSED_TIME]) - min_elapsed_time
            #
            time_behind.append([car_number, lap, time_diff,rank,track_status, lap_status,
                                timebehind, float(row[COL_ELAPSED_TIME])])

    #return
    lapdata = np.array(time_behind)
    return lapdata
# features: laptime, rank, track_status, lap_status, timediff
# Row indices of the per-car feature matrix built by get_laptime_dataset()
# (datalist[car, feature, lap]).
LAPTIME = 0                 # lap time (per-lap time_diff)
RANK = 1                    # rank position on the lap
TRACK_STATUS = 2            # 1 = caution ('Y'), 0 = green
LAP_STATUS = 3              # 1 = pit stop ('P') on this lap
TIME_BEHIND = 4             # elapsed-time gap to the lap leader
CAUTION_LAPS_INSTINT = 5    # caution laps since the last pit stop
LAPS_INSTINT = 6            # laps since the last pit stop (pit age)
ELAPSED_TIME = 7            # cumulative race time
LAP2NEXTPIT = 8             # laps remaining until the next pit stop
_featureCnt = 9             # number of feature rows above
def get_laptime_dataset(stagedata, inlap_status = 0):
    """
    #add caution_laps_instint, laps_instint
    Build the per-event feature tensors from the stage datasets.

    input:
        stagedata    ; {event: (alldata, rankdata, acldata, flagdata)}
        inlap_status ; 0 = off; 1 = also flag the lap *before* each pit lap
                       ('inlap'); any other value flags the lap *after* it
    output: laptime & rank data
        [(
        eventid,
        carids : rowid -> carno,
        datalist: #car_number x features x #totallaps (padded by Nan)
             entry: [[laptime, rank, track_status, lap_status,
                     caution_laps_instint, laps_instint]]
        )]
    """
    laptime_data = []
    for event in stagedata.keys():
        print(f'start event: {event}')
        eventid = events_id[event]

        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        totalcars = len(carlist)
        totallaps = len(laplist)

        #carnumber -> carid and its inverse
        carids={key:idx for idx, key in enumerate(carlist)}
        decode_carids={idx:key for idx, key in enumerate(carlist)}

        #init per-car stint counters
        lap_instint = {carids[x]:0 for x in carlist}
        caution_instint = {carids[x]:0 for x in carlist}

        # feature tensor: car x feature x lap, NaN-padded.
        # BUGFIX: the np.NaN alias was removed in NumPy 2.0 -> use np.nan.
        # (Also dropped the dead `laptime`/`rank` scratch arrays of the old
        # code; they were never read, and `rank` was shadowed in the loop.)
        datalist = np.empty((totalcars, _featureCnt, totallaps-1))
        datalist[:] = np.nan

        #'car_number','completed_laps','time_diff','rank','track_status', 'lap_status','time_behind'
        lapdata = get_lapdata(acldata)

        for row in lapdata:
            #completed_laps: lap 0 carries no lap time, skip it
            if int(row[1]) == 0:
                continue

            #add to data array (laps are re-based to start at index 0)
            car_number = carids[int(row[0])]
            completed_laps = int(row[1])-1
            time_diff = float(row[2])
            rank = int(row[3])
            track_status = 1 if row[4]=='Y' else 0
            lap_status = 1 if row[5]=='P' else 0
            time_behind = float(row[6])

            datalist[car_number, LAPTIME, completed_laps] = time_diff
            datalist[car_number, RANK, completed_laps] = rank
            datalist[car_number, TRACK_STATUS, completed_laps] = track_status
            datalist[car_number, LAP_STATUS, completed_laps] = lap_status
            datalist[car_number, TIME_BEHIND, completed_laps] = time_behind
            datalist[car_number, ELAPSED_TIME, completed_laps] = float(row[7])

            #stint status
            if track_status == 1:
                caution_instint[car_number] += 1
            lap_instint[car_number] += 1
            if lap_status == 1:
                #new stint: reset the counters
                lap_instint[car_number] = 0
                caution_instint[car_number] = 0

            # add inlap feature into lap_Status:
            # mark the lap adjacent to a pit lap as a pit lap too
            if (inlap_status!=0):
                if inlap_status == 1:
                    # set the previous lap of 'P'
                    if completed_laps > 0:
                        datalist[car_number, LAP_STATUS, completed_laps-1] = 1
                else:
                    # set the next lap of 'P'
                    if completed_laps +1 < totallaps:
                        datalist[car_number, LAP_STATUS, completed_laps + 1] = 1

            datalist[car_number, LAPS_INSTINT, completed_laps] = lap_instint[car_number]
            datalist[car_number, CAUTION_LAPS_INSTINT, completed_laps] = caution_instint[car_number]

        #update lap2nextpit in datalist
        for caridx in range(datalist.shape[0]):
            lap_status = datalist[caridx, LAP_STATUS, :]
            #pit status
            lap2nextpit = get_lap2nextpit(lap_status)
            datalist[caridx, LAP2NEXTPIT, :] = lap2nextpit

        #add one record
        laptime_data.append([eventid, decode_carids, datalist])

        # push this event into stage dataframe
        print('event=%s, records=%s'%(event, datalist.shape))
    return laptime_data
# In[ ]:
def nan_helper(y):
    """Return a NaN mask for *y* plus a converter from mask to indices.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, logical indices of NaNs
        - index, a function converting a logical mask to positional indices
    Example (linear interpolation of NaNs):
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    return np.isnan(y), (lambda mask: np.nonzero(mask)[0])
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
#
# Feature-row indices of the laptime_data matrices [car, feature, lap].
COL_LAPTIME = 0
COL_RANK = 1
COL_TRACKSTATUS = 2
COL_LAPSTATUS = 3
COL_TIMEDIFF = 4
COL_CAUTION_LAPS_INSTINT = 5
COL_LAPS_INSTINT = 6
COL_ELAPSED_TIME = 7
COL_LAP2NEXTPIT = 8
#_featureCnt = 9
# added new features (created by prepare_laptimedata)
COL_LEADER_PITCNT = 9          # pit count of higher-ranked cars on the lap
COL_TOTAL_PITCNT = 10          # total pit count of all cars on the lap
COL_SHIFT_TRACKSTATUS = 11     # track status shifted left by shift_len laps
COL_SHIFT_LAPSTATUS = 12       # pit flag shifted left by shift_len laps
COL_SHIFT_LEADER_PITCNT = 13   # leader pit count, shifted
COL_SHIFT_TOTAL_PITCNT = 14    # total pit count, shifted
COL_LASTFEATURE = 14
# dynamically extended space in simulation (save slots for restoring state)
COL_TRACKSTATUS_SAVE = COL_LASTFEATURE+1
COL_LAPSTATUS_SAVE = COL_LASTFEATURE+2
COL_CAUTION_LAPS_INSTINT_SAVE = COL_LASTFEATURE+3
COL_LAPS_INSTINT_SAVE = COL_LASTFEATURE+4
COL_ENDPOS = COL_LASTFEATURE+5

# Bit flags selecting which covariate features feed the model
# (combined into a bitmask, see get_real_features / decode_feature_mode).
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
FEATURE_LEADER_PITCNT = 8
FEATURE_TOTAL_PITCNT = 16
FEATURE_SHIFT_TRACKSTATUS = 32
FEATURE_SHIFT_LAPSTATUS = 64
FEATURE_SHIFT_LEADER_PITCNT = 128
FEATURE_SHIFT_TOTAL_PITCNT = 256

# flag -> (long name, single-char code) used by decode_feature_mode()
_feature2str = {
        FEATURE_STATUS : ("FEATURE_STATUS",'S'),
        FEATURE_PITAGE : ("FEATURE_PITAGE",'A'),
        FEATURE_LEADER_PITCNT : ("FEATURE_LEADER_PITCNT",'L'),
        FEATURE_TOTAL_PITCNT : ("FEATURE_TOTAL_PITCNT",'T'),
        FEATURE_SHIFT_TRACKSTATUS : ("FEATURE_SHIFT_TRACKSTATUS",'Y'),
        FEATURE_SHIFT_LAPSTATUS : ("FEATURE_SHIFT_LAPSTATUS",'P'),
        FEATURE_SHIFT_LEADER_PITCNT : ("FEATURE_SHIFT_LEADER_PITCNT",'L'),
        FEATURE_SHIFT_TOTAL_PITCNT : ("FEATURE_SHIFT_TOTAL_PITCNT",'T')
        }

# Oracle-mode bit flags for make_dataset_byevent()
MODE_ORACLE = 0
MODE_NOLAP = 1
MODE_NOTRACK = 2
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
#MODE_STR={MODE_ORACLE:'oracle', MODE_NOLAP:'nolap',MODE_NOTRACK:'notrack',MODE_TEST:'test'}
#_feature_mode = FEATURE_STATUS
def decode_feature_mode(feature_mode):
    """Print the long names of the enabled feature flags and return a
    compact per-flag code string ('0' in the slot of each disabled flag)."""
    enabled = []
    code = []
    for flag, (longname, ch) in _feature2str.items():
        if test_flag(feature_mode, flag):
            enabled.append(longname)
            code.append(ch)
        else:
            code.append('0')
    print(' '.join(enabled))
    return ''.join(code)
def add_leader_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS, shift_len = 0,
                   dest_col = COL_LEADER_PITCNT,
                   verbose = False):
    """
    add a new feature into mat(car, feature, lap):
    for every lap, the number of pit stops taken by cars ranked *ahead* of
    each car (cumulative sum over the rank ordering, excluding the car's
    own pit flag). The rank ordering of lap-shift_len is used for lap.

    input:
        selmat   ; laptime_data array [car, feature, lap]
        rank_col ; feature row holding the rank
        pit_col  ; feature row holding the pit flag
        shift_len; look back this many laps when picking the rank ordering
        dest_col ; feature row to write into; -1 appends a new row
    output:
        the updated (or extended) matrix
    """
    dim1, dim2, dim3 = selmat.shape

    # rerank by the rank_col
    # (BUGFIX: removed the dead `true_rank` computation here — its result
    # was never used and its `.astype(np.float)` crashes on NumPy >= 1.24,
    # where the np.float alias was removed)
    idx = np.argsort(selmat[:, rank_col,:], axis=0)

    # get leaderCnt by sorted pits
    pits = np.zeros((dim1,dim3))
    for lap in range(shift_len, dim3):
        col = idx[:, lap-shift_len]
        pits[:, lap] = selmat[col, pit_col, lap]
    # cumulative pits of the cars ahead, minus the car's own pit
    leaderCnt = np.nancumsum(pits, axis=0) - pits

    if verbose:
        print('pits:\n')
        print(pits[:,190:])
        print('leaderCnt raw:\n')
        print(leaderCnt[:,190:])

    #remove nans
    nanidx = np.isnan(leaderCnt)
    leaderCnt[nanidx] = 0
    if verbose:
        print('leaderCnt after remove nan:\n')
        print(leaderCnt[:,190:])

    if dest_col == -1:
        #create a new data row
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode
        newmat = selmat

    # scatter the counts back into the original car order
    for lap in range(dim3):
        col = idx[:, lap]
        newmat[col, dest_col, lap] = leaderCnt[:, lap]

    # sync length to COL_RANK: NaN-pad the tail where the rank ts has ended
    for rec in newmat:
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            #todo, some invalid nan, remove them
            #rec[dim2, np.isnan(rec[dim2,:])] = 0
            rec[dest_col, -nan_count:] = np.nan
    return newmat
def add_allpit_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS,
                   dest_col = COL_TOTAL_PITCNT,verbose = False):
    """
    add a new feature into mat(car, feature, lap):
    the total number of pit stops by all cars on each lap (the same value
    is broadcast to every car's row).

    input:
        selmat   ; laptime_data array [car, feature, lap]
        rank_col ; feature row holding the rank (used to NaN-pad the tail)
        pit_col  ; feature row holding the pit flag
        dest_col ; feature row to write into; -1 appends a new row
    output:
        the updated (or extended) matrix
    """
    dim1, dim2, dim3 = selmat.shape

    #calc totalCnt vector: pits summed over all cars, per lap
    totalCnt = np.nansum(selmat[:, pit_col, :], axis=0).reshape((-1))
    if verbose:
        # BUGFIX: this branch referenced an undefined name `pits`
        # (NameError whenever verbose=True); print the per-car pit flags.
        print('pits:\n')
        print(selmat[:, pit_col, 190:])
        print('totalCnt raw:\n')
        print(totalCnt[190:])

    #remove nans
    nanidx = np.isnan(totalCnt)
    totalCnt[nanidx] = 0
    if verbose:
        print('totalCnt after remove nan:\n')
        print(totalCnt[190:])

    if dest_col == -1:
        #create a new data row
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode
        newmat = selmat

    for car in range(dim1):
        newmat[car, dest_col, :] = totalCnt

    # sync length to COL_RANK: NaN-pad the tail where the rank ts has ended
    for rec in newmat:
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            #todo, some invalid nan, remove them
            #rec[dim2, np.isnan(rec[dim2,:])] = 0
            rec[dest_col, -nan_count:] = np.nan
    return newmat
def add_shift_feature(selmat, rank_col=COL_RANK, shift_col=COL_LAPSTATUS, shift_len = 2,
                      dest_col = -1,verbose = False):
    """
    add a new feature into mat(car, feature, lap):
    the values of `shift_col` shifted *left* by shift_len laps within each
    car's valid (non-NaN) span — i.e. lap i receives the value of lap
    i+shift_len. The last shift_len valid laps are left as 0; laps past
    the valid span stay NaN.

    warning: these are oracle features, be careful not to let future rank
    positions leaking

    input:
        selmat   ; laptime_data array [car, feature, lap]
        rank_col ; feature row whose non-NaN span defines the valid length
        shift_col; feature row to shift
        shift_len; number of laps to shift left
        dest_col ; feature row to write into; -1 appends a new row
        verbose  ; unused, kept for interface parity with the sibling helpers
    output:
        the updated (or extended) matrix
    """
    dim1, dim2, dim3 = selmat.shape

    if dest_col == -1:
        #create a new data row
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode
        newmat = selmat

    for car in range(dim1):
        # set empty status by default
        newmat[car, dest_col, :] = np.nan
        # get valid laps (the non-NaN span of the rank row)
        rec = selmat[car]
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        recnnz = rec[shift_col, ~np.isnan(rec[rank_col,:])]
        reclen = len(recnnz)
        #shift copy: zero-fill the valid span, then copy the shifted values
        newmat[car, dest_col, :reclen] = 0
        #newmat[car, dim2, :-shift_len] = selmat[car, shift_col, shift_len:]
        newmat[car, dest_col, :reclen-shift_len] = recnnz[shift_len:]

    # sync length to COL_RANK
    #for rec in newmat:
    #    nans, x= nan_helper(rec[rank_col,:])
    #    nan_count = np.sum(nans)
    #    if nan_count > 0:
    #        #todo, some invalid nan, remove them
    #        #rec[dim2, np.isnan(rec[dim2,:])] = 0
    #        rec[dim2, -nan_count:] = np.nan
    return newmat
def prepare_laptimedata(prediction_length, freq,
                        test_event = 'Indy500-2018',
                        train_ratio=0.8,
                        context_ratio = 0.,
                        shift_len = -1):
    """
    prepare the laptime data for training
        1. remove short ts
        2. rerank the tss
        3. create new features

    input:
        prediction_length ; forecast horizon, also the default shift_len
        freq              ; ts frequency (kept for interface parity)
        test_event        ; events after this one are skipped entirely
        train_ratio       ; fraction of the max ts length used as train length
        context_ratio     ; optional extra-context fraction
        shift_len         ; lap shift for the oracle shift features; <0 means
                            use prediction_length
        laptime_data      ; global var (read; entries modified in place)
    output:
        data ; new representation of laptime_data:
               [eventid, {rowid->carno}, datalist[car, feature, lap]]
    """
    _laptime_data = laptime_data.copy()
    test_eventid = events_id[test_event]
    run_ts = COL_RANK

    # check shift len
    if shift_len < 0:
        shift_len = prediction_length
    print('prepare_laptimedata shift len:', shift_len)

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    new_data = []
    for _data in _laptime_data:
        #skip eid > test_eventid
        if _data[0] > test_eventid:
            print('skip this event:', events[_data[0]])
            break
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            #use global train_len
            train_len = _train_len if not test_mode else _test_train_len

        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        print(f'before ====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        #rerank due to short ts removed
        #if run_ts == COL_RANK and dorerank == True:
        if True:
            sel_rows = []
            # use to check the dimension of features: fewer rows than
            # COL_LASTFEATURE+1 means the new feature rows must be created
            input_feature_cnt = _data[2].shape[1]
            if input_feature_cnt < COL_LASTFEATURE + 1:
                print('create new features mode, feature_cnt:', input_feature_cnt)
            else:
                print('update features mode, feature_cnt:', input_feature_cnt)

            # step 1: keep only cars whose ts is long enough
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()

                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]

                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    print(f'rerank a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                else:
                    sel_rows.append(rowid)

            #get selected matrix
            sel_idx = np.array(sel_rows)
            selmat = _data[2][sel_idx]

            # step 2: rerank among the remaining cars
            mask = np.isnan(selmat[:,COL_RANK,:])
            idx = np.argsort(selmat[:,COL_RANK,:], axis=0)
            # BUGFIX: np.float was removed in NumPy 1.24+; use builtin float
            true_rank = np.argsort(idx, axis=0).astype(float)
            true_rank[mask] = np.nan

            if test_mode:
                #
                # for historical code mismatch, simulation does not run rerank
                # (the test event keeps 1-based ranks)
                #
                _data[2][sel_idx,COL_RANK,:] = true_rank + 1
            else:
                _data[2][sel_idx,COL_RANK,:] = true_rank

            # update the carno dict
            new_carids = {}
            for rowid in range(len(sel_idx)):
                carid = sel_idx[rowid]
                carno = _data[1][carid]
                new_carids[rowid] = carno

            # step 3: add new features
            # add leaderPitCnt (verbose dump only for the first event)
            if _data[0]==0:
                verbose = True
            else:
                verbose = False
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_LEADER_PITCNT
            data2_intermediate = add_leader_cnt(_data[2][sel_idx], shift_len = shift_len, dest_col=dest_col, verbose = verbose)

            # add totalPit
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_TOTAL_PITCNT
            data2_intermediate = add_allpit_cnt(data2_intermediate, dest_col=dest_col)

            #
            # add shift features, a fixed order, see the MACROS
            #COL_SHIFT_TRACKSTATUS = 11
            #COL_SHIFT_LAPSTATUS = 12
            #COL_SHIFT_LEADER_PITCNT = 13
            #COL_SHIFT_TOTAL_PITCNT = 14
            #
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TRACKSTATUS
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                                                   shift_col=COL_TRACKSTATUS, shift_len = shift_len)
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LAPSTATUS
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                                                   shift_col=COL_LAPSTATUS, shift_len = shift_len)
            # leader_pitcnt can not be shift, target leaking, just do not use it
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LEADER_PITCNT
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                                                   shift_col=COL_LEADER_PITCNT, shift_len = shift_len)
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TOTAL_PITCNT
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                                                   shift_col=COL_TOTAL_PITCNT, shift_len = shift_len)
            # final
            data2_newfeature = data2_intermediate

        new_data.append([_data[0], new_carids, data2_newfeature])
    return new_data
def get_real_features(feature_mode, rec, endpos):
    """
    Assemble the list of real-valued covariate vectors selected by the
    bits of feature_mode (see the FEATURE_* flag constants).

    input:
        feature_mode ; bitmask of FEATURE_* flags
        rec          ; feature matrix [feature, lap] of one car
        endpos       ; truncate every vector at this lap; <=0 = full length
    output:
        list of 1-d arrays, emitted in a fixed flag order
    """
    #check endpos
    if endpos <= 0:
        endpos = rec.shape[1]

    features = []
    # FEATURE_STATUS contributes two rows (track status + pit flag)
    if test_flag(feature_mode, FEATURE_STATUS):
        features.append(rec[COL_TRACKSTATUS,:endpos])
        features.append(rec[COL_LAPSTATUS,:endpos])

    # each remaining flag contributes one row, in this fixed order
    single_row_flags = [
        (FEATURE_PITAGE, COL_LAPS_INSTINT),
        (FEATURE_LEADER_PITCNT, COL_LEADER_PITCNT),
        (FEATURE_TOTAL_PITCNT, COL_TOTAL_PITCNT),
        (FEATURE_SHIFT_TRACKSTATUS, COL_SHIFT_TRACKSTATUS),
        (FEATURE_SHIFT_LAPSTATUS, COL_SHIFT_LAPSTATUS),
        (FEATURE_SHIFT_LEADER_PITCNT, COL_SHIFT_LEADER_PITCNT),
        (FEATURE_SHIFT_TOTAL_PITCNT, COL_SHIFT_TOTAL_PITCNT),
    ]
    for flag, col in single_row_flags:
        if test_flag(feature_mode, flag):
            features.append(rec[col,:endpos])
    return features
def make_dataset_byevent(_laptime_data, prediction_length, freq,
                useeid = False,
                run_ts=COL_LAPTIME,
                test_event = 'Indy500-2018',
                use_global_dict = True,
                oracle_mode = MODE_ORACLE,
                half_moving_win = True,
                train_ratio=0.8,
                log_transform = False,
                context_ratio = 0.,
                dorerank = True,
                test_cars = []
                ):
    """
    split the ts to train and test part by the ratio

    Builds GluonTS ListDatasets out of the per-event lap records. Every
    series of a non-test event goes entirely to the train set; for the test
    event the first `context_len` laps optionally go to the train set (only
    when context_ratio != 0) and a rolling window (step -1) over the
    remaining laps produces one test record per end position.

    oracle_mode: false to simulate prediction in real by
        set the covariates of track and lap status as nan in the testset

    Parameters
    ----------
    _laptime_data : list of [eventid, carids, data[car, feature, lap]]
    prediction_length, freq : forecast horizon and pandas frequency
    useeid : also put the event id into feat_static_cat
    run_ts : row index of the target feature (e.g. COL_LAPTIME, COL_RANK)
    use_global_dict : map car numbers through global_carids (else use rowid)
    half_moving_win : currently unused; the rolling step is hard-coded to -1
    train_ratio : fraction of the longest series used as train length
    log_transform : apply log(x + 1) to the target
    context_ratio : fraction of the longest series kept as test-set context
    test_cars : when non-empty, restrict the test event to these car numbers

    Depends on module globals: _feature_mode, _joint_train, _train_len,
    _test_train_len, events, events_id, global_carids.

    Returns
    -------
    train_ds, test_ds : ListDataset
    train_set, test_set : the underlying record lists
    """
    #global setting
    feature_mode = _feature_mode

    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []

    # statistics over all events / series
    totalTSCnt = 0
    totalTSLen = 0

    test_eventid = events_id[test_event]

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []

        # only the configured test event contributes test records
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            #use global train_len
            train_len = _train_len if not test_mode else _test_train_len

        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        # keep a minimum context of 10 laps
        if context_len < 10:
            context_len = 10

        print(f'after ====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()

            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]

            # remove short ts
            totallen = rec.shape[1]
            totalTSCnt += 1
            totalTSLen += totallen
            if ( totallen < train_len + prediction_length):
                print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue

            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid

            #check carno in test_cars, testmode only
            if len(test_cars)>0 and carno not in test_cars:
                continue

            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]

            #first, get target a copy
            # target can be COL_XXSTATUS
            if _joint_train:
                # two-row target: <run_ts, lap status>
                target_cols = [run_ts, COL_LAPSTATUS]
                target_val = rec[target_cols].copy().astype(np.float32)
            else:
                target_val = rec[run_ts,:].copy().astype(np.float32)

            if log_transform:
                target_val = np.log(target_val + 1.0)

            # selection of features: optionally blank the status covariates
            if test_flag(oracle_mode, MODE_NOTRACK):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP):
                rec[COL_LAPSTATUS, :] = 0

            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                real_features = get_real_features(feature_mode, rec, -1)
                _train.append({'target': target_val,
                        'start': start,
                        'feat_static_cat': static_cat,
                        'feat_dynamic_real': real_features
                         })
            else:
                # reset train_len
                if context_ratio != 0.:
                    #add [0, context_len] to train set
                    if _joint_train:
                        _train.append({'target': target_val[:,:context_len],
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': get_real_features(feature_mode, rec, context_len)
                                 })
                    else:
                        _train.append({'target': target_val[:context_len],
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': get_real_features(feature_mode, rec, context_len)
                                 })

                # testset
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                step = -1
                for endpos in range(totallen, context_len+prediction_length,
                                    step):
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()

                    real_features = get_real_features(feature_mode, rec, endpos)

                    if _joint_train:
                        _test.append({'target': target_val[:,:endpos],
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': real_features
                                 })
                    else:
                        _test.append({'target': target_val[:endpos],
                                'start': start,
                                'feat_static_cat': static_cat,
                                'feat_dynamic_real': real_features
                                 })
                    test_rec_cnt += 1

            #check feature cnt
            # NOTE(review): real_features is bound inside the branches above;
            # if the rolling-window loop produced no record this would raise
            # NameError -- confirm test series are always long enough.
            featureCnt = len(real_features)

            #add one ts
            print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt},featureCnt:{featureCnt}')

        train_set.extend(_train)
        test_set.extend(_test)

    print(f'train len:{len(train_set)}, test len:{len(test_set)}, totsl TsCnt:{totalTSCnt}, total ts len:{totalTSLen}')

    train_ds = ListDataset(train_set, freq=freq,one_dim_target= False if _joint_train else True)
    test_ds = ListDataset(test_set, freq=freq,one_dim_target= False if _joint_train else True)

    return train_ds, test_ds, train_set, test_set
# In[ ]:
def init_estimator(model, gpuid, epochs=100, batch_size = 32,
        target_dim = 3, distr_output = None, use_feat_static = True):
    """
    Build the forecasting estimator/predictor selected by `model`.

    Relies on module-level globals: prediction_length, context_length,
    cardinality, freq.

    Parameters
    ----------
    model : str
        one of 'deepAR', 'deepAR-Oracle', 'deepARW-Oracle', 'deepAR-multi',
        'simpleFF', 'deepFactor', 'deepState', 'ets', 'prophet', 'arima',
        'naive'
    gpuid : int or str
        negative -> cpu, otherwise gpu(<id>)
    epochs, batch_size : training configuration for the gluonts Trainer
    target_dim : int
        output dimension for the multivariate 'deepAR-multi' model
    distr_output : DistributionOutput, optional
        distribution head passed to the deepAR family
    use_feat_static : bool
        whether to feed the static categorical features (car ids)

    Returns
    -------
    a gluonts estimator (or R/Prophet/naive predictor); exits the process
    for an unknown model name.
    """
    # device context string understood by the gluonts Trainer
    if int(gpuid) < 0:
        ctx = "cpu"
    else:
        ctx = "gpu(%s)" % gpuid

    def _trainer(**overrides):
        # Single place for the Trainer configuration shared by every
        # gluonts estimator below (lr 1e-3, 100 batches per epoch).
        cfg = dict(ctx=ctx,
                   batch_size=batch_size,
                   epochs=epochs,
                   learning_rate=1e-3,
                   num_batches_per_epoch=100)
        cfg.update(overrides)
        return Trainer(**cfg)

    def _deepar_kwargs(use_dynamic_real):
        # Common kwargs of the deepAR family; `cardinality` is only legal
        # when the static categorical features are enabled.
        kwargs = dict(prediction_length=prediction_length,
                      context_length=context_length,
                      use_feat_static_cat=use_feat_static,
                      use_feat_dynamic_real=use_dynamic_real,
                      distr_output=distr_output,
                      freq=freq,
                      trainer=_trainer())
        if use_feat_static:
            kwargs['cardinality'] = cardinality
        return kwargs

    if model == 'deepAR':
        estimator = DeepAREstimator(**_deepar_kwargs(False))
    elif model == 'deepAR-Oracle':
        # oracle: dynamic real covariates (track/lap status...) are visible
        estimator = DeepAREstimator(**_deepar_kwargs(True))
    elif model == 'deepARW-Oracle':
        # weighted-loss variant of the oracle model
        estimator = DeepARWeightEstimator(**_deepar_kwargs(True))
    elif model == 'deepAR-multi':
        # multivariate target; no cardinality even with static features
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            use_feat_static_cat=use_feat_static,
            use_feat_dynamic_real=False,
            freq=freq,
            trainer=_trainer(),
            distr_output=MultivariateGaussianOutput(dim=target_dim),
        )
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length=context_length,
            freq=freq,
            trainer=_trainer(hybridize=False),
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length=context_length,
            freq=freq,
            trainer=_trainer(),
        )
    elif model == 'deepState':
        # deepState always uses the static categorical features
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=_trainer(),
        )
    elif model == 'ets':
        estimator = RForecastPredictor(method_name='ets', freq=freq,
                                       prediction_length=prediction_length)
    elif model == 'prophet':
        estimator = ProphetPredictor(freq=freq,
                                     prediction_length=prediction_length)
    elif model == 'arima':
        estimator = RForecastPredictor(method_name='arima', freq=freq,
                                       prediction_length=prediction_length,
                                       trunc_length=200)
    elif model == 'naive':
        estimator = NaivePredictor(freq=freq,
                                   prediction_length=prediction_length)
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)

    return estimator
# In[ ]:
#
# simulation engine general
#
def init_simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
        featuremode = stint.FEATURE_STATUS,
        pitmodel = 0,
        inlapmode=0,
        train_len = 40,test_train_len=40,
        joint_train = False,
        pitmodel_bias = 0):
    """
    Configure the global state of the `stint` simulation module without
    running anything (see `simulation` for the configure-and-run variant).

    input:
        prepared_laptimedata ; global

    NOTE(review): `predictionlen` is accepted but not used here -- it seems
    kept only for signature symmetry with `simulation`; confirm.
    """
    #
    # configurataion
    #
    # model path: <_dataset_id>/<_task_id>-<trainid>/
    #_dataset_id = 'indy2013-2018-nocarid'
    # _inlap_status is set before stint.init(); ordering preserved from the
    # original in case init() reads it -- confirm in stint module.
    stint._inlap_status = inlapmode
    stint.init(pitmodel, pitmodel_bias= pitmodel_bias)

    # register the prepared laptime data (with the new features)
    stint.set_laptimedata(prepared_laptimedata)

    stint._dataset_id = datasetid
    stint._test_event = testevent
    #_test_event = 'Indy500-2019'

    stint._feature_mode = featuremode
    stint._context_ratio = 0.

    stint._task_id = taskid # rank,laptime, the trained model's task
    stint._run_ts = runts #COL_LAPTIME,COL_RANK
    stint._exp_id=expid #rank, laptime, laptim2rank, timediff2rank...

    stint._use_mean = True

    stint._train_len = train_len
    stint._test_train_len = test_train_len
    stint._joint_train = joint_train
def simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
        datamode, loopcnt, featuremode = stint.FEATURE_STATUS,
        pitmodel = 0, model = 'oracle', inlapmode=0, train_len = 40,test_train_len=40,
        forecastmode = 'shortterm', joint_train = False,
        pitmodel_bias= 0):
    """
    Configure `stint`, load the trained model and run `loopcnt` simulations.

    input:
        prepared_laptimedata ; global
        epochs ; global (used when loading the model)

    forecastmode: 'shortterm' (per-lap) or 'stint' (pit-to-pit)

    Returns
    -------
    b : np.ndarray of the per-run evaluation metrics
    ret2 : dict run index -> raw simulation result
    """
    #
    # configurataion
    #
    # model path: <_dataset_id>/<_task_id>-<trainid>/
    #_dataset_id = 'indy2013-2018-nocarid'
    # configuration mirrors init_simulation(); ordering preserved
    stint._inlap_status = inlapmode
    stint.init(pitmodel, pitmodel_bias= pitmodel_bias)

    stint.set_laptimedata(prepared_laptimedata)
    #stint.set_laptimedata(laptime_data)

    stint._dataset_id = datasetid
    stint._test_event = testevent
    #_test_event = 'Indy500-2019'

    stint._feature_mode = featuremode
    stint._context_ratio = 0.

    stint._task_id = taskid # rank,laptime, the trained model's task
    stint._run_ts = runts #COL_LAPTIME,COL_RANK
    stint._exp_id=expid #rank, laptime, laptim2rank, timediff2rank...

    stint._use_mean = True

    stint._train_len = train_len
    stint._test_train_len = test_train_len
    stint._joint_train = joint_train

    if forecastmode == 'stint':
        # stint-level forecasting needs different trim/pit-alignment settings
        stint._trim = 0
        stint._debug_carlist=[]
        stint._force_endpit_align = False
        stint._include_endpit = True

    predictor = stint.load_model(predictionlen, model,trainid='indy500',epochs = epochs, exproot='./')

    # run the simulations
    ret2 = {}
    for i in range(loopcnt):
        #df, full_samples, full_tss
        if forecastmode == 'shortterm':
            ret2[i] = stint.run_simulation_shortterm(predictor, predictionlen, stint.freq, datamode=datamode)
        elif forecastmode == 'stint':
            ret2[i] = stint.run_simulation_pred(predictor, predictionlen, stint.freq, datamode=datamode)
        else:
            print('forecastmode not support:', forecastmode)
            break

    # evaluate every run and average the metrics
    acc = []
    for i in ret2.keys():
        if forecastmode == 'shortterm':
            df = ret2[i][0]
            _x = stint.get_evalret_shortterm(df)
        elif forecastmode == 'stint':
            df = ret2[i]
            _x = stint.get_evalret(df)
        acc.append(_x)

    b = np.array(acc)
    print(np.mean(b, axis=0))

    #save keys
    #stint._pitmodel.save_keys('pitmodel-keys.pickle')

    return b, ret2
def long_predict(predictor, sampleCnt = 100):
    """
    Stitch a long-horizon forecast together by keeping only the farthest
    sample column of each rolling-window forecast.

    input:
        test_ds ; global var
        predictor
    return: (forecast object with stitched samples, first target ts)
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,        # test dataset
        predictor=predictor,    # predictor
        num_samples=sampleCnt,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    start_time, row = next(tss[0].iterrows())

    def get_start(idx):
        # minute offset of forecast idx relative to the series start
        delta = forecasts[idx].start_date - start_time
        return delta.days * 24 * 60 + delta.seconds // 60

    # forecasts run from the latest window (idx 0) backwards
    first_start = get_start(-1)
    last_start = get_start(0)
    print(first_start, last_start)

    import copy
    target = copy.deepcopy(forecasts[-1])
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)

    # NaN canvas covering the whole stitched horizon
    merged = np.full((nsample, last_start - first_start + npredict), np.nan)
    for idx, fc in enumerate(forecasts):
        # only the last (farthest) predicted column of each window is kept
        col = get_start(idx) - first_start + npredict - 1
        merged[:, col] = fc.samples[:, -1]
    target.samples = merged

    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target, tss[0]
def get_alldf(dfx, year=2018):
    """
    Concatenate the per-run result dataframes of one experiment.

    In 'shortterm' forecast mode each run value is a tuple whose first item
    is the dataframe; otherwise the value is the dataframe itself.
    Depends on the module global _forecast_mode.
    """
    frames = []
    for run_key in dfx.keys():
        rec = dfx[run_key]
        frames.append(rec[0] if _forecast_mode == 'shortterm' else rec)
    return pd.concat(frames) if len(frames) > 1 else frames[0]
def get_alldf_mode(dfx, year=2018,mode=0):
    """
    Aggregate multiple simulation runs into one record per <carno, startlap>.

    mode:
        0; mode
        1; mean
        2; median

    NOTE(review): pred_endrank is unbound for any other mode value -- the
    caller must pass 0/1/2.

    Returns a dataframe with one aggregated row per <carno, startlap>; the
    column set depends on the module global _forecast_mode.
    """
    dfall = get_alldf(dfx, year=year)

    cars = set(dfall.carno.values)
    # collect the observed start laps of each car
    startlaps = {}
    for car in cars:
        startlaps[car] = set(dfall[dfall['carno']==car].startlap.values)

    retdf = []
    for car in cars:
        for startlap in startlaps[car]:
            # all runs' predictions for this <car, startlap>
            dfrec = dfall[(dfall['carno']==car) & (dfall['startlap']==startlap)]

            #get mode
            if mode == 0:
                pred_endrank = stats.mode(dfrec.pred_endrank.values).mode[0]
                #pred_endlap = stats.mode(dfrec.pred_endlap.values).mode[0]
            elif mode == 1:
                #use mean
                pred_endrank = np.mean(dfrec.pred_endrank.values)
                #pred_endlap = np.mean(dfrec.pred_endlap.values)
            elif mode == 2:
                #use median
                pred_endrank = np.median(dfrec.pred_endrank.values)
                #pred_endlap = np.median(dfrec.pred_endlap.values)

            # positional columns: 0 carno, 1 startlap, 2 startrank, 3 endrank,
            # 4 diff, 5 sign, 6 pred_endrank, 7 pred_diff, 8 pred_sign
            # [, 9 endlap, 10 pred_endlap]
            firstrec = dfrec.to_numpy()[0,:]
            firstrec[6] = pred_endrank
            # NOTE(review): pred_diff is computed against column 2 (startrank)
            # here, while do_rerank computes it against endrank -- confirm
            # which convention is intended.
            firstrec[7] = pred_endrank - firstrec[2]
            if firstrec[7] == 0:
                firstrec[8] = 0
            elif firstrec[7] > 0:
                firstrec[8] = 1
            else:
                firstrec[8] = -1
            #endlap, pred_endlap
            retdf.append(firstrec)

    #dfout = pd.concat(retdf)
    if _forecast_mode == 'shortterm':
        dfout = pd.DataFrame(retdf, columns =['carno', 'startlap', 'startrank',
                              'endrank', 'diff', 'sign',
                              'pred_endrank', 'pred_diff', 'pred_sign',
                              #'endlap','pred_endlap'
                             ])
    else:
        dfout = pd.DataFrame(retdf, columns =['carno', 'startlap', 'startrank',
                              'endrank', 'diff', 'sign',
                              'pred_endrank', 'pred_diff', 'pred_sign',
                              'endlap','pred_endlap'
                             ])
    print('df size:', len(dfout))
    return dfout
def get_allsamples(dfx, year=2018):
    """
    Collapse each run's forecast samples to their per-lap mean and stack
    the runs, yielding one (runcnt x lapcnt) matrix per car.

    dfx: dict run id (0..runcnt-1) -> (df, {carno: samples}, {carno: tss})
    return: (full_samples dict, the first run's tss dict)
    """
    runs = list(dfx.keys())
    runcnt = len(runs)

    full_tss = dfx[runs[0]][2]
    carlist = list(full_tss.keys())
    samplecnt, lapcnt = dfx[runs[0]][1][carlist[0]].shape
    print('sacmplecnt:', samplecnt, 'lapcnt:', lapcnt, 'runcnt:', runcnt)

    # one empty (runcnt x lapcnt) matrix per car
    full_samples = {carno: np.zeros((runcnt, lapcnt)) for carno in carlist}

    for runid in runs:
        forecast = dfx[runid][1]
        for carno in carlist:
            # collapse this run's samples into their per-lap mean
            full_samples[carno][runid, :] = np.nanmean(forecast[carno], axis=0)

    return full_samples, full_tss
#straight implementation of prisk
def quantile_loss(target, quantile_forecast, q):
    """Return twice the pinball (quantile) loss, summed over non-NaN laps."""
    indicator = (target <= quantile_forecast) - q
    return 2.0 * np.nansum(np.abs((quantile_forecast - target) * indicator))
def abs_target_sum(target):
    """Sum of absolute target values, ignoring NaNs."""
    return np.nansum(np.absolute(target))
def prisk(full_samples, full_tss, verbose = False):
    """
    Compute the weighted quantile losses with the gluonts Evaluator.

    input:
        full_samples: dict carno -> (samples x laps) forecast array
        full_tss: dict carno -> observed series (laps)
    return: gluonts agg_metrics dict (wQuantileLoss[0.1/0.5/0.9], ...)
    """
    carlist = full_tss.keys()
    tss = []
    forecasts = []
    forecasts_mean = []
    freq = '1min'
    start = pd.Timestamp("01-01-2019", freq=freq)
    for car in carlist:
        testcar = car
        # skip the first 12 laps (warm-up); `start + 12` shifts the forecast
        # start by 12 periods -- relies on the old pandas Timestamp+int
        # arithmetic tied to `freq`; confirm with the pinned pandas version.
        fc = SampleForecast(samples = full_samples[testcar][:, 12:], freq=freq, start_date=start + 12)

        # mean forecast is built but not passed to the evaluator below
        samples = np.mean(full_samples[testcar][:, 12:], axis =0, keepdims=True)
        fc_mean = SampleForecast(samples = samples, freq=freq, start_date=start + 12)

        index = pd.date_range(start='2019-01-01 00:00:00', freq = 'T', periods = len(full_tss[testcar]))
        ts = pd.DataFrame(index = index, data = full_tss[testcar])

        tss.append(ts)
        forecasts.append(fc)
        forecasts_mean.append(fc_mean)

    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(tss))

    if verbose:
        print(json.dumps(agg_metrics, indent=4))
    print(agg_metrics["wQuantileLoss[0.1]"], agg_metrics["wQuantileLoss[0.5]"],agg_metrics["wQuantileLoss[0.9]"])
    return agg_metrics
def prisk_direct_bysamples2(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    target: endrank
    forecast: pred_endrank
    item_id: <carno, startlap>

    This was a byte-for-byte duplicate of prisk_direct_bysamples; delegate
    to it so there is a single implementation to maintain.
    """
    return prisk_direct_bysamples(full_samples, full_tss,
                                  quantiles=quantiles,
                                  startid=startid,
                                  verbose=verbose)
# In[ ]:
def prisk_direct_bysamples(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    calculate prisk by <samples, tss> directly (equal to gluonts implementation)

    For every car: take the empirical quantiles of the forecast samples,
    accumulate the pinball loss against the observed series (laps from
    `startid` on), then normalize by the total absolute target.

    return: (agg_metrics dict keyed 'wQuantileLoss[q]', aggrisk array)
    """
    carlist = full_tss.keys()
    ncars = len(carlist)
    nq = len(quantiles)

    prisk = np.zeros((ncars, nq))
    target_sum = np.zeros(ncars)
    aggrisk = np.zeros(nq)

    for carid, carno in enumerate(carlist):
        forecast = full_samples[carno]
        target = full_tss[carno]

        # nq x laps matrix of empirical quantile forecasts
        quantile_forecasts = np.quantile(forecast, quantiles, axis=0)
        for idx, q in enumerate(quantiles):
            q_forecast = quantile_forecasts[idx]
            prisk[carid, idx] = quantile_loss(target[startid:], q_forecast[startid:], q)
        target_sum[carid] = abs_target_sum(target[startid:])

        if verbose==True and carno==3:
            print('target:', target[startid:])
            print('forecast:', q_forecast[startid:])
            print('target_sum:', target_sum[carid])
            print('quantile_forecasts:', quantile_forecasts[:,startid:])

    # aggregate: total loss per quantile over total absolute target
    prisk_sum = np.nansum(prisk, axis=0)
    if verbose==True:
        print('prisk:',prisk)
        print('prisk_sum:',prisk_sum)
        print('target_sum:',target_sum)

    total = np.sum(target_sum)
    for idx in range(nq):
        aggrisk[idx] = np.divide(prisk_sum[idx], total)

    agg_metrics = {f'wQuantileLoss[{q}]': aggrisk[idx]
                   for idx, q in enumerate(quantiles)}
    print(agg_metrics.values())
    return agg_metrics, aggrisk
def clear_samples(full_samples, full_tss, clearidx):
    """
    Return deep copies of <samples, tss> with the laps in clearidx set to
    NaN; the inputs themselves are left untouched.
    """
    import copy
    out_samples = copy.deepcopy(full_samples)
    out_tss = copy.deepcopy(full_tss)

    for carno in full_tss.keys():
        # blank the requested laps for this car in both views
        out_samples[carno][:, clearidx] = np.nan
        out_tss[carno][clearidx] = np.nan

    return out_samples, out_tss
def do_rerank(dfout, short=True):
"""
carno','startlap','startrank','endrank','diff','sign','pred_endrank','pred_diff','pred_sign','endlap','pred_endlap
output of prediction of target can be float
resort the endrank globally
"""
cols=['carno','startlap','startrank','endrank','diff','sign','pred_endrank','pred_diff','pred_sign','endlap','pred_endlap']
colid={x:id for id,x in enumerate(cols)}
#df = dfout.sort_values(by=['startlap','carno'])
print('rerank...')
laps = set(dfout.startlap.values)
dfs = []
for lap in laps:
df = dfout[dfout['startlap']==lap].to_numpy()
#print('in',df)
idx = np.argsort(df[:,colid['pred_endrank']], axis=0)
true_rank = np.argsort(idx, axis=0)
df[:,colid['pred_endrank']] = true_rank
#reset preds
df[:,colid['pred_diff']] = df[:,colid['pred_endrank']] - df[:,colid['endrank']]
for rec in df:
if rec[colid['pred_diff']] == 0:
rec[colid['pred_sign']] = 0
elif rec[colid['pred_diff']] > 0:
rec[colid['pred_sign']] = 1
else:
rec[colid['pred_sign']] = -1
#print('out',df)
if len(dfs) == 0:
dfs = df
else:
dfs = np.vstack((dfs, df))
#dfs.append(df)
#np.vstack(df)
#dfret = pd.concat(dfs)
#data = np.array(dfs)
if short:
dfret = pd.DataFrame(dfs.astype(int), columns = cols[:-2])
else:
dfret = pd.DataFrame(dfs.astype(int), columns = cols)
return dfret
# In[ ]:
def long_predict_bymloutput_multirun(output, dfin, sampleCnt=100):
    """
    Build a long-horizon forecast for plotting, where each lap's sample
    column is filled with the per-run predictions taken from the ml output
    dataframe `dfin` (one value per run) instead of the gluonts samples.

    input:
        output: label only (the plotting call is commented out)
        dfin: dataframe with columns <startlap, pred_endrank, ...>
        test_ds, _predictor ; globals
    return: (forecast-like object with substituted samples, first target ts)
    """
    def get_start(idx):
        # minute offset of forecast idx relative to the series start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60

    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=_predictor,  # predictor
        num_samples=sampleCnt,  # number of sample paths we want for evaluation
    )

    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    # forecasts run from the latest window (idx 0) backwards
    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)

    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)

    # NaN canvas covering the whole stitched horizon
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan

    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]

        # get prediction from ml output
        # pos = laps
        #  1 ... 10 | 11  <- start pos in forecasts
        #  0 ... 9  | 10  <- 9 is the startlap
        # NOTE(review): the -2 offset differs from long_predict_bydf (-1);
        # confirm which lap indexing convention the ml output uses.
        startlap = start_pos - 2
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            # one prediction per run; the vector length must match nsample
            pred_val = _rec.pred_endrank.values
            #pred_val = _rec.pred_endrank.values
            #make sure shape match, 100 samples
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)

    target.samples = newsamples
    print('multirun target samples:', target.samples.shape)

    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bymloutput(output, dfin):
    """
    Build a long-horizon forecast whose per-lap sample values come from the
    ml model output dataframe `dfin` (single prediction per lap, broadcast
    over all sample rows).

    input:
        output: label only (the plotting call is commented out)
        dfin: dataframe with columns <startlap, pred_endrank, ...>
        test_ds, _predictor ; globals
    return: (forecast-like object with substituted samples, first target ts)
    """
    def get_start(idx):
        # minute offset of forecast idx relative to the series start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60

    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=_predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )

    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    # forecasts run from the latest window (idx 0) backwards
    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)

    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)

    # NaN canvas covering the whole stitched horizon
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan

    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]

        # get prediction from ml output
        # pos = laps
        #  1 ... 10 | 11  <- start pos in forecasts
        #  0 ... 9  | 10  <- 9 is the startlap
        # NOTE(review): the -2 offset differs from long_predict_bydf (-1);
        # confirm which lap indexing convention the ml output uses.
        startlap = start_pos - 2
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values[0]
            #pred_val = _rec.pred_endrank.values
            #make sure shape match, 100 samples
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)

    target.samples = newsamples
    print('target samples:', target.samples.shape)

    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bysamples(output, samples, tss):
    """
    use the farest samples only

    Build a long-horizon forecast object directly from a precomputed sample
    matrix instead of the per-window gluonts samples; the gluonts forecasts
    are still generated to obtain the start positions and the template
    forecast object.

    input:
        samples: (nsample x laps) array of 0-based ranks (+1 for display)
        tss: NOTE(review): this parameter is immediately shadowed by the
            series list regenerated below and never used -- confirm.
        test_ds, _predictor ; globals
    """
    def get_start(idx):
        # minute offset of forecast idx relative to the series start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60

    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=_predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print(first_start, last_start)

    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)

    #sample array size: last_start - first_start + npredict
    arraysize = last_start - first_start + npredict
    #error here
    #target.samples = samples[:,-len(forecasts)-1:] + 1
    #target.samples = samples[:, 10 + npredict:] + 1
    # +1: ranks are plotted 1-based
    target.samples = samples[:, first_start:first_start + arraysize] + 1
    print('long_predict_bysamples==>target samples shape:', target.samples.shape)

    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target, tss[0]
#
# different idx format to bymloutput
#
def long_predict_bydf(output, dfin):
    """
    Same as long_predict_bymloutput but with a different startlap index
    convention (start_pos - 1 instead of - 2).

    input:
        output: label only (the plotting call is commented out)
        dfin: dataframe with columns <startlap, pred_endrank, ...>
        test_ds, _predictor ; globals
    return: (forecast-like object with substituted samples, first target ts)
    """
    def get_start(idx):
        # minute offset of forecast idx relative to the series start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60

    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor= _predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )

    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    # forecasts run from the latest window (idx 0) backwards
    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)

    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)

    # NaN canvas covering the whole stitched horizon
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan

    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]

        # get prediction from ml output
        # pos = laps
        #  1 ... 10 | 11  <- start pos in forecasts
        #  0 ... 9  | 10  <- 9 is the startlap
        startlap = start_pos - 1
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values[0]
            #pred_val = _rec.pred_endrank.values
            #make sure shape match, 100 samples
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)

    target.samples = newsamples
    print('target samples:', target.samples.shape)

    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def get_ranknet_multirun(retdata, testcar, sampleCnt=100):
    """
    Collect the per-run shortterm results for one car and stitch them into
    a long forecast via long_predict_bymloutput_multirun.
    """
    perrun = [retdata[run_id][0] for run_id in retdata.keys()]
    dfin_ranknet = pd.concat([df[df['carno'] == testcar] for df in perrun])
    print('dfin_ranknet size:', len(dfin_ranknet))

    # shift to the ml model's 0-based lap/rank convention
    for col in ('startlap', 'startrank', 'endrank'):
        dfin_ranknet[col] = dfin_ranknet[col] - 1

    return long_predict_bymloutput_multirun('ranknet-rank', dfin_ranknet, sampleCnt=sampleCnt)
# In[ ]:
def ploth(ts_entry, forecast_entry, pits,caution, pitstop,outputfile,
        colors = ['r','g','m'],
        plabels= ['observed','svr','arima','ranknet'],
        ylabel = 'RANK'):
    """
    Plot the observed series and one forecast per subplot (stacked
    vertically), overlaying pit stops and caution laps, then save to
    <outputfile>.pdf.

    input:
        ts_entry: list of observed series dataframes, one per subplot
        forecast_entry: list of forecast objects (plotted with copy_dim(0))
        pits/caution/pitstop: race status arrays from get_racestatus
        ylabel: 'RANK' switches the axis limits and status overlay layout
    NOTE(review): plabels must have len(forecast_entry)+1 entries.
    """
    #plot_length = int(forecast_entry[0].samples.shape[1] *1.2)
    #plot_length = forecast_entry[0].samples.shape[1] + 10
    #prediction_intervals = (50.0, 90.0)
    prediction_intervals = [90.0]
    #legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]

    figcnt = len(forecast_entry)
    #fig, axs = plt.subplots(figcnt,1, figsize=(8,6))
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))

    for idx in range(figcnt):
        ax = plt.subplot(figcnt, 1, idx+1)
        #ax = plt.subplot(1, figcnt, idx+1)

        # observed series
        ts_entry[idx].iloc[:,0].plot(linewidth=1, color='b',
                marker='*', alpha=0.7, zorder=-1, label=plabels[0])

        # currank: the observed series shifted by 2 laps
        sv = ts_entry[idx].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq='1min') + 2
        date_index = pd.date_range(start, periods = len(sv)-2, freq='1min')
        df2 = pd.DataFrame(sv[:-2], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
                marker='+', alpha=0.7, zorder=-1, label='CurRank')

        # the forecast with its prediction interval
        forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals,
                color=colors[idx],label=plabels[idx+1], zorder=10)

        #add mean line, compare with median
        #if forecast_entry[idx].samples.shape[0] > 1:
        if idx>3:
            mean_forecast = copy.deepcopy(forecast_entry[idx])
            mean_forecast.samples = np.mean(mean_forecast.samples, axis=0).reshape((1,-1))
            mean_forecast.copy_dim(0).plot(prediction_intervals=prediction_intervals,
                    color='g',label='use-mean', zorder=10)

        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        ax.set_ylabel(ylabel)
        if idx==0:
            plt.title(outputfile)

        # relabel the x ticks as 1-based lap numbers every 5 laps
        locs, labels = plt.xticks()
        start_loc = locs[0]
        offset = range(0, 200, 5)
        new_locs = [start_loc + x for x in offset]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)

        # clamp the x range to the length of the observed series
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[idx])
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            # reuses xl from a previous iteration
            xlim_h = len(ts_entry[idx])
            ax.set_xlim((xl+0,xl+xlim_h))

        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))

        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)

        #add racestatus
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)

    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcar(carno):
    """
    Plot the rank forecasts of the first five models for one car.

    input:
        alldata, rankdata; global data
    """
    series_list, forecast_list = alldata[carno]
    (pits, cautions, caution, pitstop,
     ranks, laptimes) = get_racestatus(carno, rankdata)
    print(np.where(pitstop==1))
    ploth(series_list[:5], forecast_list[:5], pits, caution, pitstop,
          'ranknet-rf-rank-forecast-%d'%carno,
          colors = ['y','c','g','m','r'],
          plabels= ['observed','SVR','RF','Arima','RrankNet-Oracle','RrankNet-MLP'])
def plotcar_laptime(carno):
    """Draw the laptime forecasts of the oracle/MLP models for one car.

    Reads the module-level ``alldata`` and ``rankdata`` and delegates the
    actual drawing to :func:`ploth` with the LapTime y-axis.
    """
    series, forecasts = alldata[carno]
    pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, rankdata)
    print(np.where(pitstop == 1))
    ploth(series, forecasts, pits, caution, pitstop,
          'ranknet-oracle-laptime-forecast-%d' % carno,
          colors=['m', 'r'],
          plabels=['observed', 'RrankNet-Oracle', 'RrankNet-MLP'],
          ylabel='LapTime')
def plotrank(outputfile, mode='RANK' ):
    """Plot the raw rank (or laptime) curve of every car in ``alldata``.

    One stacked subplot per car; pit stops are marked with red triangles and
    the per-lap race status is drawn as a colored strip via ``add_status``.

    input:
        alldata, rankdata; global data
    """
    ncars = len(alldata)
    fig, axs = plt.subplots(1, ncars, figsize=(12, 3 * ncars))
    for pos, carno in enumerate(alldata.keys()):
        # unpack to keep the same failure mode on malformed entries
        tsss, targets = alldata[carno]
        pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, rankdata)
        print(np.where(pitstop == 1))
        ax = plt.subplot(ncars, 1, pos + 1)
        if mode == 'RANK':
            ax.plot(ranks, linewidth=1, color='b', marker='*', alpha=0.7, zorder=-1, label='Rank')
            ax.set_ylim((-5, +35))
            ax.plot(pits[:, 0] - 1, pits[:, 1], '^', color='r', label='PitStop', linewidth=2, alpha=0.7, zorder=-1)
            # race-status strip along the bottom
            add_status(ax, 0, caution, pitstop)
        else:
            ax.plot(laptimes, linewidth=1, color='b', marker='*', alpha=0.7, zorder=-1, label='LapTime')
            ax.set_ylim((30, 140))
            ax.plot(pits[:, 0] - 1, pits[:, 2], '^', color='r', label='PitStop', linewidth=2, alpha=0.7, zorder=-1)
            add_status(ax, 0, caution, pitstop, y=32, height=5)
        ax.set_xlim((0, 200))
        ax.set_ylabel('car-%d' % carno)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcarx(carno):
    """Like :func:`plotcar`, but substitutes curve slot 2 with the oracle run
    taken from the module-level ``oracledata`` before plotting.

    input:
        alldata, rankdata; global data
    """
    tsss, targets = alldata[carno]
    oracle_tss, oracle_targets = oracledata[carno]
    # overwrite the third curve with the oracle result
    tsss[2], targets[2] = oracle_tss[1], oracle_targets[1]
    pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, rankdata)
    print(np.where(pitstop == 1))
    ploth(tsss[:5], targets[:5], pits, caution, pitstop,
          'ranknet-rf-rank-forecast-%d' % carno,
          colors=['y', 'c', 'g', 'm', 'r'],
          plabels=['observed', 'SVR', 'RF', 'Weighted-Oracle', 'RrankNet-Oracle', 'RrankNet-MLP'])
def plotoracle(alldata, carno, destdir):
    """Plot the oracle forecast curves for one car, saving under ``destdir``.

    input:
        rankdata; global data
    """
    outputfile = destdir + 'ranknet-oracle-forecast-%d' % carno
    series, forecasts = alldata[carno]
    pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, rankdata)
    print(np.where(pitstop == 1))
    ploth(series, forecasts, pits, caution, pitstop,
          outputfile,
          colors=['y', 'c', 'g', 'm', 'r'],
          plabels=['observed', '1run-samples', '1run-df', 'multimean', 'norerank-multimean', 'mrun-samples'])
def plotallcars(alldata, outputfile, drawid = 0,
        colors = ['g','c','m','r','y'],
        plabels= ['observed','1run-samples','1run-df','multimean','norerank-multimean','mrun-samples'],
        ylabel='RANK'):
    """
    plot a single fig for all cars

    input:
        prediction_length,freq ; global var
        alldata, rankdata; global data
        drawid : long prediction result index in alldata[carno] to draw
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12, 3 * figcnt))
    prediction_intervals = [90.0]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    font = {'family': 'serif',
            'color': 'darkred',
            'weight': 'normal',
            'size': 12,
            }
    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        ts_entry, forecast_entry = alldata[carno]
        pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, rankdata)
        print(np.where(pitstop == 1))
        ax = plt.subplot(figcnt, 1, idx + 1)
        # observed
        ts_entry[drawid].iloc[:, 0].plot(linewidth=1, color='b',
                marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # currank: observation shifted by prediction_length, a naive baseline
        sv = ts_entry[drawid].iloc[:, 0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq=freq) + prediction_length
        date_index = pd.date_range(start, periods=len(sv) - prediction_length, freq=freq)
        df2 = pd.DataFrame(sv[:-prediction_length], index=date_index)
        df2.iloc[:, 0].plot(linewidth=0.5, color='k',
                marker='+', alpha=0.7, zorder=-1, label='CurRank')
        # forecast
        forecast_entry[drawid].copy_dim(0).plot(prediction_intervals=prediction_intervals,
                color=colors[drawid], label=plabels[drawid + 1], zorder=10)
        if idx == figcnt - 1:
            ax.set_xlabel('Lap')
        ax.set_ylabel(ylabel)
        # relabel x ticks as lap numbers starting at 1
        locs, labels = plt.xticks()
        start_loc = locs[0]
        offset = range(0, 200, 5)
        new_locs = [start_loc + x for x in offset]
        new_labels = [str(x + 1) for x in offset]
        plt.xticks(new_locs, new_labels)
        # bugfix: read the current axes' limits for every subplot; the original
        # only read them for idx < figcnt-1 and reused a stale (or unbound)
        # 'xl' for the last subplot
        print('xlim:', plt.xlim())
        xl, xr = plt.xlim()
        xlim_h = len(ts_entry[drawid])
        ax.set_xlim((xl + 0, xl + xlim_h))
        plt.text(xl + xlim_h - 15, 35, f'car-{carno}', fontdict=font)
        if ylabel == 'RANK':
            ax.set_ylim((-5, +40))
        else:
            ax.set_ylim((25, 175))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l = plt.legend(prop={'size': 10}, loc='upper left')
        l.set_zorder(0.6)
        # add racestatus strip and pit markers (column 1 = rank, 2 = laptime)
        if ylabel == 'RANK':
            ax.plot(xl + pits[:, 0] - 1, pits[:, 1], '^', color='r', label='PitStop', linewidth=2, alpha=0.7, zorder=-1)
            add_status(ax, xl, caution, pitstop)
        else:
            ax.plot(xl + pits[:, 0] - 1, pits[:, 2], '^', color='r', label='PitStop', linewidth=2, alpha=0.7, zorder=-1)
            add_status(ax, xl, caution, pitstop, y=27, height=3)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def get_racestatus(carno, rankdata):
    """Extract race-status arrays for one car from the full rank dataframe.

    input:
        carno    : car number to select
        rankdata : dataframe with at least the columns car_number,
                   completed_laps, rank, last_laptime, time_behind_leader,
                   lap_status ('P' = pit) and track_status ('Y' = caution)
    return:
        pits     : rows of [completed_laps, rank, last_laptime,
                   time_behind_leader] on pit laps
        cautions : same rows on caution laps
        caution, pitstop : 0/1 per-lap flags, lap 0 dropped
        ranks, laptimes  : (n,1) column arrays, lap 0 dropped
    """
    df12 = rankdata[rankdata['car_number'] == carno]
    #
    # completed_laps start from 0
    # in array mode completed_laps=1 should indexed by 0
    #
    data = df12[['completed_laps', 'rank', 'last_laptime', 'time_behind_leader']].values
    # bugfix: use the 1-D Series values so each element is a scalar string;
    # the original iterated 1-element rows of a 2-D frame and relied on
    # numpy's fragile single-element-array truthiness
    pitstop = np.array([1 if x == 'P' else 0 for x in df12['lap_status'].values])
    caution = np.array([1 if x == 'Y' else 0 for x in df12['track_status'].values])
    pits = data[np.where(pitstop == 1)]
    cautions = data[np.where(caution == 1)]
    ranks = df12[['rank']].values
    laptimes = df12[['last_laptime']].values
    # drop lap 0 from the per-lap arrays
    return pits, cautions, caution[1:], pitstop[1:], ranks[1:], laptimes[1:]
# colors for the per-lap race-status strip drawn by add_status()
#red = '#ff8080'
red = 'red'        # lap with a pit stop
#yellow = '#8080ff'
yellow = 'yellow'  # caution (yellow-flag) lap
#green = '#80ff80'
green = 'green'    # normal racing lap
def add_status(axs, xl, caution, pitstop, maxlap=200, y=-4, height=2):
    """Draw a per-lap race-status strip of unit-width rectangles on ``axs``.

    Precedence per lap: pit stop (red) over caution (yellow) over normal
    (green).

    input:
        axs     : target axes
        xl      : x offset of lap 0 (left data limit of the axes)
        caution, pitstop : 0/1 per-lap status arrays
        maxlap  : cap on the number of laps drawn
        y, height : vertical placement of the strip
    """
    # bugfix: honor the caller's maxlap while never running past the status
    # arrays (the original overwrote — i.e. silently ignored — the parameter)
    maxlap = min(maxlap, len(caution), len(pitstop))
    for lap in range(maxlap):
        fc = green
        if caution[lap] == 1:
            fc = yellow
        if pitstop[lap] == 1:
            fc = red
        rectangle = plt.Rectangle((lap + xl - 0.5, y), 1, height, fc=fc, ec=fc)
        axs.add_patch(rectangle)
# In[ ]:
#
# stint evaluation
#
def eval_bydf(testdf, bydf, forcematch=True, force2int=False):
    """Evaluate ``testdf`` restricted to the <carno, startlap> records of ``bydf``.

    input:
        testdf     : result df to evaluate
        bydf       : df providing the <carno, startlap> record set
        forcematch : when True, drop records whose startrank/endrank disagree
                     between the two frames
        force2int  : cast the surviving records to int before evaluation
    return:
        (dfout, accret) where accret comes from stint.get_evalret()
    """
    # collect only records in bydf <carno and startlap>
    cars = set(bydf.carno.values)
    startlaps = {car: set(bydf[bydf['carno'] == car].startlap.values) for car in cars}
    retdf = []
    for car in cars:
        for startlap in startlaps[car]:
            dfrec = testdf[(testdf['carno'] == car) & (testdf['startlap'] == startlap)]
            # check match
            if forcematch:
                # reuse dfrec instead of re-filtering testdf a second time
                a = dfrec.to_numpy().astype(int)
                b = bydf[(bydf['carno'] == car) & (bydf['startlap'] == startlap)].to_numpy().astype(int)
                if len(a) != 0 and len(b) != 0:
                    # compare startrank (col 2) and endrank (col 3)
                    if not ((a[0][2] == b[0][2]) and (a[0][3] == b[0][3])):
                        #print('mismatch:', a, b)
                        continue
            retdf.append(dfrec)
    dfout = pd.concat(retdf)
    if force2int:
        dfout = pd.DataFrame(dfout.to_numpy().astype(int),
                             columns=['carno', 'startlap', 'startrank',
                                      'endrank', 'diff', 'sign',
                                      'pred_endrank', 'pred_diff', 'pred_sign',
                                      'endlap', 'pred_endlap'
                                      ])
    dfout = dfout.sort_values(by=['carno', 'startlap'])
    print('df size:', len(dfout))
    # return acc
    accret = stint.get_evalret(dfout)[0]
    return dfout, accret
def eval_sync(testdf, errlist, force2int=False):
    """
    eval df result by sync with the errlist detected
    remove the records in errlist
    """
    # keep only records that were NOT flagged in errlist
    retained = []
    for car in set(testdf.carno.values):
        car_rows = testdf[testdf['carno'] == car]
        for startlap in set(car_rows.startlap.values):
            if [car, startlap] in errlist:
                continue
            retained.append(car_rows[car_rows['startlap'] == startlap])
    dfout = pd.concat(retained)
    if force2int:
        dfout = pd.DataFrame(dfout.to_numpy().astype(int),
                             columns=['carno', 'startlap', 'startrank',
                                      'endrank', 'diff', 'sign',
                                      'pred_endrank', 'pred_diff', 'pred_sign',
                                      'endlap', 'pred_endlap'
                                      ])
    dfout = dfout.sort_values(by=['carno', 'startlap'])
    print('df size:', len(dfout))
    # return acc
    accret = stint.get_evalret(dfout)[0]
    return dfout, accret
def cmp_df(testdf, bydf):
    """
    df can be different, minor difference for the rank when RankNet removes short ts

    Compare the startrank/endrank columns record-by-record; return
    (errcnt, err_list) where err_list holds the mismatching
    [carno, startlap] pairs.
    """
    # iterate the <carno, startlap> record set of bydf
    cars = set(bydf.carno.values)
    startlaps = {car: set(bydf[bydf['carno'] == car].startlap.values) for car in cars}
    err_list = []
    errcnt = 0
    for car in cars:
        for startlap in startlaps[car]:
            a = testdf[(testdf['carno'] == car) & (testdf['startlap'] == startlap)].to_numpy().astype(int)
            b = bydf[(bydf['carno'] == car) & (bydf['startlap'] == startlap)].to_numpy().astype(int)
            if len(a) == 0 or len(b) == 0:
                errcnt += 1
                print('mismatch empty:', a, b)
                err_list.append([car, startlap])
            elif not (a[0][2] == b[0][2] and a[0][3] == b[0][3]):
                # startrank (col 2) or endrank (col 3) disagree
                print('mismatch:', a, b)
                errcnt += 1
                err_list.append([car, startlap])
    print('errcnt:', errcnt)
    return errcnt, err_list
def df2samples(dfall, prediction_len=2, samplecnt=1, maxlap=200):
    """
    convert a df into <samples, tss> format
    this version works for the output of ml modles which contains only 1 sample

    input:
        dfall          : df with columns carno, startlap, endrank, pred_endrank
        prediction_len : lap offset of the prediction target from startlap
        samplecnt      : number of (identical) sample rows to emit per car
        maxlap         : length of the per-car lap axis (generalized from the
                         previously hard-coded 200)
    return:
        full_samples : {carno: (samplecnt, maxlap) array}, NaN where unset
        full_tss     : {carno: (maxlap,) array}, NaN where unset
    """
    carlist = set(dfall.carno.values)
    full_samples = {}
    full_tss = {}
    startlaps = {car: set(dfall[dfall['carno'] == car].startlap.values) for car in carlist}
    for carno in carlist:
        # NaN-filled axes; only predicted laps get real values
        full_tss[carno] = np.full(maxlap, np.nan)
        full_samples[carno] = np.full((samplecnt, maxlap), np.nan)
        for startlap in startlaps[carno]:
            dfrec = dfall[(dfall['carno'] == carno) & (dfall['startlap'] == startlap)]
            curlap = int(dfrec.startlap.values[0] + prediction_len)
            target = dfrec.endrank.values[0]
            forecast = dfrec.pred_endrank.values[0]
            # broadcast the single forecast across all sample rows
            full_samples[carno][:, curlap] = forecast
            full_tss[carno][curlap] = target
    return full_samples, full_tss
def df2samples_ex(dfall, samplecnt=100, errlist=[]):
    """
    for stint results only
    get samples from the runs
    input:
        runret ; list of result df <carno,startlap,startrank,endrank,diff,sign,pred_endrank,pred_diff,pred_sign,endlap,pred_endlap>
        errlist ; <car, startlap> list
    return:
        samples, tss
    """
    full_samples, full_tss = {}, {}
    for carno in set(dfall.carno.values):
        laps = set(dfall[dfall['carno'] == carno].startlap.values)
        # NaN-filled axes; only forecast laps get real values
        tss = np.full(200, np.nan)
        samples = np.full((samplecnt, 200), np.nan)
        for startlap in laps:
            if [carno, startlap] in errlist:
                continue
            rec = dfall[(dfall['carno'] == carno) & (dfall['startlap'] == startlap)]
            lap = int(rec.startlap.values[0])
            samples[:, lap] = rec.pred_endrank.to_numpy()
            tss[lap] = rec.endrank.values[0]
        full_samples[carno] = samples
        full_tss[carno] = tss
    return full_samples, full_tss
def runs2samples(runret, errlist):
    """
    for stint results only
    get samples from the runs
    input:
        runret ; list of result df <carno,startlap,startrank,endrank,diff,sign,pred_endrank,pred_diff,pred_sign,endlap,pred_endlap>
        errlist ; <car, startlap> list
    return:
        samples, tss
    """
    nsamples = len(runret)
    carlist = set(runret[0].carno.values)
    # one concatenated frame makes per-<car, startlap> selection easy
    dfall = pd.concat(runret)
    full_samples, full_tss = {}, {}
    for carno in carlist:
        laps = set(dfall[dfall['carno'] == carno].startlap.values)
        # NaN-filled axes; only forecast laps get real values
        tss = np.full(200, np.nan)
        samples = np.full((nsamples, 200), np.nan)
        for startlap in laps:
            if [carno, startlap] in errlist:
                continue
            rec = dfall[(dfall['carno'] == carno) & (dfall['startlap'] == startlap)]
            lap = int(rec.startlap.values[0])
            # one forecast per run ends up as one sample row
            samples[:, lap] = rec.pred_endrank.to_numpy()
            tss[lap] = rec.endrank.values[0]
        full_samples[carno] = samples
        full_tss[carno] = tss
    return full_samples, full_tss
# In[ ]:
def get_config():
    """Snapshot the current run configuration as a flat list of globals.

    The order mirrors the parameters read from the config file above; keep
    the two in sync when adding parameters.
    """
    config = [
        _savedata,
        _skip_overwrite,
        _inlap_status,
        _feature_mode,
        _featureCnt,
        freq ,
        _train_len,
        prediction_length,
        context_ratio,
        context_length,
        contextlen,
        dataset,
        epochs,
        gpuid,
        _use_weighted_model,
        trainmodel,
        _use_cate_feature,
        use_feat_static,
        distroutput,
        batch_size,
        loopcnt,
        _test_event,
        testmodel,
        pitmodel,
        year
    ]
    return config
### run
# script entry: logging setup, command-line parsing and config-file check
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
# logging configure
import logging.config
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
# cmd argument parser; the option defaults ('' / -1 / False) mean "not given,
# keep the config-file value" in the override section further below
usage = 'RankNet-QuickTest.py <configfile> [options]'
parser = OptionParser(usage)
parser.add_option("--forecast_mode", dest="forecast_mode", default="")
parser.add_option("--trainmodel", default='', dest="trainmodel")
parser.add_option("--testmodel", default='', dest="testmodel")
parser.add_option("--joint_train", action="store_true", default=False, dest="joint_train")
parser.add_option("--loopcnt", default=-1,type='int', dest="loopcnt")
parser.add_option("--gpuid", default=-1,type='int', dest="gpuid")
parser.add_option("--pitmodel_bias", default=-1, type='int', dest="pitmodel_bias")
parser.add_option("--year", default='', dest="year")
parser.add_option("--test_event", default='', dest="test_event")
opt, args = parser.parse_args()
print(len(args), opt.joint_train)
#check validation: exactly one positional argument (the config file) is required
if len(args) != 1:
    logger.error(globals()['__doc__'] % locals())
    sys.exit(-1)
configfile = args[0]
base=os.path.basename(configfile)
# config name without extension; used as the experiment id in result rows
configname = os.path.splitext(base)[0]
WorkRootDir = 'QuickTestOutput'
#configname = 'weighted-noinlap-nopitage-nocate-c60-drank'
#configfile = f'{configname}.ini'
if not os.path.exists(configfile):
    print('config file not exists error:', configfile)
    sys.exit(-1)
if configfile != '':
    # read all run parameters from the [RankNet-QuickTest] section of the
    # ini file (trailing comments show typical values)
    config = configparser.RawConfigParser()
    #config.read(WorkRootDir + '/' + configfile)
    config.read(configfile)
    #set them back
    section = "RankNet-QuickTest"
    _savedata = config.getboolean(section, "_savedata")
    _skip_overwrite = config.getboolean(section, "_skip_overwrite")
    _inlap_status = config.getint(section, "_inlap_status") #0
    _feature_mode = config.getint(section, "_feature_mode") #FEATURE_STATUS
    _featureCnt = config.getint(section, "_featureCnt") #9
    freq = config.get(section, "freq") #"1min"
    _train_len = config.getint(section, "_train_len") #40
    prediction_length = config.getint(section, "prediction_length") #2
    context_ratio = config.getfloat(section, "context_ratio") #0.
    context_length = config.getint(section, "context_length") #40
    dataset= config.get(section, "dataset") #'rank'
    epochs = config.getint(section, "epochs") #1000
    gpuid = config.getint(section, "gpuid") #5
    _use_weighted_model = config.getboolean(section, "_use_weighted_model")
    trainmodel = config.get(section, "trainmodel") #'deepARW-Oracle' if _use_weighted_model else 'deepAR-Oracle'
    _use_cate_feature = config.getboolean(section, "_use_cate_feature")
    distroutput = config.get(section, "distroutput") #'student'
    batch_size = config.getint(section, "batch_size") #32
    loopcnt = config.getint(section, "loopcnt") #2
    _test_event = config.get(section, "_test_event") #'Indy500-2018'
    testmodel = config.get(section, "testmodel") #'oracle'
    pitmodel = config.get(section, "pitmodel") #'oracle'
    year = config.get(section, "year") #'2018'
    # derived aliases used throughout the rest of the script
    contextlen = context_length
    use_feat_static = _use_cate_feature
    #config1 = get_config()
else:
    print('Warning, please use config file')
    sys.exit(0)
#
# global settings
#
# NOTE(review): every assignment below unconditionally overwrites the value
# just read from the config file above (looks like a leftover notebook cell
# — see the '#config2 = get_config()' marker). Confirm whether these
# hard-coded defaults are intentional before relying on the config file.
#_savedata = False
_savedata = True
_skip_overwrite = True
#inlap status =
# 0 , no inlap
# 1 , set previous lap
# 2 , set the next lap
_inlap_status = 0
#
# featuremode in [FEATURE_STATUS, FEATURE_PITAGE]:
#
_feature_mode = FEATURE_LEADERPITCNT
_featureCnt = 9
#
# training parameters
#
freq = "1min"
_train_len = 60
prediction_length = 2
context_ratio = 0.
context_length = 60
contextlen = context_length
dataset='rank'
epochs = 1000
#epochs = 10
gpuid = 5
#'deepAR-Oracle','deepARW-Oracle'
_use_weighted_model = True
trainmodel = 'deepARW-Oracle' if _use_weighted_model else 'deepAR-Oracle'
_use_cate_feature = False
use_feat_static = _use_cate_feature
distroutput = 'student'
batch_size = 32
#
# test parameters
#
loopcnt = 2
_test_event = 'Indy500-2018'
testmodel = 'oracle'
pitmodel = 'oracle'
year = '2018'
#config2 = get_config()
# In[ ]:
# new added parameters; not present in the config file, overridable from
# the command line in the section below
_test_train_len = 40
_joint_train = False
_pitmodel_bias = 0
_forecast_mode = 'shortterm'
#_test_event = 'Indy500-2019'
#year = '2019'
#shortterm, stint
#_forecast_mode = 'stint'
#_forecast_mode = 'shortterm'
# bias of the pitmodel
#_pitmodel_bias = 4
#train model: [deepARW-Oracle, deepAR]
# test the standard deepAR model training and testing
# DeepAR
#trainmodel = 'deepAR'
#testmodel = 'standard'
# Joint
#trainmodel = 'deepAR-multi'
#testmodel = 'joint'
#_joint_train = True
#loopcnt = 2
#load arguments overwites
# command-line options (when given) take precedence over the config file
if opt.forecast_mode != '':
    _forecast_mode = opt.forecast_mode
if opt.trainmodel != '':
    trainmodel = opt.trainmodel
if opt.testmodel != '':
    testmodel = opt.testmodel
if opt.joint_train:
    _joint_train = True
# bugfix: the parser default is -1, so test '>= 0' — with the original '> 0'
# GPU 0 could never be selected from the command line
if opt.gpuid >= 0:
    gpuid = opt.gpuid
if opt.loopcnt > 0:
    loopcnt = opt.loopcnt
if opt.pitmodel_bias >= 0:
    _pitmodel_bias = opt.pitmodel_bias
if opt.year != '':
    year = opt.year
if opt.test_event != '':
    _test_event = opt.test_event
## deduced paramters
if testmodel == 'pitmodel':
    # encode a non-zero bias into the model name, e.g. 'pitmodel4'
    testmodel = 'pitmodel%s'%(_pitmodel_bias if _pitmodel_bias!=0 else '')
#loopcnt = 2
#featurestr = {FEATURE_STATUS:'nopitage',FEATURE_PITAGE:'pitage',FEATURE_LEADERPITCNT:'leaderpitcnt'}
#cur_featurestr = featurestr[_feature_mode]
print('current configfile:', configfile)
cur_featurestr = decode_feature_mode(_feature_mode)
print('feature_mode:', _feature_mode, cur_featurestr)
print('trainmodel:', trainmodel, 'jointtrain:', _joint_train)
print('testmodel:', testmodel)
print('pitmodel:', pitmodel, 'pitmodel bias:', _pitmodel_bias)
print('year:', year, 'test_event:', _test_event)
print('loopcnt:', loopcnt)
print('gpuid:', gpuid)
sys.stdout.flush()
#
# string map: encode the switch settings into the file/folder names below
#
inlapstr = {0:'noinlap',1:'inlap',2:'outlap'}
weightstr = {True:'weighted',False:'noweighted'}
catestr = {True:'cate',False:'nocate'}
#
# input data parameters
#
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}_v{_featureCnt}_p{_inlap_status}'
_dataset_id = '%s-%s'%(inlapstr[_inlap_status], cur_featurestr)
#
# internal parameters
#
distr_outputs ={'student':StudentTOutput(),
                'negbin':NegativeBinomialOutput()
               }
distr_output = distr_outputs[distroutput]
#
# experiment id encodes the main switches; used as the output folder name
#
experimentid = f'{weightstr[_use_weighted_model]}-{inlapstr[_inlap_status]}-{cur_featurestr}-{catestr[_use_cate_feature]}-c{context_length}'
#
#
#
outputRoot = f"{WorkRootDir}/{experimentid}/"
# standard output file names
LAPTIME_DATASET = f'{outputRoot}/laptime_rank_timediff_pit-oracle-{dbid}.pickle'
STAGE_DATASET = f'{outputRoot}/stagedata-{dbid}.pickle'
# year related
SIMULATION_OUTFILE = f'{outputRoot}/{_test_event}/{_forecast_mode}-dfout-{trainmodel}-indy500-{dataset}-{inlapstr[_inlap_status]}-{cur_featurestr}-{testmodel}-l{loopcnt}-alldata.pickle'
EVALUATION_RESULT_DF = f'{outputRoot}/{_test_event}/{_forecast_mode}-evaluation_result_d{dataset}_m{testmodel}.csv'
LONG_FORECASTING_DFS = f'{outputRoot}/{_test_event}/{_forecast_mode}-long_forecasting_dfs_d{dataset}_m{testmodel}.pickle'
FORECAST_FIGS_DIR = f'{outputRoot}/{_test_event}/{_forecast_mode}-forecast-figs-d{dataset}_m{testmodel}/'
# ### 1. make laptime dataset
# In[ ]:
stagedata = {}
global_carids = {}
os.makedirs(outputRoot, exist_ok=True)
os.makedirs(f'{outputRoot}/{_test_event}', exist_ok=True)
#check the dest files first: reuse cached pickles when allowed
if _skip_overwrite and os.path.exists(LAPTIME_DATASET) and os.path.exists(STAGE_DATASET):
    #
    # load data
    #
    print('Load laptime and stage dataset:',LAPTIME_DATASET, STAGE_DATASET)
    with open(LAPTIME_DATASET, 'rb') as f:
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    with open(STAGE_DATASET, 'rb') as f:
        stagedata = pickle.load(f, encoding='latin1')
else:
    # build the datasets from the raw per-event data
    cur_carid = 0
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        print('%s: carno=%d, lapnum=%d'%(event, len(carlist), len(laplist)))
        #build the carid map: a globally unique id per car number across events
        for car in carlist:
            if car not in global_carids:
                global_carids[car] = cur_carid
                cur_carid += 1
    laptime_data = get_laptime_dataset(stagedata,inlap_status = _inlap_status)
    if _savedata:
        import pickle
        #stintdf.to_csv('laptime-%s.csv'%year)
        #savefile = outputRoot + f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
        savefile = LAPTIME_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            #pack [global_carids, laptime_data]
            savedata = [global_carids, laptime_data]
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        #savefile = outputRoot + f'stagedata-{dbid}.pickle'
        savefile = STAGE_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            savedata = stagedata
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
# ### 2. make gluonts db
# In[ ]:
outdir = outputRoot + _dataset_id
os.makedirs(outdir, exist_ok=True)
# pick the target time series column by dataset type
if dataset == 'laptime':
    subdir = 'laptime-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_LAPTIME
elif dataset == 'timediff':
    subdir = 'timediff-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_TIMEDIFF
elif dataset == 'rank':
    subdir = 'rank-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_RANK
else:
    print('error, dataset not support: ', dataset)
_task_dir = f'{outdir}/{subdir}/'
#
#dbname, train_ds, test_ds = makedbs()
#
useeid = False
interpolate = False
#ipstr = '-ip' if interpolate else '-noip'
ipstr = '%s-%s'%('ip' if interpolate else 'noip', 'eid' if useeid else 'noeid')
jointstr = '-joint' if _joint_train else ''
dbname = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}{jointstr}.pickle'
laptimedb = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}-newlaptimedata.pickle'
#check the dest files first: reuse cached gluonts datasets when allowed
if _skip_overwrite and os.path.exists(dbname) and os.path.exists(laptimedb):
    print('Load Gluonts Dataset:',dbname)
    with open(dbname, 'rb') as f:
        freq, prediction_length, cardinality, train_ds, test_ds = pickle.load(f, encoding='latin1')
    print('.......loaded data, freq=', freq, 'prediction_length=', prediction_length)
    print('Load New Laptime Dataset:',laptimedb)
    with open(laptimedb, 'rb') as f:
        prepared_laptimedata = pickle.load(f, encoding='latin1')
else:
    # cardinality of the static categorical features (car id, optionally event id)
    if useeid:
        cardinality = [len(global_carids), len(laptime_data)]
    else:
        cardinality = [len(global_carids)]
    prepared_laptimedata = prepare_laptimedata(prediction_length, freq, test_event = _test_event,
            train_ratio=0, context_ratio = 0.,shift_len = prediction_length)
    train_ds, test_ds,_,_ = make_dataset_byevent(prepared_laptimedata, prediction_length,freq,
            useeid=useeid, run_ts=_run_ts,
            test_event=_test_event, log_transform =False,
            context_ratio=0, train_ratio = 0)
    if _savedata:
        print('Save Gluonts Dataset:',dbname)
        with open(dbname, 'wb') as f:
            savedata = [freq, prediction_length, cardinality, train_ds, test_ds]
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        print('Save preprocessed laptime Dataset:',laptimedb)
        with open(laptimedb, 'wb') as f:
            pickle.dump(prepared_laptimedata, f, pickle.HIGHEST_PROTOCOL)
# ### 3. train the model
# In[ ]:
id='oracle'
run=1
runid=f'{trainmodel}-{dataset}-all-indy-f1min-t{prediction_length}-e{epochs}-r{run}_{id}_t{prediction_length}'
modelfile = _task_dir + runid
if _skip_overwrite and os.path.exists(modelfile):
    print('Model checkpoint found at:',modelfile)
else:
    # infer the target dimension from the first training entry
    entry = next(iter(train_ds))
    target_dim = entry['target'].shape
    target_dim = target_dim[0] if len(target_dim) > 1 else 1
    # bugfix: apply the %-format — the original passed the format string and
    # the value as two separate print arguments
    print('target_dim: %s' % target_dim)
    estimator = init_estimator(trainmodel, gpuid,
            epochs, batch_size,target_dim, distr_output = distr_output,use_feat_static = use_feat_static)
    predictor = estimator.train(train_ds)
    if _savedata:
        os.makedirs(modelfile, exist_ok=True)
        # bugfix: same %-format mistake as above
        print('Start to save the model to %s' % modelfile)
        predictor.serialize(Path(modelfile))
        print('End of saving the model.')
#
# ### 4. evaluate the model
# In[ ]:
lapmode = _inlap_status
fmode = _feature_mode
runts = dataset
# result-dict key, e.g. 'oracle-rank-2018-noinlap-<featurestr>'
mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], cur_featurestr)
datasetid = outputRoot + _dataset_id
if _skip_overwrite and os.path.exists(SIMULATION_OUTFILE):
    print('Load Simulation Results:',SIMULATION_OUTFILE)
    with open(SIMULATION_OUTFILE, 'rb') as f:
        dfs,acc,ret,pret = pickle.load(f, encoding='latin1')
    print('.......loaded data, ret keys=', ret.keys())
    # init the stint module
    #
    # in test mode, set all train_len = 40 to unify the evaluation results
    #
    init_simulation(datasetid, _test_event, 'rank',stint.COL_RANK,'rank',prediction_length,
            pitmodel=pitmodel, inlapmode=lapmode,featuremode =fmode,
            train_len = _test_train_len, pitmodel_bias= _pitmodel_bias)
else:
    #run simulation
    acc, ret, pret = {}, {}, {}
    #lapmode = _inlap_status
    #fmode = _feature_mode
    #runts = dataset
    #mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], featurestr[fmode])
    # the target column and its rank conversion depend on the dataset type
    if runts == 'rank':
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                'rank',stint.COL_RANK,'rank',
                prediction_length, stint.MODE_ORACLE,loopcnt,
                pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
                train_len = _test_train_len, forecastmode = _forecast_mode, joint_train = _joint_train,
                pitmodel_bias= _pitmodel_bias)
    else:
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                'timediff',stint.COL_TIMEDIFF,'timediff2rank',
                prediction_length, stint.MODE_ORACLE,loopcnt,
                pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
                train_len = _test_train_len, forecastmode = _forecast_mode, joint_train = _joint_train,
                pitmodel_bias= _pitmodel_bias)
    if _forecast_mode == 'shortterm':
        # probabilistic risk computed directly from the raw samples
        allsamples, alltss = get_allsamples(ret[mid], year=year)
        _, pret[mid]= prisk_direct_bysamples(allsamples, alltss)
        print(pret[mid])
    dfs={}
    # mode: 1=mean, 0=mode, otherwise median (used in the result name)
    mode=1
    df = get_alldf_mode(ret[mid], year=year,mode=mode)
    name = '%s_%s'%(testmodel, 'mean' if mode==1 else ('mode' if mode==0 else 'median'))
    if year not in dfs:
        dfs[year] = {}
    dfs[year][name] = df
    _trim = 0
    _include_final = True
    _include_stintlen = True
    include_str = '1' if _include_final else '0'
    stint_str = '1' if _include_stintlen else ''
    #simulation_outfile=outputRoot + f'shortterm-dfout-oracle-indy500-{dataset}-{inlapstr[_inlap_status]}-{featurestr[_feature_mode]}-2018-oracle-l{loopcnt}-alldata-weighted.pickle'
    with open(SIMULATION_OUTFILE, 'wb') as f:
        savedata = [dfs,acc,ret,pret]
        pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
#alias
ranknetdf = dfs
ranknet_ret = ret
# ### 5. final evaluation
# In[ ]:
if _skip_overwrite and os.path.exists(EVALUATION_RESULT_DF):
    print('Load Evaluation Results:',EVALUATION_RESULT_DF)
    oracle_eval_result = pd.read_csv(EVALUATION_RESULT_DF)
else:
    ##-------------------------------------------------------------------------------
    if _forecast_mode == 'shortterm':
        # get pit laps, pit-covered-laps
        # pitdata[year] = [pitlaps, pitcoveredlaps]
        with open('pitcoveredlaps-g1.pickle', 'rb') as f:
            # The protocol version used is detected automatically, so we do not
            # have to specify it.
            pitdata = pickle.load(f, encoding='latin1')
        #
        # Model,SignAcc,MAE,50-Risk,90-Risk
        #
        cols = ['Year','Model','ExpID','laptype','Top1Acc','MAE','50-Risk','90-Risk']
        plen = prediction_length
        usemeanstr='mean'
        #load data
        # dfs,acc,ret,pret
        retdata = []
        # overall (all laps) metrics first
        dfx = ret[mid]
        allsamples, alltss = get_allsamples(dfx, year=year)
        #_, pret[mid]= prisk_direct_bysamples(ret[mid][0][1], ret[mid][0][2])
        _, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
        dfout = do_rerank(ranknetdf[year][f'{testmodel}_mean'])
        accret = stint.get_evalret_shortterm(dfout)[0]
        #fsamples, ftss = runs2samples_ex(ranknet_ret[f'oracle-RANK-{year}-inlap-nopitage'],[])
        #_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
        retdata.append([year,f'{testmodel}',configname,'all', accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
        # split the metrics by lap type: normal laps vs pit-covered laps
        for laptype in ['normal','pit']:
            # select the set
            pitcoveredlaps = pitdata[year][1]
            normallaps = set([x for x in range(1,201)]) - pitcoveredlaps
            if laptype == 'normal':
                sellaps = normallaps
                clearlaps = pitcoveredlaps
            else:
                sellaps = pitcoveredlaps
                clearlaps = normallaps
            # pitcoveredlaps start idx = 1
            startlaps = [x-plen-1 for x in sellaps]
            #sellapidx = np.array([x-1 for x in sellaps])
            clearidx = np.array([x-1 for x in clearlaps])
            print('sellaps:', len(sellaps), 'clearlaps:',len(clearlaps))
            #oracle
            #outfile=f'shortterm-dfout-ranknet-indy500-rank-inlap-nopitage-20182019-oracle-l10-alldata-weighted.pickle'
            #_all = load_dfout_all(outfile)[0]
            #ranknetdf, acc, ret, pret = _all[0],_all[1],_all[2],_all[3]
            dfout = do_rerank(ranknetdf[year][f'{testmodel}_mean'])
            allsamples, alltss = get_allsamples(dfx, year=year)
            # blank out the laps of the other class before computing the risk
            allsamples, alltss = clear_samples(allsamples, alltss,clearidx)
            _, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
            dfout = dfout[dfout['startlap'].isin(startlaps)]
            accret = stint.get_evalret_shortterm(dfout)[0]
            print(year, laptype,f'RankNet-{testmodel}',accret[0], accret[1], prisk_vals[1], prisk_vals[2])
            retdata.append([year, f'{testmodel}',configname,laptype, accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
    ##-------------------------------------------------------------------------------
    elif _forecast_mode == 'stint':
        # compare against the pre-computed ML-model baselines
        if testmodel == 'oracle':
            datafile=f'stint-dfout-mlmodels-indy500-tr2013_2017-te2018_2019-end1-oracle-t0-tuned.pickle'
        else:
            datafile=f'stint-dfout-mlmodels-indy500-tr2013_2017-te2018_2019-end1-normal-t0-tuned.pickle'
        #preddf = load_dfout(outfile)
        with open(datafile, 'rb') as f:
            preddf = pickle.load(f, encoding='latin1')[0]
        #preddf_oracle = load_dfout(outfile)
        ranknet_ret = ret
        errlist = {}
        # records that differ between the two frames are excluded everywhere below
        errcnt, errlist[year] = cmp_df(ranknetdf[year][f'{testmodel}_mean'], preddf[year]['lasso'])
        retdata = []
        #
        # Model,SignAcc,MAE,50-Risk,90-Risk
        #
        cols = ['Year','Model','ExpID','laptype','SignAcc','MAE','50-Risk','90-Risk']
        models = {'currank':'CurRank','rf':'RandomForest','svr_lin':'SVM','xgb':'XGBoost'}
        for clf in ['currank','rf','svr_lin','xgb']:
            print('year:',year,'clf:',clf)
            dfout, accret = eval_sync(preddf[year][clf],errlist[year])
            fsamples, ftss = df2samples_ex(dfout)
            _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
            retdata.append([year,models[clf],configname,'all', accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
        #ml models -oracle
        #for clf in ['rf','svr_lin','xgb']:
        #    print('year:',year,'clf:',clf)
        #    dfout, accret = eval_sync(preddf_oracle[year][clf],errlist[year])
        #    fsamples, ftss = df2samples(dfout)
        #    _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
        #    retdata.append([year,models[clf]+'-Oracle',configname,'all',accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
        dfout, accret = eval_sync(ranknetdf[year][f'{testmodel}_mean'], errlist[year],force2int=True)
        #fsamples, ftss = df2samples(dfout)
        fsamples, ftss = runs2samples(ranknet_ret[mid],errlist[f'{year}'])
        _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
        retdata.append([year,f'{testmodel}',configname,'all',accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
        #dfout, accret = eval_sync(ranknetdf[year]['oracle_mean'], errlist[year],force2int=True)
        ##fsamples, ftss = df2samples(dfout)
        #fsamples, ftss = runs2samples(ranknet_ret[f'oracle-TIMEDIFF-{year}-noinlap-nopitage'],errlist[f'{year}'])
        #_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
        #retdata.append([year,'RankNet-Oracle',accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
    oracle_eval_result = pd.DataFrame(data=retdata, columns=cols)
    if _savedata:
        oracle_eval_result.to_csv(EVALUATION_RESULT_DF)
# ### 6. Draw forecasting results
# In[ ]:
# Long-horizon forecasting for the short-term, separately-trained setup:
# either reload a cached result or run per-car long forecasts and cache them.
if _forecast_mode == 'shortterm' and _joint_train == False:
    if _skip_overwrite and os.path.exists(LONG_FORECASTING_DFS):
        # a cached run exists: just reload it
        fname = LONG_FORECASTING_DFS
        print('Load Long Forecasting Data:',fname)
        with open(fname, 'rb') as f:
            alldata = pickle.load(f, encoding='latin1')
        print('.......loaded data, alldata keys=', alldata.keys())
    else:
        oracle_ret = ret
        # key into the simulation results for the current configuration
        mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], cur_featurestr)
        print('eval mid:', mid, f'{testmodel}_ret keys:', ret.keys())
        ## init predictor
        _predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
        oracle_dfout = do_rerank(dfs[year][f'{testmodel}_mean'])
        carlist = set(list(oracle_dfout.carno.values))
        carlist = [int(x) for x in carlist]
        print('carlist:', carlist,'len:',len(carlist))
        #carlist = [13, 7, 3, 12]
        #carlist = [13]
        retdata = {}
        for carno in carlist:
            print("*"*40)
            print('Run models for carno=', carno)
            # create the test_ds first
            test_cars = [carno]
            #train_ds, test_ds, trainset, testset = stint.make_dataset_byevent(events_id[_test_event],
            #                             prediction_length,freq,
            #                             oracle_mode=stint.MODE_ORACLE,
            #                             run_ts = _run_ts,
            #                             test_event = _test_event,
            #                             test_cars=test_cars,
            #                             half_moving_win = 0,
            #                             train_ratio = 0.01)
            train_ds, test_ds, trainset, testset = make_dataset_byevent(prepared_laptimedata, prediction_length,freq,
                                     useeid=useeid, run_ts=_run_ts,
                                     test_event=_test_event, log_transform =False,
                                     context_ratio=0, train_ratio = 0,
                                     test_cars = test_cars)
            if (len(testset) <= 10 + prediction_length):
                print('ts too short, skip ', len(testset))
                continue
            #by first run samples
            samples = oracle_ret[mid][0][1][test_cars[0]]
            tss = oracle_ret[mid][0][2][test_cars[0]]
            target_oracle1, tss_oracle1 = long_predict_bysamples('1run-samples', samples, tss)
            #by first run output df(_use_mean = true, already reranked)
            df = oracle_ret[mid][0][0]
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle2, tss_oracle2 = long_predict_bydf(f'{testmodel}-1run-dfout', dfin_oracle)
            #by multi-run mean at oracle_dfout
            df = oracle_dfout
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle3, tss_oracle3 = long_predict_bydf(f'{testmodel}-multimean', dfin_oracle)
            #no rerank
            df = ranknetdf[year][f'{testmodel}_mean']
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle4, tss_oracle4 = long_predict_bydf(f'{testmodel}-norerank-multimean', dfin_oracle)
            #by multiple runs
            target_oracle_multirun, tss_oracle_multirun = get_ranknet_multirun(
                            oracle_ret[mid],
                            test_cars[0],sampleCnt=loopcnt)
            # per-car result: [list of truth series, list of forecast series]
            retdata[carno] = [[tss_oracle1,tss_oracle2,tss_oracle3,tss_oracle4,tss_oracle_multirun],
                        [target_oracle1,target_oracle2,target_oracle3,target_oracle4,target_oracle_multirun]]
        alldata = retdata
        if _savedata:
            with open(LONG_FORECASTING_DFS, 'wb') as f:
                pickle.dump(alldata, f, pickle.HIGHEST_PROTOCOL)
# In[ ]:
# Render the long-forecasting figures (skipped when the output dir exists).
if _forecast_mode == 'shortterm' and _joint_train == False:
    destdir = FORECAST_FIGS_DIR
    if _skip_overwrite and os.path.exists(destdir):
        print('Long Forecasting Figures at:',destdir)
    else:
        # stage data provides the ground-truth rank series for the plots
        with open('stagedata-Indy500_2013_2019_v9_p0.pickle', 'rb') as f:
            stagedata = pickle.load(f, encoding='latin1')
        _alldata, rankdata, _acldata, _flagdata = stagedata[_test_event]
        #destdir = outputRoot + 'oracle-forecast-figs/'
        os.makedirs(destdir, exist_ok=True)
        for carno in alldata:
            plotoracle(alldata, carno, destdir)
        #draw summary result
        outputfile = destdir + f'{configname}'
        plotallcars(alldata, outputfile, drawid = 0)
# final output
# disable pandas truncation so the whole evaluation table is printed
pd.set_option("display.max_rows", None, "display.max_columns", None)
print(oracle_eval_result)
| 122,909 | 32.840859 | 189 | py |
rankpredictor | rankpredictor-master/run/22.PaperFinal/notebook/RankNet-QuickTest-2fmodes.py | #!/usr/bin/env python
# coding: utf-8
# ## QuickTest
#
# makedb laptime
# makedb gluonts
# train model
# evaluate model
#
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import random
import mxnet as mx
from mxnet import gluon
import pickle
import json
import copy
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from pathlib import Path
import configparser
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWeightEstimator
import indycar.model.stint_simulator_shortterm_pitmodel as stint
# In[2]:
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-lap completed-laps view of one race.

    Keeps the first record per (car_number, completed_laps) ordered by
    elapsed_time, then derives per-car lap-to-lap deltas.

    Parameters
    ----------
    dataset : pd.DataFrame
        Raw race telemetry; must contain at least car_number,
        completed_laps, rank, elapsed_time, track_status, lap_status plus
        the auxiliary columns dropped below.

    Returns
    -------
    pd.DataFrame
        Columns ['car_number','completed_laps','time_diff','rank',
        'track_status','lap_status','elapsed_time'], one row per
        (car, lap).
    """
    # pick up data with valid rank: first record per (car, lap) in time order
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade", "laps_behind_prec", "overall_rank", "pit_stop_count",
                          "last_pitted_lap", "start_position", "laps_led"], axis=1)
    carnumber = set(uni_ds['car_number'])
    print('cars:', carnumber)
    print('#cars=', len(carnumber))
    # Per-car deltas: rows are sorted by (car_number, lap), so the first row
    # of each car (where car_number changes) must be reset to 0.
    # Use .loc instead of the original chained `df[col][mask] = 0` indexing,
    # which raises SettingWithCopyWarning and silently fails to write under
    # pandas copy-on-write.
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds.loc[mask, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[mask, 'time_diff'] = 0
    df = uni_ds[['car_number', 'completed_laps', 'time_diff', 'rank', 'track_status', 'lap_status', 'elapsed_time']]
    return df
def make_lapstatus_data(dataset):
    """Return the per-lap track status taken from one car that finished.

    Any finisher sees every lap of the race, so the first finisher's
    (de-duplicated) lap records give the race-wide flag sequence.
    """
    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1
    # cars that reached the final lap are the finishers
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)
    print('count of completed cars:', completed_car_count)
    print('completed cars:', completed_car_numbers)
    # any finisher works; take the first one and keep one record per lap
    ref_car = dataset[dataset['car_number'] == completed_car_numbers[0]]
    ref_car = ref_car.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    return ref_car[['completed_laps', 'track_status']]
def load_data(event, year=0):
    """Load one race's final CSV and derive the standard per-lap views.

    Parameters
    ----------
    event : str
        Event name used to build the CSV filename.
    year : int, optional
        When > 0 the year is appended to the filename
        ('C_<event>-<year>.csv'); otherwise 'C_<event>.csv' is loaded.

    Returns
    -------
    (alldata, rankdata, acldata, flagdata)
        alldata  : raw frame containing every car
        rankdata : first record per (car, lap), ordered by elapsed time
        acldata  : make_cl_data view built from all cars
        flagdata : per-lap track status taken from one finisher
    """
    #inputfile = '../data/final/C_'+ event +'-' + year + '-final.csv'
    if year > 0:
        # str(year): year is an int here (the `year > 0` test would raise for
        # a str), so it must be converted before concatenation — the original
        # `'-' + year` raised TypeError.
        inputfile = '../data/final/C_' + event + '-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'
    dataset = pd.read_csv(inputfile)
    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1
    # get records for the cars that finish the race
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)
    print('count of completed cars:', completed_car_count)
    print('completed cars:', completed_car_numbers)
    # make a copy of everything before filtering down to the finishers
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # NOTE: cldata is computed (and prints) but is not returned — kept for
    # output parity with the original code.
    cldata = make_cl_data(dataset)
    flagdata = make_lapstatus_data(dataset)
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata, flagdata
def nan_helper(y):
    """Locate NaNs in a 1-d array and provide an index converter.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, boolean mask of the NaN positions
        - index, callable mapping a logical mask to integer indices

    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x= nan_helper(y)
        >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)

    def to_indices(logical):
        return logical.nonzero()[0]

    return mask, to_indices
def get_lap2nextpit(lap_status, maxlap=200):
    """
    input:
        lap_status ; array of 0/1 indicating pitstops for each lap, nan means incomplete race
        maxlap ; the max lap number of the race
    output:
        lap2nextpit ; per-lap gap (in laps) to the next pit stop, NaN once
                      no further pit stop exists
    """
    pit_laps = list(np.where(lap_status == 1)[0])
    # a car without NaN gaps completed the race, so its last stint
    # effectively runs to maxlap
    if np.sum(np.isnan(lap_status)) == 0:
        pit_laps.append(maxlap)
    lap2nextpit = np.zeros_like(lap_status)
    lap2nextpit[:] = np.nan
    # guard: no pit stops at all
    if not pit_laps:
        return lap2nextpit
    nxt = 0
    for lap in range(len(lap_status)):
        # once we reach the current pit lap, advance to the following one
        if lap >= pit_laps[nxt]:
            nxt += 1
            if nxt >= len(pit_laps):
                break
        lap2nextpit[lap] = pit_laps[nxt] - lap
    return lap2nextpit
def get_lapdata(acldata):
    """Flatten the completed-laps view into one row per (car, lap).

    input:
        acldata['car_number','completed_laps','time_diff','rank','track_status', 'lap_status','elapsed_time']
        timediff: [car_number, completed_laps] -> elapsed time diff to leader
    output:
        np.array whose rows are
        [car_number, lap, time_diff, rank, track_status, lap_status,
         time_behind, elapsed_time]
        where time_behind is the gap to the lap leader (minimum elapsed_time
        of that lap). Lap 0 records are skipped (loop starts at lap 1).
    """
    COL_COMPLETED_LAPS = 1
    COL_ELAPSED_TIME = 6
    maxlap = np.max(acldata['completed_laps'].values)
    #'car_number','completed_laps','time_diff','rank','track_status', 'lap_status','time_behind'
    time_behind = []
    for lap in range(1, maxlap+1):
        this_lap = acldata[acldata['completed_laps']==lap][
                        ['car_number','completed_laps','time_diff','rank',
                        'track_status', 'lap_status','elapsed_time']].values
        # leader's elapsed time for this lap; use the builtin float —
        # the bare np.float alias was removed in NumPy 1.24 and raised
        # AttributeError here.
        min_elapsed_time = np.nanmin(this_lap[:,COL_ELAPSED_TIME].astype(float))
        #print(f'lap:{lap}, min_elapsed_time:{min_elapsed_time}')
        for row in this_lap:
            car_number = int(row[0])
            time_diff = row[2]
            rank = row[3]
            track_status = row[4]
            lap_status = row[5]
            timebehind = float(row[COL_ELAPSED_TIME]) - min_elapsed_time
            time_behind.append([car_number, lap, time_diff,rank,track_status, lap_status,
                               timebehind, float(row[COL_ELAPSED_TIME])])
    lapdata = np.array(time_behind)
    return lapdata
# features: laptime, rank, track_status, lap_status, timediff
# Row indices into the per-car feature matrix datalist[car, feature, lap]
# built by get_laptime_dataset.
LAPTIME = 0
RANK = 1
TRACK_STATUS = 2
LAP_STATUS = 3
TIME_BEHIND = 4
CAUTION_LAPS_INSTINT = 5
LAPS_INSTINT = 6
ELAPSED_TIME = 7
LAP2NEXTPIT = 8
# total number of feature rows allocated per car
_featureCnt = 9
def get_laptime_dataset(stagedata, inlap_status = 0):
    """
    Build the per-event laptime/rank feature tensors.

    #add caution_laps_instint, laps_instint
    input:
        stagedata : {event: (alldata, rankdata, acldata, flagdata)}
        inlap_status : 0 = leave pit laps as-is; 1 = also mark the lap
                       BEFORE each 'P' lap; other non-zero = also mark the
                       lap AFTER each 'P' lap
    output: laptime & rank data
       [(
          eventid,
          carids : rowid -> carno,
          datalist: #car_number x features x #totallaps (padded by Nan)
            entry: [[laptime, rank, track_status, lap_status,
                   caution_laps_instint, laps_instint]]
       )]
    Relies on the module-level `events_id` mapping and the feature-index
    constants (LAPTIME .. LAP2NEXTPIT, _featureCnt).
    """
    laptime_data = []
    for event in stagedata.keys():
        print(f'start event: {event}')
        laptime_rec = []
        eventid = events_id[event]
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        totalcars = len(carlist)
        totallaps = len(laplist)
        #carnumber -> carid
        carids={key:idx for idx, key in enumerate(carlist)}
        decode_carids={idx:key for idx, key in enumerate(carlist)}
        # per-car running stint counters (reset at each pit stop)
        lap_instint = {carids[x]:0 for x in carlist}
        caution_instint = {carids[x]:0 for x in carlist}
        #array: car_number x lap
        #laptime = np.zeros((totalcars, totallaps-1))
        #rank = np.zeros((totalcars, totallaps-1))
        laptime = np.empty((totalcars, totallaps-1))
        rank = np.empty((totalcars, totallaps-1))
        laptime[:] = np.NaN
        rank[:] = np.NaN
        # main output tensor, NaN-padded for laps a car never completed
        datalist = np.empty((totalcars, _featureCnt, totallaps-1))
        datalist[:] = np.NaN
        #lapdata = acldata[['car_number','completed_laps',
        #    'time_diff','rank','track_status', 'lap_status','elapsed_time']].to_numpy()
        #'car_number','completed_laps','time_diff','rank','track_status', 'lap_status','time_behind'
        lapdata = get_lapdata(acldata)
        for row in lapdata:
            # skip completed_laps == 0 (no lap has been run yet)
            if int(row[1]) == 0:
                continue
            #add to data array
            car_number = carids[int(row[0])]
            completed_laps = int(row[1])-1
            time_diff = float(row[2])
            # NOTE(review): this rebinds the local `rank`, shadowing the
            # `rank` array allocated above (which is otherwise unused).
            rank = int(row[3])
            track_status = 1 if row[4]=='Y' else 0
            lap_status = 1 if row[5]=='P' else 0
            time_behind = float(row[6])
            datalist[car_number, LAPTIME, completed_laps] = time_diff
            datalist[car_number, RANK, completed_laps] = rank
            datalist[car_number, TRACK_STATUS, completed_laps] = track_status
            datalist[car_number, LAP_STATUS, completed_laps] = lap_status
            datalist[car_number, TIME_BEHIND, completed_laps] = time_behind
            datalist[car_number, ELAPSED_TIME, completed_laps] = float(row[7])
            # stint status: count laps (and caution laps) since the last pit
            if track_status == 1:
                caution_instint[car_number] += 1
            lap_instint[car_number] += 1
            if lap_status == 1:
                #new stint
                lap_instint[car_number] = 0
                caution_instint[car_number] = 0
            # add inlap feature into lap_status: optionally also mark the
            # lap adjacent to each pit lap as a pit lap
            if (inlap_status!=0):
                if inlap_status == 1:
                    # set the previous lap of 'P'
                    if completed_laps > 0:
                        #datalist[car_number, LAP_STATUS, completed_laps-1] = INLAP_STATUS
                        datalist[car_number, LAP_STATUS, completed_laps-1] = 1
                else:
                    # set the next lap of 'P'
                    if completed_laps +1 < totallaps:
                        #datalist[car_number, LAP_STATUS, completed_laps-1] = INLAP_STATUS
                        datalist[car_number, LAP_STATUS, completed_laps + 1] = 1
            datalist[car_number, LAPS_INSTINT, completed_laps] = lap_instint[car_number]
            datalist[car_number, CAUTION_LAPS_INSTINT, completed_laps] = caution_instint[car_number]
        # derive the laps-to-next-pit feature from the finished lap_status rows
        for caridx in range(datalist.shape[0]):
            lap_status = datalist[caridx, LAP_STATUS, :]
            #pit status
            lap2nextpit = get_lap2nextpit(lap_status)
            datalist[caridx, LAP2NEXTPIT, :] = lap2nextpit
        #add one record
        laptime_data.append([eventid, decode_carids, datalist])
        # push this event into stage dataframe
        print('event=%s, records=%s'%(event, datalist.shape))
    return laptime_data
# In[3]:
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# Column (feature-row) indices used by make_dataset_byevent when slicing
# the datalist tensors produced by get_laptime_dataset.
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSED_TIME= 7
# feature-mode selectors (which dynamic covariates are attached)
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
# oracle_mode bit flags; combined with OR and checked via test_flag()
MODE_ORACLE = 0
MODE_NOLAP = 1
MODE_NOTRACK = 2
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
#MODE_STR={MODE_ORACLE:'oracle', MODE_NOLAP:'nolap',MODE_NOTRACK:'notrack',MODE_TEST:'test'}
#_feature_mode = FEATURE_STATUS
def make_dataset_byevent(runs, prediction_length, freq,
                       useeid = False,
                       run_ts=COL_LAPTIME,
                       test_event = 'Indy500-2018',
                       use_global_dict = True,
                       oracle_mode = MODE_ORACLE,
                       half_moving_win = True,
                       train_ratio=0.8,
                       log_transform = False,
                       context_ratio = 0.,
                       dorerank = True
                       ):
    """
    Split the series in the module-level `laptime_data` into GluonTS
    train/test sets.

    split the ts to train and test part by the ratio
    oracle_mode: false to simulate prediction in real by
            set the covariates of track and lap status as nan in the testset

    Parameters
    ----------
    runs : int
        Index of a single event in `laptime_data`, or negative for all
        events up to (and including) `test_event`.
    prediction_length : int
        Forecast horizon in laps.
    freq : str
        Pandas frequency string for the synthetic timestamps.
    useeid : bool
        When True, add the event id as a second static categorical feature.
    run_ts : int
        Feature row used as the prediction target (COL_LAPTIME, COL_RANK, ...).
    test_event : str
        Event whose series form the test set; later events are skipped.
    use_global_dict : bool
        Map car numbers through the module-level `global_carids`.
    oracle_mode : int
        Bit flags (MODE_*) controlling how the track/lap covariates are
        masked in the prediction window; see `test_flag`.
    half_moving_win : bool
        Roll the test window by prediction_length/2 instead of a full window.
    train_ratio, context_ratio : float
        Fractions of the longest series used for the train / context cut.
    log_transform : bool
        Apply log(x + 1) to the target.
    dorerank : bool
        When targeting COL_RANK, recompute dense ranks after dropping
        short series.

    Returns
    -------
    (train_ds, test_ds, train_set, test_set) : ListDataset pair plus the
        underlying record lists.
    """
    #global setting
    feature_mode = _feature_mode
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []
    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()
    totalTSCnt = 0
    totalTSLen = 0
    test_eventid = events_id[test_event]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        #skip eid > test_eventid
        if _data[0] > test_eventid:
            print('skip this event:', events[_data[0]])
            break
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            #use global train_len
            train_len = _train_len
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10
        print(f'====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        #rerank due to short ts removed
        if run_ts == COL_RANK and dorerank == True:
            sel_rows = []
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    print(f'rerank a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                else:
                    sel_rows.append(rowid)
            #get selected matrix
            sel_idx = np.array(sel_rows)
            selmat = _data[2][sel_idx]
            mask = np.isnan(selmat[:,COL_RANK,:])
            idx = np.argsort(selmat[:,COL_RANK,:], axis=0)
            # double argsort produces the dense per-lap rank; use the builtin
            # float — the bare np.float alias was removed in NumPy 1.24.
            true_rank = np.argsort(idx, axis=0).astype(float)
            true_rank[mask] = np.nan
            # write the reranked values back through fancy indexing on the
            # original array (indexing twice would write into a copy)
            _data[2][sel_idx,COL_RANK,:] = true_rank
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            totalTSCnt += 1
            totalTSLen += totallen
            if ( totallen < train_len + prediction_length):
                print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # selection of features: optionally blank out the covariates
            if test_flag(oracle_mode, MODE_NOTRACK):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # training events: the whole series goes to the train set
                if feature_mode == FEATURE_PITAGE:
                    _train.append({'target': target_val,
                                   'start': start,
                                   'feat_static_cat': static_cat,
                                   'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                                         rec[COL_LAPSTATUS,:],
                                                         rec[COL_LAPS_INSTINT,:]]
                                  }
                                 )
                else:
                    _train.append({'target': target_val,
                                   'start': start,
                                   'feat_static_cat': static_cat,
                                   'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                                         rec[COL_LAPSTATUS,:]]
                                  }
                                 )
            else:
                if context_ratio != 0.:
                    # add the [0, context_len) prefix of the test series to
                    # the train set
                    if feature_mode == FEATURE_PITAGE:
                        _train.append({'target': target_val[:context_len],
                                       'start': start,
                                       'feat_static_cat': static_cat,
                                       'feat_dynamic_real': [rec[COL_TRACKSTATUS,:context_len],
                                                             rec[COL_LAPSTATUS,:context_len],
                                                             rec[COL_LAPS_INSTINT,:context_len]
                                                            ]
                                      }
                                     )
                    else:
                        _train.append({'target': target_val[:context_len],
                                       'start': start,
                                       'feat_static_cat': static_cat,
                                       'feat_dynamic_real': [rec[COL_TRACKSTATUS,:context_len],
                                                             rec[COL_LAPSTATUS,:context_len]
                                                            ]
                                      }
                                     )
                # testset
                # multiple test ts(rolling window as half of the prediction_length)
                step = -int(prediction_length/2) if half_moving_win else -prediction_length
                for endpos in range(totallen, context_len+prediction_length,
                                    step):
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    # test mode: mask the covariates of the forecast window
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as zero
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                        #for pitage, just assume there is no pit
                        start_pitage = pitage_rec[-prediction_length - 1]
                        pitage_rec[-prediction_length:] = np.array([x+start_pitage+1 for x in range(prediction_length)])
                    if feature_mode == FEATURE_PITAGE:
                        _test.append({'target': rec[run_ts,:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec,pitage_rec]
                                     }
                                    )
                    else:
                        _test.append({'target': rec[run_ts,:endpos].astype(np.float32),
                                      'start': start,
                                      'feat_static_cat': static_cat,
                                      'feat_dynamic_real': [track_rec,lap_rec]
                                     }
                                    )
                    test_rec_cnt += 1
            #add one ts
            print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
        train_set.extend(_train)
        test_set.extend(_test)
    print(f'train len:{len(train_set)}, test len:{len(test_set)}, totsl TsCnt:{totalTSCnt}, total ts len:{totalTSLen}')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)
    return train_ds, test_ds, train_set, test_set
def makedbs():
    """Build and pickle the GluonTS train/test dataset for the test event.

    Uses the module-level `global_carids`, `laptime_data`, `prediction_length`,
    `freq`, `_run_ts`, `_test_event` and `_task_id`.
    Returns (dbname, train_ds, test_ds).
    """
    useeid = False
    interpolate = False
    #ipstr = '-ip' if interpolate else '-noip'
    ipstr = '%s-%s'%('ip' if interpolate else 'noip', 'eid' if useeid else 'noeid')
    # cardinality of the static categorical features (car id, optional event id)
    if useeid:
        cardinality = [len(global_carids), len(laptime_data)]
    else:
        cardinality = [len(global_carids)]
    # train_ratio=0 makes make_dataset_byevent fall back to the global _train_len
    train_ds, test_ds,_,_ = make_dataset_byevent(-1, prediction_length,freq,
                            useeid=useeid, run_ts=_run_ts,
                            test_event=_test_event, log_transform =False,
                            context_ratio=0, train_ratio = 0, dorerank =True)
    dbname = f'{_task_id}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-gluonts-indy-2018.pickle'
    #save_dataset(dbname, freq, prediction_length, cardinality,train_ds, test_ds)
    with open(dbname, 'wb') as f:
        #pack [global_carids, laptime_data]
        savedata = [freq, prediction_length, cardinality, train_ds, test_ds]
        #savedata = [freq, train_set, test_set]
        # Pickle the 'data' dictionary using the highest protocol available.
        pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
    return dbname, train_ds, test_ds
# In[4]:
def init_estimator(model, gpuid, epochs=100, batch_size = 32,
        target_dim = 3, distr_output = None, use_feat_static = True):
    """Instantiate a GluonTS estimator (or baseline predictor) by name.

    model        : one of 'deepAR', 'deepARW', 'deepAR-Oracle',
                   'deepARW-Oracle', 'deepAR-nocarid', 'deepAR-multi',
                   'simpleFF', 'deepFactor', 'deepState', 'ets', 'prophet',
                   'arima', 'naive'
    gpuid        : int-like; negative selects CPU, otherwise "gpu(<id>)"
    epochs       : training epochs for the neural estimators
    batch_size   : trainer batch size
    target_dim   : output dimension for the multivariate 'deepAR-multi' model
    distr_output : GluonTS DistributionOutput for the deepAR variants
    use_feat_static : attach the car id as a static categorical feature

    Relies on the module-level `prediction_length`, `context_length`,
    `cardinality` and `freq`. Exits the process for an unknown model name.
    """
    # negative gpuid selects CPU training
    if int(gpuid) < 0:
        ctx = "cpu"
    else:
        ctx = "gpu(%s)"%gpuid
    if model == 'deepAR':
        # plain DeepAR with car id static feature, no dynamic covariates
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            distr_output = distr_output,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                        batch_size = batch_size,
                        epochs=epochs,
                        learning_rate=1e-3,
                        num_batches_per_epoch=100
                       )
        )
    elif model == 'deepARW':
        # NOTE(review): DeepARWEstimator is not imported or defined in this
        # file (the import above is DeepARWeightEstimator) — this branch
        # would raise NameError; confirm.
        estimator = DeepARWEstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            distr_output = distr_output,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                        batch_size = batch_size,
                        epochs=epochs,
                        learning_rate=1e-3,
                        num_batches_per_epoch=100
                       )
        )
    elif model == 'deepAR-Oracle':
        # DeepAR with oracle dynamic covariates (track/lap status)
        if use_feat_static:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
            )
        else:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
            )
    elif model == 'deepARW-Oracle':
        # weighted DeepAR variant with oracle dynamic covariates
        if use_feat_static:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            #hybridize=False,
                            num_batches_per_epoch=100
                           )
            )
        else:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            #hybridize=False,
                            num_batches_per_epoch=100
                           )
            )
    elif model == 'deepAR-nocarid':
        # same as deepAR-Oracle but the caller passes use_feat_static=False
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=use_feat_static,
            cardinality=cardinality,
            use_feat_dynamic_real=True,
            distr_output = distr_output,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                        batch_size = batch_size,
                        epochs=epochs,
                        learning_rate=1e-3,
                        num_batches_per_epoch=100
                       )
        )
    elif model == 'deepAR-multi':
        # multivariate DeepAR over target_dim series
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=use_feat_static,
            #cardinality=cardinality,
            use_feat_dynamic_real=True,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                        batch_size = batch_size,
                        epochs=epochs,
                        learning_rate=1e-3,
                        num_batches_per_epoch=100
                       ),
            distr_output=MultivariateGaussianOutput(dim=target_dim),
        )
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                        batch_size = batch_size,
                        epochs=epochs,
                        learning_rate=1e-3,
                        hybridize=False,
                        num_batches_per_epoch=100
                       )
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                        batch_size = batch_size,
                        epochs=epochs,
                        learning_rate=1e-3,
                        num_batches_per_epoch=100
                       )
        )
    elif model == 'deepState':
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                        batch_size = batch_size,
                        epochs=epochs,
                        learning_rate=1e-3,
                        num_batches_per_epoch=100
                       )
        )
    elif model == 'ets':
        # classic baselines below are predictors, not trainable estimators
        estimator = RForecastPredictor(method_name='ets',freq= freq, prediction_length = prediction_length)
    elif model == 'prophet':
        estimator = ProphetPredictor(freq= freq, prediction_length = prediction_length)
    elif model == 'arima':
        estimator = RForecastPredictor(method_name='arima',freq= freq, prediction_length = prediction_length, trunc_length = 200)
    elif model == 'naive':
        estimator = NaivePredictor(freq= freq, prediction_length = prediction_length)
    else:
        # NOTE(review): `logger` is never defined or imported in this file's
        # visible code — this line would raise NameError; confirm a logger is
        # set up elsewhere.
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)
    return estimator
# In[5]:
def init_simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
        featuremode = stint.FEATURE_STATUS,
        pitmodel = 0,
        inlapmode=0,
        train_len = 40):
    """Push the simulation configuration into the stint module's globals.

    model path: <_dataset_id>/<_task_id>-<trainid>/
    Note: `predictionlen` is accepted for signature parity but unused here.
    """
    # inlap handling must be in place before init(), which builds the pit model
    stint._inlap_status = inlapmode
    stint.init(pitmodel)
    # dataset / event selection
    stint._dataset_id = datasetid
    stint._test_event = testevent
    # feature & task configuration
    stint._feature_mode = featuremode
    stint._context_ratio = 0.
    stint._task_id = taskid       # rank, laptime: the trained model's task
    stint._run_ts = runts         # COL_LAPTIME, COL_RANK
    stint._exp_id = expid         # rank, laptime, laptim2rank, timediff2rank...
    stint._use_mean = True
    stint._train_len = train_len
def simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
        datamode, loopcnt, featuremode = stint.FEATURE_STATUS,
        pitmodel = 0, model = 'oracle', inlapmode=0, train_len = 40):
    """Run `loopcnt` short-term simulations and report the mean accuracy.

    Configures the stint module's globals (same fields as init_simulation),
    loads the trained model, then runs the short-term simulation loopcnt
    times.

    Returns (b, ret2) where b is an np.array of per-run accuracy tuples and
    ret2 maps run index -> (df, full_samples, full_tss).

    NOTE(review): `epochs` is read from a module-level global here — confirm
    it is defined before this is called.
    """
    #
    # configuration
    #
    # model path: <_dataset_id>/<_task_id>-<trainid>/
    #_dataset_id = 'indy2013-2018-nocarid'
    stint._inlap_status = inlapmode
    stint.init(pitmodel)
    stint._dataset_id = datasetid
    stint._test_event = testevent
    #_test_event = 'Indy500-2019'
    stint._feature_mode = featuremode
    stint._context_ratio = 0.
    stint._task_id = taskid # rank,laptime, the trained model's task
    stint._run_ts = runts #COL_LAPTIME,COL_RANK
    stint._exp_id=expid #rank, laptime, laptim2rank, timediff2rank...
    stint._use_mean = True
    stint._train_len = train_len
    predictor = stint.load_model(predictionlen, model,trainid='indy500',epochs = epochs, exproot='./')
    ret2 = {}
    for i in range(loopcnt):
        # each run returns (df, full_samples, full_tss)
        ret2[i] = stint.run_simulation_shortterm(predictor, predictionlen, stint.freq, datamode=datamode)
    # evaluate every run and print the average metrics
    acc = []
    for i in ret2.keys():
        df = ret2[i][0]
        _x = stint.get_evalret_shortterm(df)
        acc.append(_x)
    b = np.array(acc)
    print(np.mean(b, axis=0))
    #save keys
    #stint._pitmodel.save_keys('pitmodel-keys.pickle')
    return b, ret2
def get_alldf(dfx, year=2018):
    """Concatenate the per-run result DataFrames stored in ``dfx``.

    Parameters
    ----------
    dfx : dict
        Maps run-id -> tuple whose first element is a result DataFrame.
    year : int
        Unused; kept for backward compatibility with existing callers.

    Returns a single DataFrame with all runs stacked.
    """
    retdfs = [dfx[run_id][0] for run_id in dfx.keys()]
    return pd.concat(retdfs) if len(retdfs) > 1 else retdfs[0]


def get_alldf_mode(dfx, year=2018, mode=0):
    """Aggregate multi-run predictions per (carno, startlap) record.

    mode:
        0 ; mode   (scipy.stats.mode)
        1 ; mean
        2 ; median

    Raises
    ------
    ValueError
        For any other mode value. (The original code silently fell
        through and used an undefined/stale ``pred_endrank``.)

    Returns a DataFrame with one aggregated row per (carno, startlap).
    """
    if mode not in (0, 1, 2):
        raise ValueError('unsupported mode: %s' % mode)
    dfall = get_alldf(dfx, year=year)
    cars = set(dfall.carno.values)
    startlaps = {car: set(dfall[dfall['carno'] == car].startlap.values)
                 for car in cars}
    retdf = []
    for car in cars:
        for startlap in startlaps[car]:
            dfrec = dfall[(dfall['carno'] == car) & (dfall['startlap'] == startlap)]
            if mode == 0:
                # NOTE(review): .mode[0] assumes the pre-1.11 scipy
                # ModeResult array shape -- verify the pinned scipy version
                pred_endrank = stats.mode(dfrec.pred_endrank.values).mode[0]
            elif mode == 1:
                pred_endrank = np.mean(dfrec.pred_endrank.values)
            else:
                pred_endrank = np.median(dfrec.pred_endrank.values)
            # take the first record and overwrite the prediction columns:
            # col 6 = pred_endrank, col 7 = pred_diff, col 8 = pred_sign
            firstrec = dfrec.to_numpy()[0, :]
            firstrec[6] = pred_endrank
            # NOTE(review): diff is taken against col 2 (startrank) here,
            # while do_rerank diffs against endrank -- confirm intended
            firstrec[7] = pred_endrank - firstrec[2]
            if firstrec[7] == 0:
                firstrec[8] = 0
            elif firstrec[7] > 0:
                firstrec[8] = 1
            else:
                firstrec[8] = -1
            retdf.append(firstrec)
    dfout = pd.DataFrame(retdf, columns=['carno', 'startlap', 'startrank',
                                         'endrank', 'diff', 'sign',
                                         'pred_endrank', 'pred_diff', 'pred_sign',
                                         ])
    print('df size:', len(dfout))
    return dfout
def get_allsamples(dfx, year=2018):
    """Collapse each run's forecast samples to their mean, stacked per car.

    dfx maps run-id (assumed 0..N-1) -> (df, forecast_dict, tss_dict).

    Returns:
        full_samples: dict carno -> (num_runs x num_laps) array of per-run
                      mean forecasts
        full_tss:     ground-truth dict taken from the first run
    """
    run_ids = list(dfx.keys())
    n_runs = len(run_ids)
    first_run = dfx[run_ids[0]]
    full_tss = first_run[2]
    cars = list(full_tss.keys())
    n_samples, n_laps = first_run[1][cars[0]].shape
    print('sacmplecnt:', n_samples, 'lapcnt:', n_laps, 'runcnt:', n_runs)
    # one (runs x laps) matrix per car
    full_samples = {car: np.zeros((n_runs, n_laps)) for car in cars}
    for rid in run_ids:
        # run ids double as row indices into the per-car matrix
        forecast = dfx[rid][1]
        for car in cars:
            full_samples[car][rid, :] = np.nanmean(forecast[car], axis=0)
    return full_samples, full_tss
#straight implementation of prisk
def quantile_loss(target, quantile_forecast, q):
    """Pinball (quantile) loss at level q, NaN-ignoring, doubled to match
    the gluonts convention."""
    diff = quantile_forecast - target
    indicator = (target <= quantile_forecast) - q
    return 2.0 * np.nansum(np.abs(diff * indicator))
def abs_target_sum(target):
    """Sum of absolute target values, ignoring NaNs (prisk denominator)."""
    return np.nansum(np.abs(target))
def prisk(full_samples, full_tss, verbose = False):
    """Compute weighted quantile losses via the gluonts Evaluator.

    full_samples: dict carno -> (num_samples x num_laps) forecast array
    full_tss:     dict carno -> ground-truth series
    Laps before index 12 are excluded from evaluation.
    Returns the gluonts agg_metrics dict.

    NOTE(review): `start + 12` relies on freq-carrying Timestamp integer
    arithmetic (deprecated/removed in newer pandas) -- verify against the
    pinned pandas version.
    """
    carlist = full_tss.keys()
    tss = []
    forecasts = []
    forecasts_mean = []   # built below but never passed to the evaluator
    freq = '1min'
    start = pd.Timestamp("01-01-2019", freq=freq)
    for car in carlist:
        testcar = car
        # sample forecast over laps 12..end, one row per sample path
        fc = SampleForecast(samples = full_samples[testcar][:, 12:], freq=freq, start_date=start + 12)
        # mean forecast collapsed to a single sample path
        samples = np.mean(full_samples[testcar][:, 12:], axis =0, keepdims=True)
        fc_mean = SampleForecast(samples = samples, freq=freq, start_date=start + 12)
        # ground truth as a minute-indexed DataFrame (full length)
        index = pd.date_range(start='2019-01-01 00:00:00', freq = 'T', periods = len(full_tss[testcar]))
        ts = pd.DataFrame(index = index, data = full_tss[testcar])
        tss.append(ts)
        forecasts.append(fc)
        forecasts_mean.append(fc_mean)
    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(tss))
    if verbose:
        print(json.dumps(agg_metrics, indent=4))
        print(agg_metrics["wQuantileLoss[0.1]"], agg_metrics["wQuantileLoss[0.5]"],agg_metrics["wQuantileLoss[0.9]"])
    return agg_metrics
def prisk_direct_bysamples2(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    target: endrank
    forecast: pred_endrank
    item_id: <carno, startlap>

    This function was a byte-for-byte duplicate of
    prisk_direct_bysamples; it now delegates to that implementation so
    the logic lives in one place. Kept for backward compatibility with
    existing callers.
    """
    return prisk_direct_bysamples(full_samples, full_tss, quantiles=quantiles,
                                  startid=startid, verbose=verbose)
# In[6]:
def prisk_direct_bysamples(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    calculate prisk by <samples, tss> directly (equal to gluonts implementation)
    target: endrank
    forecast: pred_endrank
    item_id: <carno, startlap>
    Laps before `startid` are excluded.
    Returns (agg_metrics dict, aggrisk array).
    """
    cars = full_tss.keys()
    n_cars = len(cars)
    n_q = len(quantiles)
    losses = np.zeros((n_cars, n_q))     # per-car, per-quantile pinball loss
    target_sums = np.zeros((n_cars))     # per-car sum of |target|
    aggrisk = np.zeros((n_q))
    for cid, car in enumerate(cars):
        forecast = full_samples[car]
        target = full_tss[car]
        # quantiles over the sample axis -> (len(quantiles) x laps)
        q_forecasts = np.quantile(forecast, quantiles, axis=0)
        for qid, q in enumerate(quantiles):
            losses[cid, qid] = quantile_loss(target[startid:], q_forecasts[qid][startid:], q)
        target_sums[cid] = abs_target_sum(target[startid:])
        if verbose==True and car==3:
            print('target:', target[startid:])
            # matches original behavior: shows the last quantile's forecast
            print('forecast:', q_forecasts[n_q - 1][startid:])
            print('target_sum:', target_sums[cid])
            print('quantile_forecasts:', q_forecasts[:,startid:])
    # aggregate across cars
    loss_sum = np.nansum(losses, axis=0)
    if verbose==True:
        print('prisk:', losses)
        print('prisk_sum:', loss_sum)
        print('target_sum:', target_sums)
    denom = np.sum(target_sums)
    for qid in range(n_q):
        aggrisk[qid] = np.divide(loss_sum[qid], denom)
    agg_metrics = {f'wQuantileLoss[{q}]': aggrisk[qid] for qid, q in enumerate(quantiles)}
    print(agg_metrics.values())
    return agg_metrics, aggrisk
def clear_samples(full_samples, full_tss, clearidx):
    """
    clear the laps in clearidx
    Returns deep copies of (full_samples, full_tss) with the given lap
    indices set to NaN; the inputs are left untouched.
    """
    import copy
    samples_out = copy.deepcopy(full_samples)
    tss_out = copy.deepcopy(full_tss)
    for car in full_tss.keys():
        samples_out[car][:, clearidx] = np.nan
        tss_out[car][clearidx] = np.nan
    return samples_out, tss_out
def do_rerank(dfout, short=True):
    """
    Globally re-rank predicted end ranks per startlap.

    Float predictions (pred_endrank) are converted to 0-based integer
    ranks within each startlap group; pred_diff/pred_sign are recomputed
    against endrank. With short=True the input (and output) carries only
    the first 9 columns (no endlap/pred_endlap).
    """
    cols = ['carno','startlap','startrank','endrank','diff','sign','pred_endrank','pred_diff','pred_sign','endlap','pred_endlap']
    colid = {name: pos for pos, name in enumerate(cols)}
    print('rerank...')
    stacked = None
    for lap in set(dfout.startlap.values):
        block = dfout[dfout['startlap'] == lap].to_numpy()
        # double argsort turns the float predictions into 0-based ranks
        order = np.argsort(block[:, colid['pred_endrank']], axis=0)
        block[:, colid['pred_endrank']] = np.argsort(order, axis=0)
        # recompute the diff/sign columns from the new ranks
        block[:, colid['pred_diff']] = block[:, colid['pred_endrank']] - block[:, colid['endrank']]
        for row in block:
            d = row[colid['pred_diff']]
            row[colid['pred_sign']] = 0 if d == 0 else (1 if d > 0 else -1)
        stacked = block if stacked is None else np.vstack((stacked, block))
    out_cols = cols[:-2] if short else cols
    return pd.DataFrame(stacked.astype(int), columns=out_cols)
# In[7]:
def long_predict_bymloutput_multirun(output, dfin, sampleCnt=100):
    """
    Stitch per-startlap ML predictions into one long-horizon forecast
    (multi-run variant: ALL rows matching a startlap become the sample
    column, so the spread across runs is kept).

    input:
        test_ds, _predictor : module-level globals (dataset / predictor)
        output    : figure tag (currently unused in this function)
        dfin      : DataFrame of per-startlap predictions
        sampleCnt : sample paths requested from the predictor
    returns (stitched_forecast, ts_of_first_series)
    """
    def get_start(idx):
        # minutes between this forecast's start_date and the series start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=_predictor,  # predictor
        num_samples=sampleCnt,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # forecasts are ordered latest-first: [-1] has the earliest start
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # one column per covered lap, NaN where no prediction is available
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
        # get prediction from ml output
        # pos = laps
        # 1 ... 10 | 11 <- start pos in forecasts
        # 0 ... 9 | 10 <- 9 is the startlap
        #
        # NOTE(review): -2 offset here vs -1 in long_predict_bydf -- the
        # two callers use different startlap indexing conventions; verify.
        startlap = start_pos - 2
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values
            #pred_val = _rec.pred_endrank.values
            #make sure shape match, 100 samples
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)
    target.samples = newsamples
    print('multirun target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bymloutput(output, dfin):
    """
    Stitch per-startlap ML predictions into one long-horizon forecast
    (single-run variant: only the FIRST matching row per startlap is used,
    broadcast to all sample paths).

    input:
        test_ds, _predictor : module-level globals (dataset / predictor)
        output : figure tag (currently unused in this function)
        dfin   : DataFrame of per-startlap predictions
    returns (stitched_forecast, ts_of_first_series)
    """
    def get_start(idx):
        # minutes between this forecast's start_date and the series start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=_predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # forecasts are ordered latest-first: [-1] has the earliest start
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # one column per covered lap, NaN where no prediction is available
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
        # get prediction from ml output
        # pos = laps
        # 1 ... 10 | 11 <- start pos in forecasts
        # 0 ... 9 | 10 <- 9 is the startlap
        #
        startlap = start_pos - 2
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values[0]
            #pred_val = _rec.pred_endrank.values
            #make sure shape match, 100 samples
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)
    target.samples = newsamples
    print('target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bysamples(output, samples, tss):
    """
    use the farest samples only
    Build a long-horizon forecast directly from a pre-assembled sample
    matrix (slice aligned to the forecast window, shifted by +1 so ranks
    start from 1 for visualization).

    input:
        samples : (num_samples x num_laps) array
        tss     : NOTE(review) -- this parameter is immediately shadowed
                  by the freshly generated ts list below; the argument
                  value is never used.
    returns (forecast_with_sliced_samples, ts_of_first_series)
    """
    def get_start(idx):
        # minutes between this forecast's start_date and the series start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=_predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print(first_start, last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    #sample array size: last_start - first_start + npredict
    arraysize = last_start - first_start + npredict
    #error here
    #target.samples = samples[:,-len(forecasts)-1:] + 1
    #target.samples = samples[:, 10 + npredict:] + 1
    target.samples = samples[:, first_start:first_start + arraysize] + 1
    print('long_predict_bysamples==>target samples shape:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target, tss[0]
#
# different idx format to bymloutput
#
def long_predict_bydf(output, dfin):
    """
    Stitch per-startlap ML predictions into one long-horizon forecast.
    Same as long_predict_bymloutput except for the startlap index format:
    this variant uses start_pos - 1 (see file comment above) instead of
    start_pos - 2.

    input:
        test_ds, _predictor : module-level globals (dataset / predictor)
        output : figure tag (currently unused in this function)
        dfin   : DataFrame of per-startlap predictions
    returns (stitched_forecast, ts_of_first_series)
    """
    def get_start(idx):
        # minutes between this forecast's start_date and the series start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor= _predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # one column per covered lap, NaN where no prediction is available
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
        # get prediction from ml output
        # pos = laps
        # 1 ... 10 | 11 <- start pos in forecasts
        # 0 ... 9 | 10 <- 9 is the startlap
        #
        startlap = start_pos - 1
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values[0]
            #pred_val = _rec.pred_endrank.values
            #make sure shape match, 100 samples
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)
    target.samples = newsamples
    print('target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def get_ranknet_multirun(retdata, testcar, sampleCnt=100):
    """Collect all runs' rows for one car and build a stitched long
    forecast via long_predict_bymloutput_multirun.

    retdata maps run-id -> tuple whose first element is a result DataFrame.
    Returns (target_forecast, ts).
    """
    per_run = [retdata[rid][0] for rid in retdata.keys()]
    dfin_ranknet = pd.concat([df[df['carno'] == testcar] for df in per_run])
    print('dfin_ranknet size:', len(dfin_ranknet))
    # shift to the 0-based convention expected by the ml-output format
    for col in ('startlap', 'startrank', 'endrank'):
        dfin_ranknet[col] = dfin_ranknet[col] - 1
    return long_predict_bymloutput_multirun('ranknet-rank', dfin_ranknet,
                                            sampleCnt=sampleCnt)
# In[24]:
def ploth(ts_entry, forecast_entry, pits,caution, pitstop,outputfile,
          colors = ['r','g','m'],
          plabels= ['observed','svr','arima','ranknet'],
          ylabel = 'RANK'):
    """
    Plot observed series plus one forecast per subplot, with a pit/caution
    status strip; saved as '<outputfile>.pdf'.

    ts_entry / forecast_entry : parallel lists, one subplot each
    pits, caution, pitstop    : race status arrays from get_racestatus
    colors / plabels          : per-forecast line colors and legend labels
                                (plabels[0] labels the observed series)
    NOTE(review): in the `elif idx == figcnt - 1` branch `xl` carries over
    from the previous loop iteration -- confirm this is intended.
    """
    #plot_length = int(forecast_entry[0].samples.shape[1] *1.2)
    #plot_length = forecast_entry[0].samples.shape[1] + 10
    #prediction_intervals = (50.0, 90.0)
    prediction_intervals = [90.0]
    #legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    figcnt = len(forecast_entry)
    #fig, axs = plt.subplots(figcnt,1, figsize=(8,6))
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    #colors = ['r','g','m']
    #plabels = ['observed','svr','arima','ranknet']
    for idx in range(figcnt):
        ax = plt.subplot(figcnt, 1, idx+1)
        #ax = plt.subplot(1, figcnt, idx+1)
        #ts_entry.iloc[-plot_length:,0].plot(ax=axs, linewidth=1) # plot the time series
        #ts_entry.iloc[-plot_length:,0].plot(ax=axs[idx], linewidth=1) # plot the time series
        #plot_length = int(forecast_entry[idx].samples.shape[1] *1.2)
        ts_entry[idx].iloc[:,0].plot(linewidth=1, color='b',
            marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # currank: observed series shifted by 2 laps as a reference curve
        sv = ts_entry[idx].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq='1min') + 2
        date_index = pd.date_range(start, periods = len(sv)-2, freq='1min')
        df2 = pd.DataFrame(sv[:-2], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
            marker='+', alpha=0.7, zorder=-1, label='CurRank')
        #for idx in range(len(forecast_entry)):
        #    forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='g')
        forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals,
            color=colors[idx],label=plabels[idx+1], zorder=10)
        #forecast_entry[1].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='b')
        #forecast_entry[2].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='r')
        #add mean line, compare with median
        #if forecast_entry[idx].samples.shape[0] > 1:
        if idx>3:
            mean_forecast = copy.deepcopy(forecast_entry[idx])
            mean_forecast.samples = np.mean(mean_forecast.samples, axis=0).reshape((1,-1))
            mean_forecast.copy_dim(0).plot(prediction_intervals=prediction_intervals,
                color='g',label='use-mean', zorder=10)
        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        #if idx==0:
        ax.set_ylabel(ylabel)
        if idx==0:
            plt.title(outputfile)
        # relabel x ticks so the axis shows lap numbers, not timestamps
        locs, labels = plt.xticks()
        #plt.xticks(locs, range(len(locs)))
        start_loc = locs[0]
        offset = range(0, 200, 5)
        #new_locs = range(start_loc , start_loc+200, 10)
        new_locs = [start_loc + x for x in offset]
        #new_labels = [str(x-start_loc + 1) for x in new_locs]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[idx])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            xlim_h = len(ts_entry[idx])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        #ax.set_xlim((80,110))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)
        #add racestatus
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)
    #plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcar(carno):
    """
    Plot the first five rank forecasts for one car.
    input:
        alldata, rankdata; global data (module-level)
    """
    series, preds = alldata[carno]
    pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, rankdata)
    print(np.where(pitstop==1))
    ploth(series[:5], preds[:5], pits, caution, pitstop,
          'ranknet-rf-rank-forecast-%d'%carno,
          colors = ['y','c','g','m','r'],
          plabels= ['observed','SVR','RF','Arima','RrankNet-Oracle','RrankNet-MLP'])
def plotcar_laptime(carno):
    """
    Plot laptime forecasts for one car.
    input:
        alldata, rankdata; global data (module-level)
    """
    series, preds = alldata[carno]
    pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, rankdata)
    print(np.where(pitstop==1))
    ploth(series, preds, pits, caution, pitstop,
          'ranknet-oracle-laptime-forecast-%d'%carno,
          colors = ['m','r'],
          plabels= ['observed','RrankNet-Oracle','RrankNet-MLP'],
          ylabel='LapTime')
def plotrank(outputfile, mode='RANK' ):
    """
    Plot the observed rank (or laptime, for any other mode) curve per car,
    one subplot per car, with pit markers and status strip; saved as
    '<outputfile>.pdf'.
    input:
        alldata, rankdata; global data (module-level)
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        #target_svr, target_rf,target_arima, target_oracle, target_ranknet_1run = savedata[carno]
        #target_oracle(by longpredict), tss_oracle_multirun,tss_ranknet_multirun
        tsss, targets = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, rankdata)
        print(np.where(pitstop==1))
        ax = plt.subplot(figcnt, 1, idx+1)
        if mode == 'RANK':
            ax.plot(ranks, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='Rank')
            ax.set_ylim((-5,+35))
            # pit markers at (lap-1, rank-at-pit)
            ax.plot(pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            #add racestatus
            add_status(ax,0, caution, pitstop)
        else:
            ax.plot(laptimes, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='LapTime')
            ax.set_ylim((30,140))
            # pit markers at (lap-1, laptime-at-pit)
            ax.plot(pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            #add racestatus
            add_status(ax,0, caution, pitstop,y=32, height=5)
        ax.set_xlim((0,200))
        ax.set_ylabel('car-%d'%carno)
    #plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcarx(carno):
    """
    Plot rank forecasts for one car with the oracle run spliced into
    slot 2 (replaces the Arima slot of plotcar).
    input:
        alldata, oracledata, rankdata; global data (module-level)
    """
    series, preds = alldata[carno]
    oracle_series, oracle_preds = oracledata[carno]
    # splice the oracle run into the third panel
    series[2] = oracle_series[1]
    preds[2] = oracle_preds[1]
    pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, rankdata)
    print(np.where(pitstop==1))
    ploth(series[:5], preds[:5], pits, caution, pitstop,
          'ranknet-rf-rank-forecast-%d'%carno,
          colors = ['y','c','g','m','r'],
          plabels= ['observed','SVR','RF','Weighted-Oracle','RrankNet-Oracle','RrankNet-MLP'])
def plotoracle(alldata, carno, destdir):
    """
    Plot the oracle long-forecast variants for one car; figure is saved
    under `destdir`.
    input:
        alldata (passed in here), rankdata; global data
    """
    outputfile = destdir + 'ranknet-oracle-forecast-%d'%carno
    series, preds = alldata[carno]
    pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, rankdata)
    print(np.where(pitstop==1))
    ploth(series, preds, pits, caution, pitstop,
          outputfile,
          colors = ['y','c','g','m','r'],
          plabels= ['observed','1run-samples','1run-df','multimean','norerank-multimean','mrun-samples'])
def plotallcars(alldata, outputfile, drawid = 0,
                colors = ['g','c','m','r','y'],
                plabels= ['observed','1run-samples','1run-df','multimean','norerank-multimean','mrun-samples'],
                ylabel='RANK'):
    """
    plot a single fig for all cars
    input:
        prediction_length,freq ; global var
        alldata, rankdata; global data
        drawid : long prediction result index in alldata[carno] to draw
    One subplot per car; figure saved as '<outputfile>.pdf'.
    NOTE(review): in the `elif idx == figcnt - 1` branch `xl` carries over
    from the previous loop iteration -- confirm this is intended.
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    prediction_intervals = [90.0]
    #legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    # style for the per-subplot car label
    font = {'family': 'serif',
            'color': 'darkred',
            'weight': 'normal',
            'size': 12,
            }
    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        #target_svr, target_rf,target_arima, target_oracle, target_ranknet_1run = savedata[carno]
        #target_oracle(by longpredict), tss_oracle_multirun,tss_ranknet_multirun
        ts_entry, forecast_entry = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, rankdata)
        print(np.where(pitstop==1))
        ax = plt.subplot(figcnt, 1, idx+1)
        # observed
        ts_entry[drawid].iloc[:,0].plot(linewidth=1, color='b',
            marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # currank: observed series shifted by prediction_length as reference
        sv = ts_entry[drawid].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq=freq) + prediction_length
        date_index = pd.date_range(start, periods = len(sv)-prediction_length, freq=freq)
        df2 = pd.DataFrame(sv[:-prediction_length], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
            marker='+', alpha=0.7, zorder=-1, label='CurRank')
        #forecast
        forecast_entry[drawid].copy_dim(0).plot(prediction_intervals=prediction_intervals,
            color=colors[drawid],label=plabels[drawid+1], zorder=10)
        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        ax.set_ylabel(ylabel)
        # relabel x ticks so the axis shows lap numbers, not timestamps
        locs, labels = plt.xticks()
        #plt.xticks(locs, range(len(locs)))
        start_loc = locs[0]
        offset = range(0, 200, 5)
        #new_locs = range(start_loc , start_loc+200, 10)
        new_locs = [start_loc + x for x in offset]
        #new_labels = [str(x-start_loc + 1) for x in new_locs]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[drawid])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            xlim_h = len(ts_entry[drawid])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        #plt.title(outputfile)
        plt.text(xl + xlim_h - 15, 35, f'car-{carno}',fontdict=font)
        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        #ax.set_xlim((80,110))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)
        #add racestatus
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)
    #plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def get_racestatus(carno, rankdata):
    """Extract pit/caution/rank/laptime status arrays for one car.

    completed_laps starts at 0, so the per-lap arrays are returned from
    index 1 onward (completed_laps=1 maps to array index 0).

    Returns (pits, cautions, caution[1:], pitstop[1:], ranks[1:], laptimes[1:]).
    """
    car_df = rankdata[rankdata['car_number']==carno]
    data = car_df[['completed_laps','rank','last_laptime','time_behind_leader']].values
    # binary flags per recorded lap: lap_status 'P' => pit, track_status 'Y' => caution
    pitstop = np.array([1 if x=='P' else 0 for x in car_df[['lap_status']].values])
    caution = np.array([1 if x=='Y' else 0 for x in car_df[['track_status']].values])
    # rows of `data` at pit / caution laps
    pits = data[np.where(pitstop == 1)]
    cautions = data[np.where(caution == 1)]
    ranks = car_df[['rank']].values
    laptimes = car_df[['last_laptime']].values
    return pits, cautions, caution[1:], pitstop[1:], ranks[1:], laptimes[1:]
# status-strip colors (earlier pastel hex values kept for reference)
#red = '#ff8080'
red = 'red'
#yellow = '#8080ff'
yellow = 'yellow'
#green = '#80ff80'
green = 'green'

def add_status(axs, xl, caution, pitstop, maxlap=200, y=-4, height=2):
    """
    Draw a per-lap status strip on axis `axs`:
    green = racing, yellow = caution, red = pit stop (pit wins over caution).
    input:
        caution, pitstop : race status arrays
    Note: `maxlap` is recomputed from the status arrays, so the parameter
    value passed in is effectively ignored.
    """
    maxlap = min(len(caution), len(pitstop))
    for lap in range(maxlap):
        # pit stop takes precedence over caution
        if pitstop[lap] == 1:
            color = red
        elif caution[lap] == 1:
            color = yellow
        else:
            color = green
        rect = plt.Rectangle((lap + xl - 0.5, y), 1, height, fc=color, ec=color)
        axs.add_patch(rect)
# In[9]:
def get_config():
    """Snapshot the module-level configuration as an ordered list.

    Used to compare config-file values against the notebook defaults
    (see the commented-out config1/config2 check below).
    """
    return [
        _savedata, _skip_overwrite, _inlap_status, _feature_mode,
        _featureCnt, freq, _train_len, prediction_length, context_ratio,
        context_length, contextlen, dataset, epochs, gpuid,
        _use_weighted_model, trainmodel, _use_cate_feature, use_feat_static,
        distroutput, batch_size, loopcnt, _test_event, testmodel, pitmodel,
        year,
    ]
# ## run
# In[10]:
# command-line entry: a single .ini config file drives the whole run
if len(sys.argv) != 2:
    print('usage: RankNet-QuickTest.py <configfile>')
    sys.exit(0)
configfile = sys.argv[1]
base=os.path.basename(configfile)
configname = os.path.splitext(base)[0]
WorkRootDir = 'QuickTestOutput'
#configname = 'weighted-noinlap-nopitage-nocate-c60-drank'
#configfile = f'{configname}.ini'
if not os.path.exists(configfile):
    # NOTE(review): only warns -- execution continues and config.read()
    # silently ignores a missing file, so the getboolean/getint calls
    # below would fail with NoSectionError; consider sys.exit here.
    print('config file not exists error:', configfile)
if configfile != '':
    config = configparser.RawConfigParser()
    #config.read(WorkRootDir + '/' + configfile)
    config.read(configfile)
    #set them back
    section = "RankNet-QuickTest"
    _savedata = config.getboolean(section, "_savedata")
    _skip_overwrite = config.getboolean(section, "_skip_overwrite")
    _inlap_status = config.getint(section, "_inlap_status") #0
    _feature_mode = config.getint(section, "_feature_mode") #FEATURE_STATUS
    _featureCnt = config.getint(section, "_featureCnt") #9
    freq = config.get(section, "freq") #"1min"
    _train_len = config.getint(section, "_train_len") #40
    prediction_length = config.getint(section, "prediction_length") #2
    context_ratio = config.getfloat(section, "context_ratio") #0.
    context_length = config.getint(section, "context_length") #40
    dataset= config.get(section, "dataset") #'rank'
    epochs = config.getint(section, "epochs") #1000
    gpuid = config.getint(section, "gpuid") #5
    _use_weighted_model = config.getboolean(section, "_use_weighted_model")
    trainmodel = config.get(section, "trainmodel") #'deepARW-Oracle' if _use_weighted_model else 'deepAR-Oracle'
    _use_cate_feature = config.getboolean(section, "_use_cate_feature")
    distroutput = config.get(section, "distroutput") #'student'
    batch_size = config.getint(section, "batch_size") #32
    loopcnt = config.getint(section, "loopcnt") #2
    _test_event = config.get(section, "_test_event") #'Indy500-2018'
    testmodel = config.get(section, "testmodel") #'oracle'
    pitmodel = config.get(section, "pitmodel") #'oracle'
    year = config.get(section, "year") #'2018'
    # derived settings
    contextlen = context_length
    use_feat_static = _use_cate_feature
    #config1 = get_config()
else:
    print('Warning, please use config file')
    sys.exit(0)
#
# global settings
#
# NOTE(review): every assignment below unconditionally OVERWRITES the value
# just read from the config file above -- this looks like a leftover
# notebook cell (see the commented config1/config2 comparison further
# down). Confirm whether this block should be removed or guarded.
#_savedata = False
_savedata = True
_skip_overwrite = True
#inlap status =
#  0 , no inlap
#  1 , set previous lap
#  2 , set the next lap
_inlap_status = 0
#
# featuremode in [FEATURE_STATUS, FEATURE_PITAGE]:
#
_feature_mode = FEATURE_STATUS
_featureCnt = 9
#
# training parameters
#
freq = "1min"
_train_len = 40
prediction_length = 2
context_ratio = 0.
context_length = 40
contextlen = context_length
dataset='rank'
epochs = 1000
#epochs = 10
gpuid = 5
#'deepAR-Oracle','deepARW-Oracle'
_use_weighted_model = True
trainmodel = 'deepARW-Oracle' if _use_weighted_model else 'deepAR-Oracle'
_use_cate_feature = False
use_feat_static = _use_cate_feature
distroutput = 'student'
batch_size = 32
#
# test parameters
#
loopcnt = 2
_test_event = 'Indy500-2018'
testmodel = 'oracle'
pitmodel = 'oracle'
year = '2018'
#config2 = get_config()
# In[11]:
#checkconfig = [0 if config1[idx] == config2[idx] else 1 for idx in range(len(config1))]
#print(checkconfig)
#print(config1)
#print(config2)
# In[12]:
#
# string map
#
# human-readable tags used to build dataset/experiment identifiers
inlapstr = {0:'noinlap',1:'inlap',2:'outlap'}
featurestr = {FEATURE_STATUS:'nopitage',FEATURE_PITAGE:'pitage'}
weightstr = {True:'weighted',False:'noweighted'}
catestr = {True:'cate',False:'nocate'}
#
# input data parameters
#
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
# database id encodes the year span, feature count and inlap mode
dbid = f'Indy500_{years[0]}_{years[-1]}_v{_featureCnt}_p{_inlap_status}'
_dataset_id = '%s-%s'%(inlapstr[_inlap_status], featurestr[_feature_mode])
#
# internal parameters
#
distr_outputs ={'student':StudentTOutput(),
                'negbin':NegativeBinomialOutput()
               }
distr_output = distr_outputs[distroutput]
#
#
#
experimentid = f'{weightstr[_use_weighted_model]}-{inlapstr[_inlap_status]}-{featurestr[_feature_mode]}-{catestr[_use_cate_feature]}-c{context_length}'
#
#
#
outputRoot = f"{WorkRootDir}/{experimentid}/"
# standard output file names
LAPTIME_DATASET = f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
STAGE_DATASET = f'stagedata-{dbid}.pickle'
EVALUATION_RESULT_DF = f'evaluation_result_d{dataset}.csv'
LONG_FORECASTING_DFS = f'long_forecasting_dfs_d{dataset}.pickle'
FORECAST_FIGS_DIR = f'forecast-figs-d{dataset}/'
# build (or reload) the laptime/stage datasets for all events
stagedata = {}
global_carids = {}
os.makedirs(outputRoot, exist_ok=True)
#check the dest files first
if _skip_overwrite and os.path.exists(outputRoot + LAPTIME_DATASET) and os.path.exists(outputRoot + STAGE_DATASET):
    #
    # load data
    #
    # NOTE(review): this branch uses pickle without the local `import
    # pickle` that only appears in the else-branch below -- pickle must
    # already be imported earlier in the file; verify.
    print('Load laptime and stage dataset:',outputRoot + LAPTIME_DATASET, outputRoot + STAGE_DATASET)
    with open(outputRoot + LAPTIME_DATASET, 'rb') as f:
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    with open(outputRoot + STAGE_DATASET, 'rb') as f:
        stagedata = pickle.load(f, encoding='latin1')
else:
    # rebuild from raw event data and assign a global car-id per car number
    cur_carid = 0
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        print('%s: carno=%d, lapnum=%d'%(event, len(carlist), len(laplist)))
        #build the carid map
        for car in carlist:
            if car not in global_carids:
                global_carids[car] = cur_carid
                cur_carid += 1
    laptime_data = get_laptime_dataset(stagedata,inlap_status = _inlap_status)
    if _savedata:
        import pickle
        #stintdf.to_csv('laptime-%s.csv'%year)
        #savefile = outputRoot + f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
        savefile = outputRoot + LAPTIME_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            #pack [global_carids, laptime_data]
            savedata = [global_carids, laptime_data]
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        #savefile = outputRoot + f'stagedata-{dbid}.pickle'
        savefile = outputRoot + STAGE_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            #pack [global_carids, laptime_data]
            savedata = stagedata
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
# ### 2. make gluonts db
# In[14]:
# Step 2: build (or load cached) the GluonTS train/test ListDatasets.
outdir = outputRoot + _dataset_id
os.makedirs(outdir, exist_ok=True)
if dataset == 'laptime':
    subdir = 'laptime-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_LAPTIME
elif dataset == 'timediff':
    subdir = 'timediff-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_TIMEDIFF
elif dataset == 'rank':
    subdir = 'rank-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_RANK
else:
    # NOTE(review): this branch leaves subdir/_run_ts undefined, so the
    # _task_dir line below would raise NameError for an unknown dataset
    print('error, dataset not support: ', dataset)
_task_dir = f'{outdir}/{subdir}/'
#
#dbname, train_ds, test_ds = makedbs()
#
useeid = False
interpolate = False
#ipstr = '-ip' if interpolate else '-noip'
ipstr = '%s-%s'%('ip' if interpolate else 'noip', 'eid' if useeid else 'noeid')
dbname = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}.pickle'
#check the dest files first
if _skip_overwrite and os.path.exists(dbname):
    print('Load Gluonts Dataset:',dbname)
    with open(dbname, 'rb') as f:
        freq, prediction_length, cardinality, train_ds, test_ds = pickle.load(f, encoding='latin1')
        print('.......loaded data, freq=', freq, 'prediction_length=', prediction_length)
else:
    if useeid:
        cardinality = [len(global_carids), len(laptime_data)]
    else:
        cardinality = [len(global_carids)]
    train_ds, test_ds,_,_ = make_dataset_byevent(-1, prediction_length,freq,
                        useeid=useeid, run_ts=_run_ts,
                        test_event=_test_event, log_transform =False,
                        context_ratio=0, train_ratio = 0, dorerank =True)
    if _savedata:
        print('Save Gluonts Dataset:',dbname)
        with open(dbname, 'wb') as f:
            savedata = [freq, prediction_length, cardinality, train_ds, test_ds]
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
# ### 3. train the model
# In[15]:
# Step 3: train the estimator unless a checkpoint directory already exists.
# NOTE(review): `id` shadows the builtin id() from here on.
id='oracle'
run=1
runid=f'{trainmodel}-{dataset}-all-indy-f1min-t{prediction_length}-e{epochs}-r{run}_{id}_t{prediction_length}'
modelfile = _task_dir + runid
if _skip_overwrite and os.path.exists(modelfile):
    print('Model checkpoint found at:',modelfile)
else:
    #get target dim (multivariate when target is 2-D)
    entry = next(iter(train_ds))
    target_dim = entry['target'].shape
    target_dim = target_dim[0] if len(target_dim) > 1 else 1
    print('target_dim:%s', target_dim)
    estimator = init_estimator(trainmodel, gpuid,
            epochs, batch_size,target_dim, distr_output = distr_output,use_feat_static = use_feat_static)
    predictor = estimator.train(train_ds)
    if _savedata:
        os.makedirs(modelfile, exist_ok=True)
        print('Start to save the model to %s', modelfile)
        predictor.serialize(Path(modelfile))
        print('End of saving the model.')
#
# ### 4. evaluate the model
# In[16]:
# Step 4: run (or load cached) the short-term simulation for the test event.
lapmode = _inlap_status
fmode = _feature_mode
runts = dataset
mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], featurestr[fmode])
datasetid = outputRoot + _dataset_id
simulation_outfile=outputRoot + f'shortterm-dfout-oracle-indy500-{dataset}-{inlapstr[_inlap_status]}-{featurestr[_feature_mode]}-2018-oracle-l{loopcnt}-alldata-weighted.pickle'
if _skip_overwrite and os.path.exists(simulation_outfile):
    print('Load Simulation Results:',simulation_outfile)
    with open(simulation_outfile, 'rb') as f:
        dfs,acc,ret,pret = pickle.load(f, encoding='latin1')
        print('.......loaded data, ret keys=', ret.keys())
    # init the stint module (needed even when results are cached)
    init_simulation(datasetid, _test_event, 'rank',stint.COL_RANK,'rank',prediction_length,
                  pitmodel=pitmodel, inlapmode=lapmode,featuremode =fmode,
                  train_len = _train_len)
else:
    #run simulation
    acc, ret, pret = {}, {}, {}
    #lapmode = _inlap_status
    #fmode = _feature_mode
    #runts = dataset
    #mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], featurestr[fmode])
    if runts == 'rank':
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                'rank',stint.COL_RANK,'rank',
                prediction_length, stint.MODE_ORACLE,loopcnt,
                pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
                train_len = _train_len)
    else:
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                'timediff',stint.COL_TIMEDIFF,'timediff2rank',
                prediction_length, stint.MODE_ORACLE,loopcnt,
                pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
                train_len = _train_len)
    allsamples, alltss = get_allsamples(ret[mid], year=year)
    _, pret[mid]= prisk_direct_bysamples(allsamples, alltss)
    print(pret[mid])
    dfs={}
    # mode selects the aggregation of multi-run outputs (1 = mean)
    mode=1
    df = get_alldf_mode(ret[mid], year=year,mode=mode)
    name = '%s_%s'%(testmodel, 'mean' if mode==1 else ('mode' if mode==0 else 'median'))
    if year not in dfs:
        dfs[year] = {}
    dfs[year][name] = df
    _trim = 0
    _include_final = True
    _include_stintlen = True
    include_str = '1' if _include_final else '0'
    stint_str = '1' if _include_stintlen else ''
    #simulation_outfile=outputRoot + f'shortterm-dfout-oracle-indy500-{dataset}-{inlapstr[_inlap_status]}-{featurestr[_feature_mode]}-2018-oracle-l{loopcnt}-alldata-weighted.pickle'
    with open(simulation_outfile, 'wb') as f:
        savedata = [dfs,acc,ret,pret]
        pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
# ### 5. final evaluation
# In[17]:
# Step 5: compute SignAcc/MAE/50-Risk/90-Risk overall and split by lap type.
if _skip_overwrite and os.path.exists(outputRoot + EVALUATION_RESULT_DF):
    print('Load Evaluation Results:',outputRoot + EVALUATION_RESULT_DF)
    oracle_eval_result = pd.read_csv(outputRoot + EVALUATION_RESULT_DF)
else:
    # get pit laps, pit-covered-laps
    # pitdata[year] = [pitlaps, pitcoveredlaps]
    with open('pitcoveredlaps-g1.pickle', 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        pitdata = pickle.load(f, encoding='latin1')
    #
    # Model,SignAcc,MAE,50-Risk,90-Risk
    #
    cols = ['Year','Model','ExpID','laptype','SignAcc','MAE','50-Risk','90-Risk']
    plen = prediction_length
    usemeanstr='mean'
    #load data
    # dfs,acc,ret,pret
    ranknetdf = dfs
    retdata = []
    #oracle: evaluation on all laps first
    dfx = ret[mid]
    allsamples, alltss = get_allsamples(dfx, year=year)
    #_, pret[mid]= prisk_direct_bysamples(ret[mid][0][1], ret[mid][0][2])
    _, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
    dfout = do_rerank(ranknetdf[year]['oracle_mean'])
    accret = stint.get_evalret_shortterm(dfout)[0]
    #fsamples, ftss = runs2samples_ex(ranknet_ret[f'oracle-RANK-{year}-inlap-nopitage'],[])
    #_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
    retdata.append([year,'Oracle',configname,'all', accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
    for laptype in ['normal','pit']:
        # select the lap set for this type; the other set gets cleared
        pitcoveredlaps = pitdata[year][1]
        normallaps = set([x for x in range(1,201)]) - pitcoveredlaps
        if laptype == 'normal':
            sellaps = normallaps
            clearlaps = pitcoveredlaps
        else:
            sellaps = pitcoveredlaps
            clearlaps = normallaps
        # pitcoveredlaps start idx = 1
        startlaps = [x-plen-1 for x in sellaps]
        #sellapidx = np.array([x-1 for x in sellaps])
        clearidx = np.array([x-1 for x in clearlaps])
        print('sellaps:', len(sellaps), 'clearlaps:',len(clearlaps))
        #oracle
        #outfile=f'shortterm-dfout-ranknet-indy500-rank-inlap-nopitage-20182019-oracle-l10-alldata-weighted.pickle'
        #_all = load_dfout_all(outfile)[0]
        #ranknetdf, acc, ret, pret = _all[0],_all[1],_all[2],_all[3]
        dfout = do_rerank(ranknetdf[year]['oracle_mean'])
        allsamples, alltss = get_allsamples(dfx, year=year)
        allsamples, alltss = clear_samples(allsamples, alltss,clearidx)
        _, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
        dfout = dfout[dfout['startlap'].isin(startlaps)]
        accret = stint.get_evalret_shortterm(dfout)[0]
        print(year, laptype,'RankNet-Oracle',accret[0], accret[1], prisk_vals[1], prisk_vals[2])
        retdata.append([year, 'Oracle',configname,laptype, accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
    oracle_eval_result = pd.DataFrame(data=retdata, columns=cols)
    if _savedata:
        oracle_eval_result.to_csv(outputRoot + EVALUATION_RESULT_DF)
# ### 6. Draw forecasting results
# In[19]:
# Step 6: build (or load cached) per-car long-term forecasting traces.
if _skip_overwrite and os.path.exists(outputRoot + LONG_FORECASTING_DFS):
    fname = outputRoot + LONG_FORECASTING_DFS
    print('Load Long Forecasting Data:',fname)
    with open(fname, 'rb') as f:
        alldata = pickle.load(f, encoding='latin1')
        print('.......loaded data, alldata keys=', alldata.keys())
else:
    oracle_ret = ret
    mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], featurestr[fmode])
    print('eval mid:', mid, 'oracle_ret keys:', ret.keys())
    ## init predictor
    _predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
    oracle_dfout = do_rerank(dfs[year]['oracle_mean'])
    carlist = set(list(oracle_dfout.carno.values))
    carlist = [int(x) for x in carlist]
    print('carlist:', carlist,'len:',len(carlist))
    #carlist = [13, 7, 3, 12]
    #carlist = [13]
    retdata = {}
    for carno in carlist:
        print("*"*40)
        print('Run models for carno=', carno)
        # create the test_ds first
        test_cars = [carno]
        train_ds, test_ds, trainset, testset = stint.make_dataset_byevent(events_id[_test_event],
                                prediction_length,freq,
                                oracle_mode=stint.MODE_ORACLE,
                                run_ts = _run_ts,
                                test_event = _test_event,
                                test_cars=test_cars,
                                half_moving_win = 0,
                                train_ratio = 0.01)
        if (len(testset) <= 10 + prediction_length):
            print('ts too short, skip ', len(testset))
            continue
        #by first run samples
        samples = oracle_ret[mid][0][1][test_cars[0]]
        tss = oracle_ret[mid][0][2][test_cars[0]]
        target_oracle1, tss_oracle1 = long_predict_bysamples('1run-samples', samples, tss)
        #by first run output df(_use_mean = true, already reranked)
        df = oracle_ret[mid][0][0]
        dfin_oracle = df[df['carno']==test_cars[0]]
        target_oracle2, tss_oracle2 = long_predict_bydf('oracle-1run-dfout', dfin_oracle)
        #by multi-run mean at oracle_dfout
        df = oracle_dfout
        dfin_oracle = df[df['carno']==test_cars[0]]
        target_oracle3, tss_oracle3 = long_predict_bydf('oracle-multimean', dfin_oracle)
        #no rerank
        df = ranknetdf['2018']['oracle_mean']
        dfin_oracle = df[df['carno']==test_cars[0]]
        target_oracle4, tss_oracle4 = long_predict_bydf('oracle-norerank-multimean', dfin_oracle)
        #by multiple runs
        target_oracle_multirun, tss_oracle_multirun = get_ranknet_multirun(
                                oracle_ret[mid],
                                test_cars[0],sampleCnt=loopcnt)
        retdata[carno] = [[tss_oracle1,tss_oracle2,tss_oracle3,tss_oracle4,tss_oracle_multirun],
                 [target_oracle1,target_oracle2,target_oracle3,target_oracle4,target_oracle_multirun]]
    alldata = retdata
    if _savedata:
        with open(outputRoot + LONG_FORECASTING_DFS, 'wb') as f:
            pickle.dump(alldata, f, pickle.HIGHEST_PROTOCOL)
# In[20]:
# Step 7: render per-car forecast figures plus a summary figure.
destdir = outputRoot + FORECAST_FIGS_DIR
if _skip_overwrite and os.path.exists(destdir):
    print('Long Forecasting Figures at:',destdir)
else:
    # NOTE(review): reads a hard-coded stagedata pickle from the cwd rather
    # than the STAGE_DATASET produced above — confirm this is intentional.
    with open('stagedata-Indy500_2013_2019_v9_p0.pickle', 'rb') as f:
        stagedata = pickle.load(f, encoding='latin1')
    _alldata, rankdata, _acldata, _flagdata = stagedata['Indy500-2018']
    #destdir = outputRoot + 'oracle-forecast-figs/'
    os.makedirs(destdir, exist_ok=True)
    for carno in alldata:
        plotoracle(alldata, carno, destdir)
    #draw summary result
    outputfile = destdir + f'{configname}'
    plotallcars(alldata, outputfile, drawid = 0)
print(outputRoot)
print(oracle_eval_result)
| 92,659 | 33.242424 | 182 | py |
rankpredictor | rankpredictor-master/run/22.PaperFinal/notebook/RankNet-QuickTest.py | #!/usr/bin/env python
# coding: utf-8
"""
RankNet QuickTest goes through the following steps
makedb laptime
makedb gluonts
train model
evaluate model
draw figures
version 0.4
supported features:
forecast_mode: shortterm, stint
trainmodel : deepAR , deepARW-Oracle, deepAR-multi
testmodel : standard, oracle,pitmodel, joint
Usage: RankNet-QuickTest.py <configfile> [options]
options overwrite the configurations for quick experiments needs, include:
_forecast_mode ;
trainmodel ;
testmodel ;
_joint_train ; False/True
loopcnt ; 100/2
_pitmodel_bias ; 0/2,4
year ; 2018/2019
_test_event ; Indy500-2018, Indy500-2019
"""
import logging
from optparse import OptionParser
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import random
import mxnet as mx
from mxnet import gluon
import pickle
import json
import copy
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from pathlib import Path
import configparser
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWeightEstimator
#import indycar.model.stint_simulator_shortterm_pitmodel as stint
import indycar.model.quicktest_simulator as stint
logger = logging.getLogger(__name__)
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build a per-car, per-lap dataframe from a raw completed-laps dataset.

    Keeps the first record per (car_number, completed_laps) in elapsed-time
    order, drops bookkeeping columns, and derives per-lap deltas.

    Args:
        dataset: raw dataframe with at least car_number, completed_laps,
            elapsed_time, rank, track_status, lap_status plus the
            bookkeeping columns dropped below.

    Returns:
        DataFrame with columns [car_number, completed_laps, time_diff,
        rank, track_status, lap_status, elapsed_time], sorted by
        (car_number, completed_laps).
    """
    # pick up data with valid rank: first record per (car, lap) in elapsed
    # time order (index used as a stable tie-breaker)
    rankdata = dataset.rename_axis('MyIdx').sort_values(
        by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(
        subset=['car_number', 'completed_laps'], keep='first')

    # resort by car_number, lap
    uni_ds = rankdata.sort_values(
        by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade", "laps_behind_prec",
                          "overall_rank", "pit_stop_count",
                          "last_pitted_lap", "start_position",
                          "laps_led"], axis=1)

    carnumber = set(uni_ds['car_number'])
    print('cars:', carnumber)
    print('#cars=', len(carnumber))

    # per-lap deltas; uni_ds is sorted by car, so the first row of each car
    # gets 0 instead of a cross-car diff.  Using .loc (not chained
    # indexing) so the assignment is guaranteed to hit the frame.
    boundary = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[boundary, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[boundary, 'time_diff'] = 0

    df = uni_ds[['car_number', 'completed_laps', 'time_diff', 'rank',
                 'track_status', 'lap_status', 'elapsed_time']]
    return df
def make_lapstatus_data(dataset):
    """Return the per-lap (completed_laps, track_status) series of one
    race finisher — any finisher carries the full flag history."""
    last_lap = max(dataset.completed_laps)
    total_laps = last_lap + 1

    # cars that reached the final lap are the finishers
    finishers = dataset[dataset.completed_laps == last_lap].car_number.values
    finisher_count = len(finishers)
    print('count of completed cars:', finisher_count)
    print('completed cars:', finishers)

    # pick the first finisher and keep one record per lap
    picked = dataset[dataset['car_number'] == finishers[0]]
    picked = picked.drop_duplicates(subset=['car_number', 'completed_laps'],
                                    keep='first')
    return picked[['completed_laps', 'track_status']]
def load_data(event, year=0):
    """Load one race CSV and derive the standard views.

    Args:
        event: event name, e.g. 'Indy500-2018'.
        year: optional year tag appended to the file name.  Accepts an int
            or a string; falsy values (0, '', None) mean the file name has
            no year tag.

    Returns:
        (alldata, rankdata, acldata, flagdata) where alldata is the raw
        frame, rankdata is deduplicated per (car, lap), acldata is
        make_cl_data over all cars and flagdata the per-lap track status.
    """
    # fixed: the original tested `year > 0`, which raises TypeError when a
    # string year is passed; truthiness + str() handles int and str tags
    if year:
        inputfile = '../data/final/C_' + event + '-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'
    #outputprefix = year +'-' + event + '-'
    dataset = pd.read_csv(inputfile)

    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1

    # get records for the cars that finish the race
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)
    print('count of completed cars:', completed_car_count)
    print('completed cars:', completed_car_numbers)

    # make a copy; `dataset` keeps only the finishers from here on
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')

    cldata = make_cl_data(dataset)
    flagdata = make_lapstatus_data(dataset)
    acldata = make_cl_data(alldata)

    return alldata, rankdata, acldata, flagdata
def nan_helper(y):
    """Locate NaNs in a 1-d array.

    Returns a boolean NaN mask plus a converter mapping a logical mask to
    positional indices, e.g.::

        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)
    index_of = lambda logical: logical.nonzero()[0]
    return mask, index_of
def get_lap2nextpit(lap_status, maxlap=200):
    """
    input:
        lap_status ; array of 0/1 indicating pitstops for each lap, nan means incomplete race
        maxlap ; the max lap number of the race
    output:
        lap2nextpit ; array of the lap gap to the next pit for each lap
            (NaN after the final recorded pit for incomplete races)
    """
    pitstops = list(np.where(lap_status == 1)[0])

    # a race with no trailing NaNs was completed: treat the finish line as
    # the end of the last stint.  (The original routed this through
    # nan_helper but discarded the returned index converter — the unused
    # indirection is removed.)
    if np.sum(np.isnan(lap_status)) == 0:
        pitstops.append(maxlap)

    lap2nextpit = np.zeros_like(lap_status)
    lap2nextpit[:] = np.nan

    # guard: no pits recorded at all (incomplete race with no stops)
    if len(pitstops) == 0:
        return lap2nextpit

    idx = 0
    for lap in range(len(lap_status)):
        if lap < pitstops[idx]:
            lap2nextpit[lap] = pitstops[idx] - lap
        else:
            # reached a pit lap: measure against the following pit, if any
            idx += 1
            if idx < len(pitstops):
                lap2nextpit[lap] = pitstops[idx] - lap
            else:
                break
    return lap2nextpit
def get_lapdata(acldata):
    """Flatten per-lap records into rows with a time-behind-leader column.

    input:
        acldata ; dataframe with columns ['car_number','completed_laps',
            'time_diff','rank','track_status','lap_status','elapsed_time']
    output:
        np.ndarray with one row per (car, lap) for laps 1..maxlap:
        [car_number, lap, time_diff, rank, track_status, lap_status,
        time_behind, elapsed_time].  Rows mix types, so numpy coerces the
        array to a string dtype.
    """
    COL_COMPLETED_LAPS = 1
    COL_ELAPSED_TIME = 6
    maxlap = np.max(acldata['completed_laps'].values)

    time_behind = []
    for lap in range(1, maxlap + 1):
        this_lap = acldata[acldata['completed_laps'] == lap][
            ['car_number', 'completed_laps', 'time_diff', 'rank',
             'track_status', 'lap_status', 'elapsed_time']].values

        # leader's elapsed time at this lap; builtin float replaces the
        # np.float alias removed in NumPy 1.24
        min_elapsed_time = np.nanmin(this_lap[:, COL_ELAPSED_TIME].astype(float))

        for row in this_lap:
            car_number = int(row[0])
            time_diff = row[2]
            rank = row[3]
            track_status = row[4]
            lap_status = row[5]
            # gap to the leader at this lap
            timebehind = float(row[COL_ELAPSED_TIME]) - min_elapsed_time
            time_behind.append([car_number, lap, time_diff, rank, track_status, lap_status,
                                timebehind, float(row[COL_ELAPSED_TIME])])

    lapdata = np.array(time_behind)
    return lapdata
# features: laptime, rank, track_status, lap_status, timediff
# Row indices of the feature axis in the per-event matrix built by
# get_laptime_dataset (datalist[car, feature, lap]).
LAPTIME = 0
RANK = 1
TRACK_STATUS = 2
LAP_STATUS = 3
TIME_BEHIND = 4
CAUTION_LAPS_INSTINT = 5
LAPS_INSTINT = 6
ELAPSED_TIME = 7
LAP2NEXTPIT = 8
# total number of feature rows allocated per car
_featureCnt = 9
def get_laptime_dataset(stagedata, inlap_status = 0):
    """Build the per-event laptime/rank feature matrices.

    input:
        stagedata ; {event: (alldata, rankdata, acldata, flagdata)}
        inlap_status ; 0 = leave pit laps as-is, 1 = also mark the lap
            BEFORE each pit stop, 2 = also mark the lap AFTER each pit stop
    output: laptime & rank data, one record per event:
        [(
          eventid,
          carids : rowid -> carno,
          datalist: #car_number x features x #totallaps (padded by Nan)
              entry: [[laptime, rank, track_status, lap_status,
                       caution_laps_instint, laps_instint]]
        )]

    NOTE(review): relies on module-level `events_id`, the LAPTIME/RANK/...
    feature indices and `_featureCnt` — confirm against the full module.
    """
    laptime_data = []
    for event in stagedata.keys():
        print(f'start event: {event}')
        laptime_rec = []
        eventid = events_id[event]
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        totalcars = len(carlist)
        totallaps = len(laplist)
        #carnumber -> carid (row index), and the reverse map
        carids={key:idx for idx, key in enumerate(carlist)}
        decode_carids={idx:key for idx, key in enumerate(carlist)}
        #init per-car running stint counters
        lap_instint = {carids[x]:0 for x in carlist}
        caution_instint = {carids[x]:0 for x in carlist}
        #array: car_number x lap, NaN-padded
        #laptime = np.zeros((totalcars, totallaps-1))
        #rank = np.zeros((totalcars, totallaps-1))
        laptime = np.empty((totalcars, totallaps-1))
        rank = np.empty((totalcars, totallaps-1))
        laptime[:] = np.NaN
        rank[:] = np.NaN
        datalist = np.empty((totalcars, _featureCnt, totallaps-1))
        datalist[:] = np.NaN
        #lapdata = acldata[['car_number','completed_laps',
        #       'time_diff','rank','track_status', 'lap_status','elapsed_time']].to_numpy()
        #'car_number','completed_laps','time_diff','rank','track_status', 'lap_status','time_behind'
        lapdata = get_lapdata(acldata)
        for row in lapdata:
            #completed_laps: skip the pre-race lap 0 record
            if int(row[1]) == 0:
                continue
            #add to data array (rows are strings, so cast each field)
            car_number = carids[int(row[0])]
            completed_laps = int(row[1])-1
            time_diff = float(row[2])
            rank = int(row[3])
            track_status = 1 if row[4]=='Y' else 0
            lap_status = 1 if row[5]=='P' else 0
            time_behind = float(row[6])
            datalist[car_number, LAPTIME, completed_laps] = time_diff
            datalist[car_number, RANK, completed_laps] = rank
            datalist[car_number, TRACK_STATUS, completed_laps] = track_status
            datalist[car_number, LAP_STATUS, completed_laps] = lap_status
            datalist[car_number, TIME_BEHIND, completed_laps] = time_behind
            datalist[car_number, ELAPSED_TIME, completed_laps] = float(row[7])
            #stint status: count laps (and caution laps) since the last pit
            if track_status == 1:
                caution_instint[car_number] += 1
            lap_instint[car_number] += 1
            if lap_status == 1:
                #new stint: reset the counters at every pit stop
                lap_instint[car_number] = 0
                caution_instint[car_number] = 0
                # add inlap feature into lap_Status
                # set the previous lap to inlap status
                # what does it mean?
                if (inlap_status!=0):
                    if inlap_status == 1:
                        # set the previous lap of 'P'
                        if completed_laps > 0:
                            #datalist[car_number, LAP_STATUS, completed_laps-1] = INLAP_STATUS
                            datalist[car_number, LAP_STATUS, completed_laps-1] = 1
                    else:
                        # set the next lap of 'P'
                        if completed_laps +1 < totallaps:
                            #datalist[car_number, LAP_STATUS, completed_laps-1] = INLAP_STATUS
                            datalist[car_number, LAP_STATUS, completed_laps + 1] = 1
            datalist[car_number, LAPS_INSTINT, completed_laps] = lap_instint[car_number]
            datalist[car_number, CAUTION_LAPS_INSTINT, completed_laps] = caution_instint[car_number]
        #update lap2nextpit in datalist
        for caridx in range(datalist.shape[0]):
            lap_status = datalist[caridx, LAP_STATUS, :]
            #pit status: laps remaining to the next stop, per lap
            lap2nextpit = get_lap2nextpit(lap_status)
            datalist[caridx, LAP2NEXTPIT, :] = lap2nextpit
        #add one record
        laptime_data.append([eventid, decode_carids, datalist])
        # push this event into stage dataframe
        print('event=%s, records=%s'%(event, datalist.shape))
    return laptime_data
# In[ ]:
def nan_helper(y):
    """Return (nan_mask, index_fn) for the 1-d array *y*.

    index_fn converts a logical mask to positional indices, e.g.
    ``y[nans] = np.interp(x(nans), x(~nans), y[~nans])``.

    NOTE(review): duplicate of the nan_helper defined earlier in this
    module; at import time this later definition wins.
    """
    def to_indices(logical):
        return np.nonzero(logical)[0]
    return np.isnan(y), to_indices
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
#
# Column indices of the feature axis in laptime_data entries
# (datalist[car, feature, lap]); the COL_* names mirror the LAPTIME/RANK/...
# indices used when the matrix is first built.
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSED_TIME= 7
COL_LAP2NEXTPIT = 8
#_featureCnt = 9
# added new features (appended by add_leader_cnt / add_allpit_cnt /
# add_shift_feature in prepare_laptimedata)
COL_LEADER_PITCNT = 9
COL_TOTAL_PITCNT = 10
COL_SHIFT_TRACKSTATUS = 11
COL_SHIFT_LAPSTATUS = 12
COL_SHIFT_LEADER_PITCNT = 13
COL_SHIFT_TOTAL_PITCNT = 14
COL_LASTFEATURE = 14
# dynamically extended space in simulation (scratch copies of the status
# columns, appended past the last real feature)
COL_TRACKSTATUS_SAVE = COL_LASTFEATURE+1
COL_LAPSTATUS_SAVE = COL_LASTFEATURE+2
COL_CAUTION_LAPS_INSTINT_SAVE = COL_LASTFEATURE+3
COL_LAPS_INSTINT_SAVE= COL_LASTFEATURE+4
COL_ENDPOS = COL_LASTFEATURE+5
# bit flags composing a feature mode (combine with |, query via test_flag)
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
FEATURE_LEADER_PITCNT = 8
FEATURE_TOTAL_PITCNT = 16
FEATURE_SHIFT_TRACKSTATUS = 32
FEATURE_SHIFT_LAPSTATUS = 64
FEATURE_SHIFT_LEADER_PITCNT = 128
FEATURE_SHIFT_TOTAL_PITCNT = 256
# flag -> (long name, one-char code); consumed by decode_feature_mode
_feature2str= {
        FEATURE_STATUS : ("FEATURE_STATUS",'S'),
        FEATURE_PITAGE : ("FEATURE_PITAGE",'A'),
        FEATURE_LEADER_PITCNT : ("FEATURE_LEADER_PITCNT",'L'),
        FEATURE_TOTAL_PITCNT : ("FEATURE_TOTAL_PITCNT",'T'),
        FEATURE_SHIFT_TRACKSTATUS : ("FEATURE_SHIFT_TRACKSTATUS",'Y'),
        FEATURE_SHIFT_LAPSTATUS : ("FEATURE_SHIFT_LAPSTATUS",'P'),
        FEATURE_SHIFT_LEADER_PITCNT : ("FEATURE_SHIFT_LEADER_PITCNT",'L'),
        FEATURE_SHIFT_TOTAL_PITCNT : ("FEATURE_SHIFT_TOTAL_PITCNT",'T')
        }
# oracle-mode selectors for the simulator
MODE_ORACLE = 0
MODE_NOLAP = 1
MODE_NOTRACK = 2
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
#MODE_STR={MODE_ORACLE:'oracle', MODE_NOLAP:'nolap',MODE_NOTRACK:'notrack',MODE_TEST:'test'}
#_feature_mode = FEATURE_STATUS
def decode_feature_mode(feature_mode):
    """Print the long names of the flags enabled in *feature_mode* and
    return the compact per-flag code string ('0' marks a disabled flag)."""
    long_names = []
    codes = []
    for flag, (long_name, code) in _feature2str.items():
        enabled = test_flag(feature_mode, flag)
        codes.append(code if enabled else '0')
        if enabled:
            long_names.append(long_name)
    print(' '.join(long_names))
    return ''.join(codes)
def add_leader_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS, shift_len = 0,
                   dest_col = COL_LEADER_PITCNT,
                   verbose = False):
    """
    add a new feature into mat(car, feature, lap): the count of pit stops
    taken this lap by cars ranked ahead, with the rank order optionally
    read shift_len laps earlier.

    input:
        selmat    ; laptime_data array [car, feature, lap]
        rank_col  ; feature row holding the rank series
        pit_col   ; feature row holding the 0/1 pit series
        shift_len ; how many laps back to read the rank order from
        dest_col  ; feature row to write; -1 appends a new row
        verbose   ; dump intermediate matrices for debugging
    output:
        matrix with the leader-pit-count feature filled in
    """
    dim1, dim2, dim3 = selmat.shape

    # rank order per lap: idx[:, lap] lists car rows from leader to tail
    idx = np.argsort(selmat[:, rank_col,:], axis=0)
    # NOTE: a dead `true_rank = np.argsort(idx, ...).astype(np.float)` was
    # removed here — it was never used and np.float crashes on NumPy >= 1.24.

    # get leaderCnt by sorted pits: reorder each lap's pit column into the
    # rank order taken shift_len laps back
    pits = np.zeros((dim1,dim3))
    for lap in range(shift_len, dim3):
        col = idx[:, lap-shift_len]
        pits[:, lap] = selmat[col, pit_col, lap]

    # exclusive cumulative sum down the rank order = pits by cars ahead
    leaderCnt = np.nancumsum(pits, axis=0) - pits

    if verbose:
        print('pits:\n')
        print(pits[:,190:])
        print('leaderCnt raw:\n')
        print(leaderCnt[:,190:])

    #remove nans
    nanidx = np.isnan(leaderCnt)
    leaderCnt[nanidx] = 0

    if verbose:
        print('leaderCnt after remove nan:\n')
        print(leaderCnt[:,190:])

    if dest_col == -1:
        #create a new data matrix with one extra feature row
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: write into the existing feature row in place
        newmat = selmat

    # scatter the counts back from rank order to car-row order
    for lap in range(dim3):
        col = idx[:, lap]
        newmat[col, dest_col, lap] = leaderCnt[:, lap]

    # sync length to COL_RANK: NaN-pad the tail so the new feature matches
    # each car's actual ts length
    for rec in newmat:
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            #todo, some invalid nan, remove them
            rec[dest_col, -nan_count:] = np.nan

    return newmat
def add_allpit_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS,
                   dest_col = COL_TOTAL_PITCNT,verbose = False):
    """
    add a new feature into mat(car, feature, lap): the total number of pit
    stops taken in each lap, broadcast to every car row.

    input:
        selmat   ; laptime_data array [car, feature, lap]
        rank_col ; feature row holding the rank series (length reference)
        pit_col  ; feature row holding the 0/1 pit series
        dest_col ; feature row to write; -1 appends a new row
        verbose  ; dump intermediate vectors for debugging
    output:
        matrix with the total-pit-count feature filled in
    """
    dim1, dim2, dim3 = selmat.shape

    #calc totalCnt vector: pits per lap summed over all cars
    totalCnt = np.nansum(selmat[:, pit_col, :], axis=0).reshape((-1))

    if verbose:
        # fixed: the original printed an undefined name `pits` here, which
        # raised NameError whenever verbose=True; show the raw pit column
        print('pits:\n')
        print(selmat[:, pit_col, 190:])
        print('totalCnt raw:\n')
        print(totalCnt[190:])

    #remove nans
    nanidx = np.isnan(totalCnt)
    totalCnt[nanidx] = 0

    if verbose:
        print('totalCnt after remove nan:\n')
        print(totalCnt[190:])

    if dest_col == -1:
        #create a new data matrix with one extra feature row
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: write into the existing feature row in place
        newmat = selmat

    # same per-lap totals for every car
    for car in range(dim1):
        newmat[car, dest_col, :] = totalCnt

    # sync length to COL_RANK: NaN-pad the tail so the new feature matches
    # each car's actual ts length
    for rec in newmat:
        nans, x= nan_helper(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            #todo, some invalid nan, remove them
            rec[dest_col, -nan_count:] = np.nan

    return newmat
def add_shift_feature(selmat, rank_col=COL_RANK, shift_col=COL_LAPSTATUS, shift_len = 2,
                      dest_col = -1,verbose = False):
    """
    add a new feature into mat(car, feature, lap): a copy of shift_col
    shifted left by shift_len laps over each car's valid span.
    warning: these are oracle features, be careful not to let future rank
    positions leaking

    input:
        selmat    ; laptime_data array [car, feature, lap]
        rank_col  ; feature row whose non-NaN span defines the valid laps
        shift_col ; feature row to shift
        shift_len ; number of laps to shift left
        dest_col  ; feature row to write; -1 appends a new row
    output:
        matrix with the shifted feature filled in (tail NaN-padded)
    """
    dim1, dim2, dim3 = selmat.shape

    if dest_col == -1:
        #create a new data matrix with one extra feature row
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: write into the existing feature row in place
        newmat = selmat

    for car in range(dim1):
        # set empty status by default
        newmat[car, dest_col, :] = np.nan

        # valid laps are where the rank column is not NaN
        # (the original also called nan_helper here but never used the
        # result; the dead locals were removed)
        rec = selmat[car]
        recnnz = rec[shift_col, ~np.isnan(rec[rank_col,:])]
        reclen = len(recnnz)

        #shift copy: zero-fill the valid span, then move values left; the
        #last shift_len valid laps stay 0
        newmat[car, dest_col, :reclen] = 0
        newmat[car, dest_col, :reclen-shift_len] = recnnz[shift_len:]

    return newmat
def prepare_laptimedata(prediction_length, freq,
                    test_event = 'Indy500-2018',
                    train_ratio=0.8,
                    context_ratio = 0.,
                    shift_len = -1):
    """
    Prepare the global ``laptime_data`` for training/testing.

    Per event this (1) drops time series shorter than train_len +
    prediction_length, (2) re-ranks the remaining cars so ranks are dense
    again, and (3) appends/updates derived features (leader pit count,
    total pit count, and shifted copies of the status features).

    Args:
        prediction_length: forecast horizon in laps.
        freq: pandas frequency string (unused here; kept for a uniform API).
        test_event: event name treated as the test event; later events are skipped.
        train_ratio: fraction of the longest ts used as training length.
        context_ratio: if non-zero, fraction of the longest ts kept as context.
        shift_len: shift distance for the *_SHIFT features; <0 means use
            prediction_length.

    Input (global): laptime_data
    Output: new_data ; list of [eventid, carids-dict, feature-matrix] entries.
    """
    _laptime_data = laptime_data.copy()
    test_eventid = events_id[test_event]
    # reranking always operates on the rank column
    run_ts = COL_RANK
    # check shift len: default the feature shift to the forecast horizon
    if shift_len < 0:
        shift_len = prediction_length
    print('prepare_laptimedata shift len:', shift_len)
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    new_data = []
    for _data in _laptime_data:
        # events are ordered; everything after the test event is skipped
        if _data[0] > test_eventid:
            print('skip this event:', events[_data[0]])
            break
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
        # statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            # fall back to the module-level train lengths
            train_len = _train_len if not test_mode else _test_train_len
        if context_ratio != 0.:
            # add this part to the train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10
        print(f'before ====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # rerank because short ts are removed below
        #if run_ts == COL_RANK and dorerank == True:
        if True:
            sel_rows = []
            # used to check the dimension of features: below COL_LASTFEATURE+1
            # means the derived feature columns do not exist yet
            input_feature_cnt = _data[2].shape[1]
            if input_feature_cnt < COL_LASTFEATURE + 1:
                print('create new features mode, feature_cnt:', input_feature_cnt)
            else:
                print('update features mode, feature_cnt:', input_feature_cnt)
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                # remove nan (only tails are expected to be nan)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    print(f'rerank a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                else:
                    sel_rows.append(rowid)
            # get the selected (long-enough) sub-matrix
            sel_idx = np.array(sel_rows)
            selmat = _data[2][sel_idx]
            # double argsort turns raw rank values into dense 0-based ranks
            #ipdb.set_trace()
            mask = np.isnan(selmat[:,COL_RANK,:])
            idx = np.argsort(selmat[:,COL_RANK,:], axis=0)
            # NOTE(review): np.float is removed in NumPy>=1.24 — confirm the
            # pinned NumPy version still provides it
            true_rank = np.argsort(idx, axis=0).astype(np.float)
            true_rank[mask] = np.nan
            if test_mode:
                #
                # for historical code mismatch, simulation does not run rerank,
                # so the test event keeps 1-based ranks
                #
                _data[2][sel_idx,COL_RANK,:] = true_rank + 1
            else:
                _data[2][sel_idx,COL_RANK,:] = true_rank
            # rebuild the rowid -> carno dict for the surviving rows
            new_carids = {}
            for rowid in range(len(sel_idx)):
                carid = sel_idx[rowid]
                carno = _data[1][carid]
                new_carids[rowid] = carno
        # add new features
        # add leaderPitCnt (verbose logging only for the first event)
        if _data[0]==0:
            verbose = True
        else:
            verbose = False
        # dest_col == -1 appends a new column; otherwise overwrite in place
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_LEADER_PITCNT
        data2_intermediate = add_leader_cnt(_data[2][sel_idx], shift_len = shift_len, dest_col=dest_col, verbose = verbose)
        # add totalPit
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_TOTAL_PITCNT
        data2_intermediate = add_allpit_cnt(data2_intermediate, dest_col=dest_col)
        #
        # add shift features, in a fixed order, see the MACROS
        #COL_SHIFT_TRACKSTATUS = 11
        #COL_SHIFT_LAPSTATUS = 12
        #COL_SHIFT_LEADER_PITCNT = 13
        #COL_SHIFT_TOTAL_PITCNT = 14
        #
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TRACKSTATUS
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_TRACKSTATUS, shift_len = shift_len)
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LAPSTATUS
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_LAPSTATUS, shift_len = shift_len)
        # leader_pitcnt can not be shifted without target leaking; just do not use it
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LEADER_PITCNT
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_LEADER_PITCNT, shift_len = shift_len)
        dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TOTAL_PITCNT
        data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                shift_col=COL_TOTAL_PITCNT, shift_len = shift_len)
        # final
        data2_newfeature = data2_intermediate
        new_data.append([_data[0], new_carids, data2_newfeature])
    return new_data
def get_real_features(feature_mode, rec, endpos):
    """
    Build the list of real-valued covariate vectors selected by feature_mode.

    feature_mode is a bitmask (FEATURE_* flags); every flag that is set
    contributes its column(s) of ``rec``, truncated to ``endpos`` laps, in a
    fixed order that the trained models rely on.

    Args:
        feature_mode: bitwise OR of FEATURE_* flags.
        rec: 2-D array [feature, lapnumber].
        endpos: exclusive lap cutoff; <=0 means "use the full length".

    Returns:
        list of 1-D arrays, one per selected feature column.
    """
    if endpos <= 0:
        endpos = rec.shape[1]
    # (flag, source columns) pairs in the canonical feature order
    flag_to_cols = (
        (FEATURE_STATUS, (COL_TRACKSTATUS, COL_LAPSTATUS)),
        (FEATURE_PITAGE, (COL_LAPS_INSTINT,)),
        (FEATURE_LEADER_PITCNT, (COL_LEADER_PITCNT,)),
        (FEATURE_TOTAL_PITCNT, (COL_TOTAL_PITCNT,)),
        (FEATURE_SHIFT_TRACKSTATUS, (COL_SHIFT_TRACKSTATUS,)),
        (FEATURE_SHIFT_LAPSTATUS, (COL_SHIFT_LAPSTATUS,)),
        (FEATURE_SHIFT_LEADER_PITCNT, (COL_SHIFT_LEADER_PITCNT,)),
        (FEATURE_SHIFT_TOTAL_PITCNT, (COL_SHIFT_TOTAL_PITCNT,)),
    )
    features = []
    for flag, cols in flag_to_cols:
        if test_flag(feature_mode, flag):
            for col in cols:
                features.append(rec[col, :endpos])
    return features
def make_dataset_byevent(_laptime_data, prediction_length, freq,
                     useeid = False,
                     run_ts=COL_LAPTIME,
                     test_event = 'Indy500-2018',
                     use_global_dict = True,
                     oracle_mode = MODE_ORACLE,
                     half_moving_win = True,
                     train_ratio=0.8,
                     log_transform = False,
                     context_ratio = 0.,
                     dorerank = True,
                     test_cars = []
                     ):
    """
    Split the time series into train and test GluonTS datasets.

    Every event except ``test_event`` goes entirely to the train set; the
    test event contributes rolling-window test records (one per end
    position, step -1) and, when ``context_ratio`` != 0, an initial context
    slice to the train set.

    oracle_mode: MODE_NOTRACK/MODE_NOLAP zero out the corresponding status
    covariates to simulate prediction without oracle knowledge.

    Returns:
        (train_ds, test_ds, train_set, test_set) — the ListDatasets plus the
        raw record lists they were built from.

    NOTE(review): ``test_cars = []`` is a mutable default; it is only read
    here, never mutated, so it is safe as-is.
    """
    # module-level configuration for the covariate selection
    feature_mode = _feature_mode
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []
    totalTSCnt = 0
    totalTSLen = 0
    test_eventid = events_id[test_event]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
        # statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            # fall back to the module-level train lengths
            train_len = _train_len if not test_mode else _test_train_len
        if context_ratio != 0.:
            # add this part to the train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10
        print(f'after ====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process each ts (one car) of this event
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            # remove nan (only tails are expected to be nan)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            totalTSCnt += 1
            totalTSLen += totallen
            if ( totallen < train_len + prediction_length):
                print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                # simulation dataset; todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            # restrict to the requested cars, if any (test mode only)
            if len(test_cars)>0 and carno not in test_cars:
                continue
            # static categorical features: car id, optionally event id
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            # first, take a copy of the target
            # target can be COL_XXSTATUS
            if _joint_train:
                # multivariate target: [run_ts, lap status]
                target_cols = [run_ts, COL_LAPSTATUS]
                target_val = rec[target_cols].copy().astype(np.float32)
            else:
                target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # oracle-mode selection of features: blank the status covariates
            if test_flag(oracle_mode, MODE_NOTRACK):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # the whole ts goes to the train set
                real_features = get_real_features(feature_mode, rec, -1)
                _train.append({'target': target_val,
                    'start': start,
                    'feat_static_cat': static_cat,
                    'feat_dynamic_real': real_features
                     })
            else:
                # test event: optionally donate the context prefix to train
                if context_ratio != 0.:
                    # add [0, context_len] to the train set
                    if _joint_train:
                        _train.append({'target': target_val[:,:context_len],
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': get_real_features(feature_mode, rec, context_len)
                             })
                    else:
                        _train.append({'target': target_val[:context_len],
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': get_real_features(feature_mode, rec, context_len)
                             })
                # testset
                # multiple test ts (rolling window, one record per lap)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                step = -1
                for endpos in range(totallen, context_len+prediction_length,
                        step):
                    # NOTE(review): track_rec/lap_rec/pitage_rec are computed
                    # but unused — presumably legacy; confirm before removal
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    real_features = get_real_features(feature_mode, rec, endpos)
                    if _joint_train:
                        _test.append({'target': target_val[:,:endpos],
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': real_features
                             })
                    else:
                        _test.append({'target': target_val[:endpos],
                            'start': start,
                            'feat_static_cat': static_cat,
                            'feat_dynamic_real': real_features
                             })
                    test_rec_cnt += 1
            # check feature cnt (from the last constructed record)
            featureCnt = len(real_features)
            # one ts done
            print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt},featureCnt:{featureCnt}')
        train_set.extend(_train)
        test_set.extend(_test)
    print(f'train len:{len(train_set)}, test len:{len(test_set)}, totsl TsCnt:{totalTSCnt}, total ts len:{totalTSLen}')
    train_ds = ListDataset(train_set, freq=freq,one_dim_target= False if _joint_train else True)
    test_ds = ListDataset(test_set, freq=freq,one_dim_target= False if _joint_train else True)
    return train_ds, test_ds, train_set, test_set
# In[ ]:
def init_estimator(model, gpuid, epochs=100, batch_size = 32,
        target_dim = 3, distr_output = None, use_feat_static = True):
    """
    Construct a GluonTS estimator (or R/baseline predictor) by model name.

    Args:
        model: one of 'deepAR', 'deepAR-Oracle', 'deepARW-Oracle',
            'deepAR-multi', 'simpleFF', 'deepFactor', 'deepState',
            'ets', 'prophet', 'arima', 'naive'.
        gpuid: gpu index; negative selects cpu.
        epochs, batch_size: Trainer settings.
        target_dim: output dimension for the multivariate 'deepAR-multi' case.
        distr_output: DistributionOutput passed to the deepAR variants.
        use_feat_static: whether to use the static car-id categorical feature.

    Uses module globals: prediction_length, context_length, cardinality, freq.
    Exits the process for an unknown model name.
    """
    # negative gpuid -> cpu context
    if int(gpuid) < 0:
        ctx = "cpu"
    else:
        ctx = "gpu(%s)"%gpuid
    if model == 'deepAR':
        # plain deepAR, no dynamic covariates
        if use_feat_static:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=True,
                cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                    batch_size = batch_size,
                    epochs=epochs,
                    learning_rate=1e-3,
                    num_batches_per_epoch=100
                   )
            )
        else:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=False,
                #cardinality=cardinality,
                use_feat_dynamic_real=False,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                    batch_size = batch_size,
                    epochs=epochs,
                    learning_rate=1e-3,
                    num_batches_per_epoch=100
                   )
            )
    elif model == 'deepAR-Oracle':
        # deepAR with the dynamic real covariates (oracle features)
        if use_feat_static:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                    batch_size = batch_size,
                    epochs=epochs,
                    learning_rate=1e-3,
                    num_batches_per_epoch=100
                   )
            )
        else:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                    batch_size = batch_size,
                    epochs=epochs,
                    learning_rate=1e-3,
                    num_batches_per_epoch=100
                   )
            )
    elif model == 'deepARW-Oracle':
        # weighted deepAR variant with oracle covariates
        if use_feat_static:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                    batch_size = batch_size,
                    epochs=epochs,
                    learning_rate=1e-3,
                    #hybridize=False,
                    num_batches_per_epoch=100
                   )
            )
        else:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                    batch_size = batch_size,
                    epochs=epochs,
                    learning_rate=1e-3,
                    #hybridize=False,
                    num_batches_per_epoch=100
                   )
            )
    elif model == 'deepAR-multi':
        # multivariate deepAR with a Gaussian output of dim target_dim
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=use_feat_static,
            #cardinality=cardinality,
            use_feat_dynamic_real=False,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                batch_size = batch_size,
                epochs=epochs,
                learning_rate=1e-3,
                num_batches_per_epoch=100
               ),
            distr_output=MultivariateGaussianOutput(dim=target_dim),
        )
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                batch_size = batch_size,
                epochs=epochs,
                learning_rate=1e-3,
                hybridize=False,
                num_batches_per_epoch=100
               )
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                batch_size = batch_size,
                epochs=epochs,
                learning_rate=1e-3,
                num_batches_per_epoch=100
               )
        )
    elif model == 'deepState':
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                batch_size = batch_size,
                epochs=epochs,
                learning_rate=1e-3,
                num_batches_per_epoch=100
               )
        )
    elif model == 'ets':
        # classical baselines below return predictors, not trainable estimators
        estimator = RForecastPredictor(method_name='ets',freq= freq, prediction_length = prediction_length)
    elif model == 'prophet':
        estimator = ProphetPredictor(freq= freq, prediction_length = prediction_length)
    elif model == 'arima':
        estimator = RForecastPredictor(method_name='arima',freq= freq, prediction_length = prediction_length, trunc_length = 200)
    elif model == 'naive':
        estimator = NaivePredictor(freq= freq, prediction_length = prediction_length)
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)
    return estimator
# In[ ]:
#
# simulation engine general
#
def init_simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
        featuremode = stint.FEATURE_STATUS,
        pitmodel = 0,
        inlapmode=0,
        train_len = 40,test_train_len=40,
        joint_train = False,
        pitmodel_bias = 0):
    """
    Configure the ``stint`` simulation module without running a simulation.

    Mirrors the configuration half of ``simulation()``; uses the global
    ``prepared_laptimedata``. ``predictionlen`` is accepted for signature
    parity but not used here.

    Model path convention: <_dataset_id>/<_task_id>-<trainid>/
    """
    # configuration
    stint._inlap_status = inlapmode
    stint.init(pitmodel, pitmodel_bias= pitmodel_bias)
    # todo: add into stint code
    # hand the feature-augmented laptime data to the stint module
    stint.set_laptimedata(prepared_laptimedata)
    stint._dataset_id = datasetid
    stint._test_event = testevent
    #_test_event = 'Indy500-2019'
    stint._feature_mode = featuremode
    stint._context_ratio = 0.
    stint._task_id = taskid # rank,laptime, the trained model's task
    stint._run_ts = runts #COL_LAPTIME,COL_RANK
    stint._exp_id=expid #rank, laptime, laptim2rank, timediff2rank...
    stint._use_mean = True
    stint._train_len = train_len
    stint._test_train_len = test_train_len
    stint._joint_train = joint_train
def simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
        datamode, loopcnt, featuremode = stint.FEATURE_STATUS,
        pitmodel = 0, model = 'oracle', inlapmode=0, train_len = 40,test_train_len=40,
        forecastmode = 'shortterm', joint_train = False,
        pitmodel_bias= 0):
    """
    Configure the ``stint`` module, load a trained model, and run the
    simulation ``loopcnt`` times.

    forecastmode:
        'shortterm' ; rolling short-term forecasts (run_simulation_shortterm)
        'stint'     ; per-stint forecasts (run_simulation_pred)

    Uses the global ``prepared_laptimedata`` (and ``epochs``).

    Returns:
        (b, ret2) where b is the stacked per-run evaluation array and ret2
        maps run index -> raw simulation output.
    """
    # configuration (same block as init_simulation)
    # model path: <_dataset_id>/<_task_id>-<trainid>/
    stint._inlap_status = inlapmode
    stint.init(pitmodel, pitmodel_bias= pitmodel_bias)
    # todo: add into stint code
    # hand the feature-augmented laptime data to the stint module
    stint.set_laptimedata(prepared_laptimedata)
    #stint.set_laptimedata(laptime_data)
    stint._dataset_id = datasetid
    stint._test_event = testevent
    #_test_event = 'Indy500-2019'
    stint._feature_mode = featuremode
    stint._context_ratio = 0.
    stint._task_id = taskid # rank,laptime, the trained model's task
    stint._run_ts = runts #COL_LAPTIME,COL_RANK
    stint._exp_id=expid #rank, laptime, laptim2rank, timediff2rank...
    stint._use_mean = True
    stint._train_len = train_len
    stint._test_train_len = test_train_len
    stint._joint_train = joint_train
    if forecastmode == 'stint':
        stint._trim = 0
    stint._debug_carlist=[]
    stint._force_endpit_align = False
    stint._include_endpit = True
    predictor = stint.load_model(predictionlen, model,trainid='indy500',epochs = epochs, exproot='./')
    ret2 = {}
    for i in range(loopcnt):
        # each run returns (df, full_samples, full_tss) in shortterm mode
        if forecastmode == 'shortterm':
            ret2[i] = stint.run_simulation_shortterm(predictor, predictionlen, stint.freq, datamode=datamode)
        elif forecastmode == 'stint':
            ret2[i] = stint.run_simulation_pred(predictor, predictionlen, stint.freq, datamode=datamode)
        else:
            print('forecastmode not support:', forecastmode)
            break
    acc = []
    for i in ret2.keys():
        if forecastmode == 'shortterm':
            df = ret2[i][0]
            _x = stint.get_evalret_shortterm(df)
        elif forecastmode == 'stint':
            df = ret2[i]
            _x = stint.get_evalret(df)
        acc.append(_x)
    b = np.array(acc)
    # mean evaluation metrics over the runs
    print(np.mean(b, axis=0))
    # save keys
    #stint._pitmodel.save_keys('pitmodel-keys.pickle')
    return b, ret2
def long_predict(predictor, sampleCnt = 100):
    """
    Stitch the rolling short-horizon forecasts over ``test_ds`` (global)
    into one long-horizon forecast, keeping only the farthest (last)
    predicted lap from each window.

    Returns:
        (target, tss[0]) ; a SampleForecast whose samples cover the whole
        span, and the first ground-truth series.
    """
    def get_start(idx):
        # minutes between this forecast's start and the series start;
        # the synthetic freq is 1 sample per minute, so this is a lap index
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=predictor,  # predictor
        num_samples=sampleCnt,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # rolling windows are ordered latest-first: index -1 is the earliest
    first_start = get_start(-1)
    last_start = get_start(0)
    print(first_start, last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        # copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start
        # keep only the last (farthest) predicted step of each window
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
    target.samples = newsamples
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target, tss[0]
def get_alldf(dfx, year=2018):
    """
    Concatenate the per-run result DataFrames of a simulation output dict.

    In 'shortterm' forecast mode (module global ``_forecast_mode``) each
    run's value is a tuple whose first element is the DataFrame; otherwise
    the value itself is the DataFrame. ``year`` is kept for API parity and
    is unused.
    """
    if _forecast_mode == 'shortterm':
        frames = [dfx[run_id][0] for run_id in dfx.keys()]
    else:
        frames = [dfx[run_id] for run_id in dfx.keys()]
    # a single run is returned as-is (no concat copy)
    return pd.concat(frames) if len(frames) > 1 else frames[0]
def get_alldf_mode(dfx, year=2018,mode=0):
    """
    Aggregate repeated simulation runs into one prediction per
    (carno, startlap) record.

    mode:
        0 ; most frequent predicted end rank (statistical mode)
        1 ; mean
        2 ; median

    Raises:
        ValueError: for any other mode value. (The original code fell
        through with ``pred_endrank`` undefined, raising a NameError deep
        inside the loop instead.)

    Returns:
        DataFrame with one aggregated row per (carno, startlap); the column
        set depends on the module-global ``_forecast_mode``.
    """
    if mode not in (0, 1, 2):
        raise ValueError(f'unsupported aggregation mode: {mode}')
    dfall = get_alldf(dfx, year=year)
    cars = set(dfall.carno.values)
    # start laps observed for each car
    startlaps = {}
    for car in cars:
        startlaps[car] = set(dfall[dfall['carno']==car].startlap.values)
    retdf = []
    for car in cars:
        for startlap in startlaps[car]:
            # all runs' rows for this (car, startlap)
            dfrec = dfall[(dfall['carno']==car) & (dfall['startlap']==startlap)]
            if mode == 0:
                # most frequent predicted end rank across the runs
                pred_endrank = stats.mode(dfrec.pred_endrank.values).mode[0]
                #pred_endlap = stats.mode(dfrec.pred_endlap.values).mode[0]
            elif mode == 1:
                pred_endrank = np.mean(dfrec.pred_endrank.values)
                #pred_endlap = np.mean(dfrec.pred_endlap.values)
            else:
                pred_endrank = np.median(dfrec.pred_endrank.values)
                #pred_endlap = np.median(dfrec.pred_endlap.values)
            # take the first run's row and overwrite the prediction columns
            # (col 2 = startrank, 6 = pred_endrank, 7 = pred_diff, 8 = pred_sign)
            firstrec = dfrec.to_numpy()[0,:]
            firstrec[6] = pred_endrank
            firstrec[7] = pred_endrank - firstrec[2]
            if firstrec[7] == 0:
                firstrec[8] = 0
            elif firstrec[7] > 0:
                firstrec[8] = 1
            else:
                firstrec[8] = -1
            #endlap, pred_endlap
            retdf.append(firstrec)
    if _forecast_mode == 'shortterm':
        dfout = pd.DataFrame(retdf, columns =['carno', 'startlap', 'startrank',
                       'endrank', 'diff', 'sign',
                       'pred_endrank', 'pred_diff', 'pred_sign',
                       #'endlap','pred_endlap'
                       ])
    else:
        dfout = pd.DataFrame(retdf, columns =['carno', 'startlap', 'startrank',
                       'endrank', 'diff', 'sign',
                       'pred_endrank', 'pred_diff', 'pred_sign',
                       'endlap','pred_endlap'
                       ])
    print('df size:', len(dfout))
    return dfout
def get_allsamples(dfx, year=2018):
    """
    Collapse multiple simulation runs into per-car sample matrices.

    Each run contributes one row per car: the per-lap mean over that run's
    forecast samples. Run keys are assumed to be 0..runcnt-1 (they double
    as row indices). ``year`` is unused; kept for API parity.

    Returns:
        (full_samples, full_tss) where full_samples[carno] is a
        (runcnt, lapcnt) array and full_tss is taken from the first run.
    """
    run_ids = list(dfx.keys())
    n_runs = len(run_ids)
    first_run = dfx[run_ids[0]]
    full_tss = first_run[2]
    cars = list(full_tss.keys())
    n_samples, n_laps = first_run[1][cars[0]].shape
    print('sacmplecnt:', n_samples, 'lapcnt:', n_laps, 'runcnt:', n_runs)
    # pre-allocate one (runcnt, lapcnt) matrix per car
    full_samples = {carno: np.zeros((n_runs, n_laps)) for carno in cars}
    for run_id in run_ids:
        run_forecast = dfx[run_id][1]
        for carno in cars:
            # mean over the sample axis for this run, NaNs ignored
            full_samples[carno][run_id, :] = np.nanmean(run_forecast[carno], axis=0)
    return full_samples, full_tss
#straight implementation of prisk
def quantile_loss(target, quantile_forecast, q):
    """
    Pinball (quantile) loss at level ``q``, scaled by 2, with NaN targets
    ignored — a straight reimplementation of the GluonTS definition.
    """
    diff = quantile_forecast - target
    # +(1-q) weight when the forecast covers the target, -q otherwise
    weight = (target <= quantile_forecast) - q
    return 2.0 * np.nansum(np.abs(diff * weight))
def abs_target_sum(target):
    """Return the sum of |target| with NaN entries ignored."""
    abs_vals = np.abs(target)
    return np.nansum(abs_vals)
def prisk(full_samples, full_tss, verbose = False):
    """
    Compute weighted quantile losses (p-risk) via the GluonTS Evaluator.

    Wraps each car's sample matrix (laps >= 12 only) in a SampleForecast
    and its ground truth in a minute-indexed DataFrame, then evaluates at
    the 0.1/0.5/0.9 quantiles.

    Returns:
        agg_metrics dict from the GluonTS Evaluator.
    """
    carlist = full_tss.keys()
    tss = []
    forecasts = []
    forecasts_mean = []
    freq = '1min'
    start = pd.Timestamp("01-01-2019", freq=freq)
    for car in carlist:
        testcar = car
        # skip the first 12 laps (warm-up / context)
        # NOTE(review): `start + 12` relies on deprecated Timestamp+int
        # arithmetic — confirm against the pinned pandas version
        fc = SampleForecast(samples = full_samples[testcar][:, 12:], freq=freq, start_date=start + 12)
        # mean forecast kept for reference; not fed to the evaluator below
        samples = np.mean(full_samples[testcar][:, 12:], axis =0, keepdims=True)
        fc_mean = SampleForecast(samples = samples, freq=freq, start_date=start + 12)
        index = pd.date_range(start='2019-01-01 00:00:00', freq = 'T', periods = len(full_tss[testcar]))
        ts = pd.DataFrame(index = index, data = full_tss[testcar])
        tss.append(ts)
        forecasts.append(fc)
        forecasts_mean.append(fc_mean)
    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(tss))
    if verbose:
        print(json.dumps(agg_metrics, indent=4))
    print(agg_metrics["wQuantileLoss[0.1]"], agg_metrics["wQuantileLoss[0.5]"],agg_metrics["wQuantileLoss[0.9]"])
    return agg_metrics
def prisk_direct_bysamples2(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
"""
target: endrank
forecast: pred_endrank
item_id: <carno, startlap>
"""
carlist = full_tss.keys()
prisk = np.zeros((len(carlist), len(quantiles)))
target_sum = np.zeros((len(carlist)))
aggrisk = np.zeros((len(quantiles)))
for carid, carno in enumerate(carlist):
# for this car
forecast = full_samples[carno]
target = full_tss[carno]
#calc quantiles
# len(quantiles) x 1
quantile_forecasts = np.quantile(forecast, quantiles, axis=0)
for idx, q in enumerate(quantiles):
q_forecast = quantile_forecasts[idx]
prisk[carid, idx] = quantile_loss(target[startid:], q_forecast[startid:], q)
target_sum[carid] = abs_target_sum(target[startid:])
if verbose==True and carno==3:
print('target:', target[startid:])
print('forecast:', q_forecast[startid:])
print('target_sum:', target_sum[carid])
print('quantile_forecasts:', quantile_forecasts[:,startid:])
#agg
#aggrisk = np.mean(prisk, axis=0)
prisk_sum = np.nansum(prisk, axis=0)
if verbose==True:
print('prisk:',prisk)
print('prisk_sum:',prisk_sum)
print('target_sum:',target_sum)
for idx, q in enumerate(quantiles):
aggrisk[idx] = np.divide(prisk_sum[idx], np.sum(target_sum))
agg_metrics = {}
for idx, q in enumerate(quantiles):
agg_metrics[f'wQuantileLoss[{q}]'] = aggrisk[idx]
print(agg_metrics.values())
return agg_metrics, aggrisk
# In[ ]:
def prisk_direct_bysamples(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    Calculate p-risk directly from <samples, tss> (matches the GluonTS
    Evaluator's weighted quantile loss).

    Args:
        full_samples: carno -> (nsamples, nlaps) forecast sample matrix.
        full_tss: carno -> ground-truth lap series.
        quantiles: quantile levels to evaluate.
        startid: first lap index included in the evaluation.
        verbose: dump intermediate arrays (car 3 is used as the probe).

    Returns:
        (agg_metrics, aggrisk): dict of 'wQuantileLoss[q]' entries and the
        raw aggregated-risk array in the same quantile order.

    NOTE(review): ``quantiles=[0.1,0.5,0.9]`` is a mutable default; it is
    only read here, so it is safe as-is.
    """
    carlist = full_tss.keys()
    # per-car, per-quantile losses and per-car |target| sums
    prisk = np.zeros((len(carlist), len(quantiles)))
    target_sum = np.zeros((len(carlist)))
    aggrisk = np.zeros((len(quantiles)))
    for carid, carno in enumerate(carlist):
        # for this car
        forecast = full_samples[carno]
        target = full_tss[carno]
        # quantile forecasts over the sample axis: len(quantiles) x nlaps
        quantile_forecasts = np.quantile(forecast, quantiles, axis=0)
        for idx, q in enumerate(quantiles):
            q_forecast = quantile_forecasts[idx]
            prisk[carid, idx] = quantile_loss(target[startid:], q_forecast[startid:], q)
        target_sum[carid] = abs_target_sum(target[startid:])
        if verbose==True and carno==3:
            print('target:', target[startid:])
            print('forecast:', q_forecast[startid:])
            print('target_sum:', target_sum[carid])
            print('quantile_forecasts:', quantile_forecasts[:,startid:])
    # aggregate: total loss normalized by total |target|
    #aggrisk = np.mean(prisk, axis=0)
    prisk_sum = np.nansum(prisk, axis=0)
    if verbose==True:
        print('prisk:',prisk)
        print('prisk_sum:',prisk_sum)
        print('target_sum:',target_sum)
    for idx, q in enumerate(quantiles):
        aggrisk[idx] = np.divide(prisk_sum[idx], np.sum(target_sum))
    agg_metrics = {}
    for idx, q in enumerate(quantiles):
        agg_metrics[f'wQuantileLoss[{q}]'] = aggrisk[idx]
    print(agg_metrics.values())
    return agg_metrics, aggrisk
def clear_samples(full_samples, full_tss, clearidx):
    """
    Return deep copies of (full_samples, full_tss) with the laps listed in
    ``clearidx`` blanked out (set to NaN) for every car. The inputs are
    left untouched.
    """
    import copy
    masked_samples = copy.deepcopy(full_samples)
    masked_tss = copy.deepcopy(full_tss)
    for carno in full_tss.keys():
        # NaN-out the selected lap columns in the forecast samples...
        masked_samples[carno][:, clearidx] = np.nan
        # ...and the corresponding ground-truth laps
        masked_tss[carno][clearidx] = np.nan
    return masked_samples, masked_tss
def do_rerank(dfout, short=True):
    """
    Re-sort predicted end ranks globally within each start lap.

    Columns: carno, startlap, startrank, endrank, diff, sign, pred_endrank,
    pred_diff, pred_sign, endlap, pred_endlap.
    Model output for pred_endrank can be float; within each startlap group
    a double argsort converts it into dense integer ranks, then pred_diff
    and pred_sign are recomputed.

    Args:
        dfout: prediction DataFrame as produced by get_alldf_mode.
        short: True drops the trailing endlap/pred_endlap columns
            (shortterm mode).

    Returns:
        new int-typed DataFrame with reranked predictions.
    """
    cols=['carno','startlap','startrank','endrank','diff','sign','pred_endrank','pred_diff','pred_sign','endlap','pred_endlap']
    colid={x:id for id,x in enumerate(cols)}
    #df = dfout.sort_values(by=['startlap','carno'])
    print('rerank...')
    laps = set(dfout.startlap.values)
    dfs = []
    for lap in laps:
        df = dfout[dfout['startlap']==lap].to_numpy()
        #print('in',df)
        # double argsort: float predictions -> dense 0-based ranks
        idx = np.argsort(df[:,colid['pred_endrank']], axis=0)
        true_rank = np.argsort(idx, axis=0)
        df[:,colid['pred_endrank']] = true_rank
        # recompute the derived prediction columns
        df[:,colid['pred_diff']] = df[:,colid['pred_endrank']] - df[:,colid['endrank']]
        for rec in df:
            if rec[colid['pred_diff']] == 0:
                rec[colid['pred_sign']] = 0
            elif rec[colid['pred_diff']] > 0:
                rec[colid['pred_sign']] = 1
            else:
                rec[colid['pred_sign']] = -1
        #print('out',df)
        # accumulate the per-lap blocks into one matrix
        if len(dfs) == 0:
            dfs = df
        else:
            dfs = np.vstack((dfs, df))
        #dfs.append(df)
        #np.vstack(df)
    #dfret = pd.concat(dfs)
    #data = np.array(dfs)
    if short:
        dfret = pd.DataFrame(dfs.astype(int), columns = cols[:-2])
    else:
        dfret = pd.DataFrame(dfs.astype(int), columns = cols)
    return dfret
# In[ ]:
def long_predict_bymloutput_multirun(output, dfin, sampleCnt=100):
    """
    Build a long-horizon forecast whose samples come from multi-run ML
    output (``dfin``) instead of the predictor's own samples.

    The rolling forecasts over the global ``test_ds`` (via the global
    ``_predictor``) are used only to recover the time axis; at each lap the
    sample column is filled with that lap's pred_endrank values (all runs),
    shifted to 1-based ranks for visualization. ``output`` is currently
    only a label (the plotting call is commented out).

    Returns:
        (target, tss[0]) ; SampleForecast with the substituted samples and
        the first ground-truth series.
    """
    def get_start(idx):
        # minutes (== laps at 1min freq) from series start to forecast start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=_predictor,  # predictor
        num_samples=sampleCnt,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # windows are ordered latest-first: index -1 is the earliest
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        # locate this window's farthest predicted lap in the stitched axis
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
        # get the prediction from the ML output
        # pos = laps
        # 1 ... 10 | 11  <- start pos in forecasts
        # 0 ... 9  | 10  <- 9 is the startlap
        #
        startlap = start_pos - 2
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank starts from 1 for visualization
            # NOTE(review): assumes len(pred_val) == nsample — confirm the
            # runs-per-startlap count matches sampleCnt
            pred_val = _rec.pred_endrank.values
            #pred_val = _rec.pred_endrank.values
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)
    target.samples = newsamples
    print('multirun target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bymloutput(output, dfin):
    """
    Single-run variant of ``long_predict_bymloutput_multirun``: each lap's
    sample column is filled with ONE pred_endrank value (broadcast across
    the 100 sample rows) taken from the ML output ``dfin``.

    Uses the globals ``test_ds`` and ``_predictor`` only to recover the
    time axis. Returns (target, tss[0]).
    """
    def get_start(idx):
        # minutes (== laps at 1min freq) from series start to forecast start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=_predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # windows are ordered latest-first: index -1 is the earliest
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        # locate this window's farthest predicted lap in the stitched axis
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
        # get the prediction from the ML output
        # pos = laps
        # 1 ... 10 | 11  <- start pos in forecasts
        # 0 ... 9  | 10  <- 9 is the startlap
        #
        startlap = start_pos - 2
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank starts from 1 for visualization; broadcast one value
            pred_val = _rec.pred_endrank.values[0]
            #pred_val = _rec.pred_endrank.values
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)
    target.samples = newsamples
    print('target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bysamples(output, samples, tss):
    """
    Build a long-horizon forecast directly from a precomputed sample matrix.

    Only the time axis is recovered from the rolling forecasts over the
    global ``test_ds``/``_predictor``; the sample values come from the
    ``samples`` argument (+1 to make ranks 1-based for visualization).

    NOTE(review): the ``tss`` parameter is immediately shadowed by
    ``tss = list(ts_it)`` below, so the passed-in value is never used —
    presumably a leftover; confirm with callers before changing the
    signature. ``output`` is currently only a label (plotting commented out).
    """
    def get_start(idx):
        # minutes (== laps at 1min freq) from series start to forecast start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=_predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # windows are ordered latest-first: index -1 is the earliest
    first_start = get_start(-1)
    last_start = get_start(0)
    print(first_start, last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # sample array size: last_start - first_start + npredict
    arraysize = last_start - first_start + npredict
    # earlier attempts kept for reference:
    #target.samples = samples[:,-len(forecasts)-1:] + 1
    #target.samples = samples[:, 10 + npredict:] + 1
    target.samples = samples[:, first_start:first_start + arraysize] + 1
    print('long_predict_bysamples==>target samples shape:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target, tss[0]
#
# different idx format to bymloutput
#
def long_predict_bydf(output, dfin):
    """
    Variant of ``long_predict_bymloutput`` with a different startlap index
    convention: here ``startlap = start_pos - 1`` (bymloutput uses -2).

    Samples come from the DataFrame ``dfin`` (one pred_endrank per startlap,
    broadcast across sample rows, +1 for 1-based display). Uses the globals
    ``test_ds`` and ``_predictor`` only for the time axis.
    Returns (target, tss[0]).
    """
    def get_start(idx):
        # minutes (== laps at 1min freq) from series start to forecast start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor= _predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # windows are ordered latest-first: index -1 is the earliest
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        # locate this window's farthest predicted lap in the stitched axis
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
        # get the prediction from the ML output
        # pos = laps
        # 1 ... 10 | 11  <- start pos in forecasts
        # 0 ... 9  | 10  <- 9 is the startlap
        #
        startlap = start_pos - 1
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank starts from 1 for visualization; broadcast one value
            pred_val = _rec.pred_endrank.values[0]
            #pred_val = _rec.pred_endrank.values
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)
    target.samples = newsamples
    print('target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def get_ranknet_multirun(retdata, testcar, sampleCnt=100):
    """Collect per-run result frames for one car and run the multi-run long prediction.

    retdata maps a run id to a tuple whose first element is the result df.
    Rows are filtered down to *testcar*, concatenated, shifted from the
    1-based race indexing to the 0-based ml-model format, and handed to
    long_predict_bymloutput_multirun.
    """
    per_run_frames = [retdata[run_id][0] for run_id in retdata.keys()]
    car_frames = [frame[frame['carno'] == testcar] for frame in per_run_frames]
    dfin_ranknet = pd.concat(car_frames)
    print('dfin_ranknet size:', len(dfin_ranknet))
    # shift lap/rank columns to the 0-based ml-model format
    for column in ('startlap', 'startrank', 'endrank'):
        dfin_ranknet[column] = dfin_ranknet[column] - 1
    return long_predict_bymloutput_multirun('ranknet-rank', dfin_ranknet,
                                            sampleCnt=sampleCnt)
# In[ ]:
def ploth(ts_entry, forecast_entry, pits,caution, pitstop,outputfile,
        colors = ['r','g','m'],
        plabels= ['observed','svr','arima','ranknet'],
        ylabel = 'RANK'):
    """
    Draw one stacked subplot per forecast entry: the observed series, a
    2-step-shifted 'CurRank' baseline, the probabilistic forecast band,
    pit-stop markers and the caution/pit status strip; the figure is shown
    and saved to <outputfile>.pdf.

    input:
        ts_entry, forecast_entry ; equal-length lists, one subplot each
        pits, caution, pitstop   ; race status arrays (see get_racestatus)
        outputfile               ; plot title and pdf basename
        colors, plabels          ; per-subplot styling (list defaults are
                                   never mutated here, so they are safe)
        ylabel                   ; 'RANK' selects rank axis limits,
                                   anything else selects laptime limits
    """
    #plot_length = int(forecast_entry[0].samples.shape[1] *1.2)
    #plot_length = forecast_entry[0].samples.shape[1] + 10
    #prediction_intervals = (50.0, 90.0)
    prediction_intervals = [90.0]
    #legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    figcnt = len(forecast_entry)
    #fig, axs = plt.subplots(figcnt,1, figsize=(8,6))
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    #colors = ['r','g','m']
    #plabels = ['observed','svr','arima','ranknet']
    for idx in range(figcnt):
        ax = plt.subplot(figcnt, 1, idx+1)
        #ax = plt.subplot(1, figcnt, idx+1)
        #ts_entry.iloc[-plot_length:,0].plot(ax=axs, linewidth=1)  # plot the time series
        #ts_entry.iloc[-plot_length:,0].plot(ax=axs[idx], linewidth=1)  # plot the time series
        #plot_length = int(forecast_entry[idx].samples.shape[1] *1.2)
        ts_entry[idx].iloc[:,0].plot(linewidth=1, color='b',
                marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # currank
        # the observed series shifted forward 2 steps: the rank that was
        # current at prediction time
        sv = ts_entry[idx].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq='1min') + 2
        date_index = pd.date_range(start, periods = len(sv)-2, freq='1min')
        df2 = pd.DataFrame(sv[:-2], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
                marker='+', alpha=0.7, zorder=-1, label='CurRank')
        #for idx in range(len(forecast_entry)):
        #    forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='g')
        forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals,
                color=colors[idx],label=plabels[idx+1], zorder=10)
        #forecast_entry[1].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='b')
        #forecast_entry[2].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='r')
        #add mean line, compare with median
        #if forecast_entry[idx].samples.shape[0] > 1:
        if idx>3:
            mean_forecast = copy.deepcopy(forecast_entry[idx])
            mean_forecast.samples = np.mean(mean_forecast.samples, axis=0).reshape((1,-1))
            mean_forecast.copy_dim(0).plot(prediction_intervals=prediction_intervals,
                    color='g',label='use-mean', zorder=10)
        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        #if idx==0:
        ax.set_ylabel(ylabel)
        if idx==0:
            plt.title(outputfile)
        # relabel x ticks from timestamps to 1-based lap numbers
        locs, labels = plt.xticks()
        #plt.xticks(locs, range(len(locs)))
        start_loc = locs[0]
        offset = range(0, 200, 5)
        #new_locs = range(start_loc , start_loc+200, 10)
        new_locs = [start_loc + x for x in offset]
        #new_labels = [str(x-start_loc + 1) for x in new_locs]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[idx])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            # NOTE(review): xl here is carried over from a previous loop
            # iteration (this branch never assigns it) -- confirm intended
            xlim_h = len(ts_entry[idx])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        #ax.set_xlim((80,110))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)
        #add racestatus
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcar(carno):
    """Plot the rank forecasts of up to five models for one car.

    Reads the module-level ``alldata`` and ``rankdata``.
    """
    series_list, forecast_list = alldata[carno]
    pits, _, caution, pitstop, _, _ = get_racestatus(carno, rankdata)
    print(np.where(pitstop == 1))
    ploth(series_list[:5], forecast_list[:5], pits, caution, pitstop,
          'ranknet-rf-rank-forecast-%d' % carno,
          colors=['y', 'c', 'g', 'm', 'r'],
          plabels=['observed', 'SVR', 'RF', 'Arima', 'RrankNet-Oracle', 'RrankNet-MLP'])
def plotcar_laptime(carno):
    """Plot the lap-time forecasts for one car.

    Reads the module-level ``alldata`` and ``rankdata``.
    """
    series_list, forecast_list = alldata[carno]
    pits, _, caution, pitstop, _, _ = get_racestatus(carno, rankdata)
    print(np.where(pitstop == 1))
    ploth(series_list, forecast_list, pits, caution, pitstop,
          'ranknet-oracle-laptime-forecast-%d' % carno,
          colors=['m', 'r'],
          plabels=['observed', 'RrankNet-Oracle', 'RrankNet-MLP'],
          ylabel='LapTime')
def plotrank(outputfile, mode='RANK' ):
    """
    Plot the raw per-lap trace plus race status for every car in the
    module-level ``alldata``; one stacked subplot per car, saved to
    <outputfile>.pdf.

    input:
        outputfile ; figure basename
        mode       ; 'RANK' plots ranks, anything else plots laptimes
    reads module-level:
        alldata, rankdata; global data
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        #target_svr, target_rf,target_arima, target_oracle, target_ranknet_1run = savedata[carno]
        #target_oracle(by longpredict), tss_oracle_multirun,tss_ranknet_multirun
        tsss, targets = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, rankdata)
        print(np.where(pitstop==1))
        ax = plt.subplot(figcnt, 1, idx+1)
        if mode == 'RANK':
            ax.plot(ranks, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='Rank')
            ax.set_ylim((-5,+35))
            ax.plot(pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            #add racestatus
            add_status(ax,0, caution, pitstop)
        else:
            ax.plot(laptimes, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='LapTime')
            ax.set_ylim((30,140))
            ax.plot(pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            #add racestatus
            add_status(ax,0, caution, pitstop,y=32, height=5)
        ax.set_xlim((0,200))
        ax.set_ylabel('car-%d'%carno)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcarx(carno):
    """Plot rank forecasts for one car, splicing the oracle run into slot 2.

    Reads the module-level ``alldata``, ``oracledata`` and ``rankdata``.
    Mutates the lists stored in ``alldata[carno]`` (entry 2 is replaced).
    """
    series_list, forecast_list = alldata[carno]
    oracle_series, oracle_forecasts = oracledata[carno]
    # overwrite the third slot with the oracle result
    series_list[2] = oracle_series[1]
    forecast_list[2] = oracle_forecasts[1]
    pits, _, caution, pitstop, _, _ = get_racestatus(carno, rankdata)
    print(np.where(pitstop == 1))
    ploth(series_list[:5], forecast_list[:5], pits, caution, pitstop,
          'ranknet-rf-rank-forecast-%d' % carno,
          colors=['y', 'c', 'g', 'm', 'r'],
          plabels=['observed', 'SVR', 'RF', 'Weighted-Oracle', 'RrankNet-Oracle', 'RrankNet-MLP'])
def plotoracle(alldata, carno, destdir):
    """Plot the oracle forecast variants for one car into *destdir*.

    Reads the module-level ``rankdata``.
    """
    outputfile = destdir + 'ranknet-oracle-forecast-%d' % carno
    series_list, forecast_list = alldata[carno]
    pits, _, caution, pitstop, _, _ = get_racestatus(carno, rankdata)
    print(np.where(pitstop == 1))
    ploth(series_list, forecast_list, pits, caution, pitstop,
          outputfile,
          colors=['y', 'c', 'g', 'm', 'r'],
          plabels=['observed', '1run-samples', '1run-df', 'multimean', 'norerank-multimean', 'mrun-samples'])
def plotallcars(alldata, outputfile, drawid = 0,
        colors = ['g','c','m','r','y'],
        plabels= ['observed','1run-samples','1run-df','multimean','norerank-multimean','mrun-samples'],
        ylabel='RANK'):
    """
    plot a single fig for all cars

    input:
        prediction_length,freq ; global var
        alldata, rankdata; global data
        drawid : long prediction result index in alldata[carno] to draw
        colors, plabels : per-result styling (list defaults never mutated)
        ylabel : 'RANK' selects rank axis limits, else laptime limits
    output:
        shows the figure and saves it to <outputfile>.pdf
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    prediction_intervals = [90.0]
    #legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    font = {'family': 'serif',
            'color': 'darkred',
            'weight': 'normal',
            'size': 12,
            }
    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        #target_svr, target_rf,target_arima, target_oracle, target_ranknet_1run = savedata[carno]
        #target_oracle(by longpredict), tss_oracle_multirun,tss_ranknet_multirun
        ts_entry, forecast_entry = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, rankdata)
        print(np.where(pitstop==1))
        ax = plt.subplot(figcnt, 1, idx+1)
        # observed
        ts_entry[drawid].iloc[:,0].plot(linewidth=1, color='b',
                marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # currank
        # observed series shifted by prediction_length: the rank known at
        # prediction time
        sv = ts_entry[drawid].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq=freq) + prediction_length
        date_index = pd.date_range(start, periods = len(sv)-prediction_length, freq=freq)
        df2 = pd.DataFrame(sv[:-prediction_length], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
                marker='+', alpha=0.7, zorder=-1, label='CurRank')
        #forecast
        forecast_entry[drawid].copy_dim(0).plot(prediction_intervals=prediction_intervals,
                color=colors[drawid],label=plabels[drawid+1], zorder=10)
        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        ax.set_ylabel(ylabel)
        # relabel x ticks from timestamps to 1-based lap numbers
        locs, labels = plt.xticks()
        #plt.xticks(locs, range(len(locs)))
        start_loc = locs[0]
        offset = range(0, 200, 5)
        #new_locs = range(start_loc , start_loc+200, 10)
        new_locs = [start_loc + x for x in offset]
        #new_labels = [str(x-start_loc + 1) for x in new_locs]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[drawid])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            # NOTE(review): xl here is carried over from a previous loop
            # iteration (this branch never assigns it) -- confirm intended
            xlim_h = len(ts_entry[drawid])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        #plt.title(outputfile)
        plt.text(xl + xlim_h - 15, 35, f'car-{carno}',fontdict=font)
        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        #ax.set_xlim((80,110))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)
        #add racestatus
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def get_racestatus(carno, rankdata):
    """
    Extract pit/caution status arrays for one car from the rank table.

    completed_laps starts at 0, so the per-lap vectors returned drop the
    lap-0 entry: index 0 corresponds to completed_laps == 1.

    Returns:
        (pits, cautions, caution, pitstop, ranks, laptimes) where pits and
        cautions are the [completed_laps, rank, last_laptime,
        time_behind_leader] rows on pit/caution laps, and the remaining
        four are per-lap vectors.
    """
    car_rows = rankdata[rankdata['car_number'] == carno]
    lapdata = car_rows[['completed_laps', 'rank', 'last_laptime',
                        'time_behind_leader']].values
    # 0/1 per-lap flags: 'P' marks a pit lap, 'Y' a caution (yellow) lap
    pitstop = np.array([1 if s == 'P' else 0
                        for s in car_rows['lap_status'].values])
    caution = np.array([1 if s == 'Y' else 0
                        for s in car_rows['track_status'].values])
    pits = lapdata[np.where(pitstop == 1)]
    cautions = lapdata[np.where(caution == 1)]
    ranks = car_rows[['rank']].values
    laptimes = car_rows[['last_laptime']].values
    return pits, cautions, caution[1:], pitstop[1:], ranks[1:], laptimes[1:]
# status-strip colors used by add_status: red = pit stop, yellow = caution,
# green = normal racing.  The commented hex values are earlier pastel
# alternatives.
#red = '#ff8080'
red = 'red'
#yellow = '#8080ff'
yellow = 'yellow'
#green = '#80ff80'
green = 'green'
def add_status(axs,xl, caution, pitstop, maxlap= 200, y=-4, height=2):
    """
    Draw the per-lap race-status strip: green = normal, yellow = caution,
    red = pit stop; one unit-wide rectangle per lap at vertical offset y.

    input:
        axs     ; target axes
        xl      ; x offset (left xlim) the strip is shifted by
        caution, pitstop : race status 0/1 vectors
        maxlap  ; NOTE(review): this parameter is dead -- it is
                  unconditionally overwritten on the first line below
        y, height ; vertical position and thickness of the strip
    """
    maxlap = min(len(caution), len(pitstop))
    for lap in range(maxlap):
        fc = green
        if caution[lap] == 1:
            fc = yellow
        # pit stop takes precedence over caution
        if pitstop[lap] == 1:
            fc = red
        ec = fc
        rectangle = plt.Rectangle((lap+xl-0.5,y), 1, height, fc=fc,ec=ec)
        #plt.gca().add_patch(rectangle)
        axs.add_patch(rectangle)
# In[ ]:
#
# stint evaluation
#
def eval_bydf(testdf, bydf, forcematch=True, force2int=False):
    """
    Evaluate testdf restricted to the <carno, startlap> records present in bydf.

    When *forcematch* is set, records whose startrank/endrank (columns 2/3)
    disagree between the two frames are silently dropped.  When *force2int*
    is set the output frame is rebuilt with all-int columns.

    Returns:
        (dfout, accret) ; the filtered, sorted frame and the accuracy
        result from stint.get_evalret.
    """
    cars = set(bydf.carno.values)
    laps_by_car = {car: set(bydf[bydf['carno'] == car].startlap.values)
                   for car in cars}
    kept = []
    for car in cars:
        for lap in laps_by_car[car]:
            rec = testdf[(testdf['carno'] == car) & (testdf['startlap'] == lap)]
            if forcematch:
                lhs = testdf[(testdf['carno'] == car) & (testdf['startlap'] == lap)].to_numpy().astype(int)
                rhs = bydf[(bydf['carno'] == car) & (bydf['startlap'] == lap)].to_numpy().astype(int)
                if len(lhs) != 0 and len(rhs) != 0:
                    # columns 2/3 are startrank/endrank
                    if lhs[0][2] != rhs[0][2] or lhs[0][3] != rhs[0][3]:
                        #print('mismatch:', lhs, rhs)
                        continue
            kept.append(rec)
    dfout = pd.concat(kept)
    if force2int:
        dfout = pd.DataFrame(dfout.to_numpy().astype(int),
                             columns=['carno', 'startlap', 'startrank',
                                      'endrank', 'diff', 'sign',
                                      'pred_endrank', 'pred_diff', 'pred_sign',
                                      'endlap', 'pred_endlap'
                                      ])
    dfout = dfout.sort_values(by=['carno', 'startlap'])
    print('df size:', len(dfout))
    #return acc
    accret = stint.get_evalret(dfout)[0]
    return dfout, accret
def eval_sync(testdf, errlist, force2int=False):
    """
    eval df result by sync with the errlist detected

    Drops every <carno, startlap> record found in *errlist* and evaluates
    the remainder.  When *force2int* is set the frame is rebuilt with
    all-int columns.

    Returns:
        (dfout, accret) ; the filtered, sorted frame and the accuracy
        result from stint.get_evalret.
    """
    cars = set(testdf.carno.values)
    laps_by_car = {car: set(testdf[testdf['carno'] == car].startlap.values)
                   for car in cars}
    kept = []
    for car in cars:
        for lap in laps_by_car[car]:
            # skip records flagged as mismatching
            if [car, lap] in errlist:
                continue
            kept.append(testdf[(testdf['carno'] == car) & (testdf['startlap'] == lap)])
    dfout = pd.concat(kept)
    if force2int:
        dfout = pd.DataFrame(dfout.to_numpy().astype(int),
                             columns=['carno', 'startlap', 'startrank',
                                      'endrank', 'diff', 'sign',
                                      'pred_endrank', 'pred_diff', 'pred_sign',
                                      'endlap', 'pred_endlap'
                                      ])
    dfout = dfout.sort_values(by=['carno', 'startlap'])
    print('df size:', len(dfout))
    #return acc
    accret = stint.get_evalret(dfout)[0]
    return dfout, accret
def cmp_df(testdf, bydf):
    """
    Compare two result frames record by record on the keys present in bydf.

    Minor differences are expected when RankNet removes short time series.
    A record mismatches when it is missing on either side or when its
    startrank/endrank (columns 2/3) differ.

    Returns:
        (errcnt, err_list) ; mismatch count and the [carno, startlap]
        pairs that mismatched.
    """
    cars = set(bydf.carno.values)
    laps_by_car = {car: set(bydf[bydf['carno'] == car].startlap.values)
                   for car in cars}
    err_list = []
    errcnt = 0
    for car in cars:
        for lap in laps_by_car[car]:
            lhs = testdf[(testdf['carno'] == car) & (testdf['startlap'] == lap)].to_numpy().astype(int)
            rhs = bydf[(bydf['carno'] == car) & (bydf['startlap'] == lap)].to_numpy().astype(int)
            if len(lhs) == 0 or len(rhs) == 0:
                errcnt += 1
                print('mismatch empty:', lhs, rhs)
                err_list.append([car, lap])
            elif lhs[0][2] != rhs[0][2] or lhs[0][3] != rhs[0][3]:
                print('mismatch:', lhs, rhs)
                errcnt += 1
                err_list.append([car, lap])
    print('errcnt:', errcnt)
    return errcnt, err_list
def df2samples(dfall, prediction_len=2, samplecnt=1):
    """
    convert a df into <samples, tss> format

    This version works for the output of ml models which contain only one
    prediction per record; the value is replicated across *samplecnt*
    sample rows at position startlap + prediction_len.

    Returns:
        (full_samples, full_tss) ; dicts keyed by carno holding
        NaN-initialized arrays of length 200 (samples: samplecnt x 200).
    """
    carlist = set(dfall.carno.values)
    laps_by_car = {car: set(dfall[dfall['carno'] == car].startlap.values)
                   for car in carlist}
    full_samples = {}
    full_tss = {}
    for carno in carlist:
        tss = np.full(200, np.nan)
        samples = np.full((samplecnt, 200), np.nan)
        for lap in laps_by_car[carno]:
            rec = dfall[(dfall['carno'] == carno) & (dfall['startlap'] == lap)]
            curlap = int(rec.startlap.values[0] + prediction_len)
            # one scalar prediction, broadcast over all sample rows
            samples[:, curlap] = rec.pred_endrank.values[0]
            tss[curlap] = rec.endrank.values[0]
        full_samples[carno] = samples
        full_tss[carno] = tss
    return full_samples, full_tss
def df2samples_ex(dfall, samplecnt=100, errlist=None):
    """
    for stint results only
    get samples from the runs

    Unlike df2samples, each <carno, startlap> record group may carry
    samplecnt predictions, and the prediction is stored at the startlap
    itself (no horizon shift).

    input:
        dfall     ; result df <carno,startlap,startrank,endrank,diff,sign,
                    pred_endrank,pred_diff,pred_sign,endlap,pred_endlap>
        samplecnt ; number of sample rows in the output arrays; the
                    pred_endrank values per record must hold samplecnt
                    entries or broadcast to them
        errlist   ; [carno, startlap] pairs to skip (default: none)
    return:
        samples, tss ; dicts keyed by carno, NaN-initialized arrays of
                       length 200 (samples: samplecnt x 200)
    """
    # fix: the original signature used a mutable default (errlist=[]);
    # use the None sentinel instead
    if errlist is None:
        errlist = []
    full_samples = {}
    full_tss = {}
    carlist = set(dfall.carno.values)
    startlaps = {car: set(dfall[dfall['carno'] == car].startlap.values)
                 for car in carlist}
    for carno in carlist:
        full_tss[carno] = np.full(200, np.nan)
        full_samples[carno] = np.full((samplecnt, 200), np.nan)
        for startlap in startlaps[carno]:
            if [carno, startlap] in errlist:
                continue
            dfrec = dfall[(dfall['carno'] == carno) & (dfall['startlap'] == startlap)]
            curlap = int(dfrec.startlap.values[0])
            full_samples[carno][:, curlap] = dfrec.pred_endrank.to_numpy()
            full_tss[carno][curlap] = dfrec.endrank.values[0]
    return full_samples, full_tss
def runs2samples(runret, errlist):
    """
    for stint results only
    get samples from the runs

    input:
        runret  ; list of result df <carno,startlap,startrank,endrank,diff,
                  sign,pred_endrank,pred_diff,pred_sign,endlap,pred_endlap>,
                  one df per run; the run count sets the sample dimension
        errlist ; [carno, startlap] pairs to skip
    return:
        samples, tss ; dicts keyed by carno, NaN-initialized arrays of
                       length 200 (samples: len(runret) x 200)
    """
    samplecnt = len(runret)
    carlist = set(runret[0].carno.values)
    # one frame holding every run's records
    dfall = pd.concat(runret)
    laps_by_car = {car: set(dfall[dfall['carno'] == car].startlap.values)
                   for car in carlist}
    full_samples = {}
    full_tss = {}
    for carno in carlist:
        tss = np.full(200, np.nan)
        samples = np.full((samplecnt, 200), np.nan)
        for lap in laps_by_car[carno]:
            if [carno, lap] in errlist:
                continue
            rec = dfall[(dfall['carno'] == carno) & (dfall['startlap'] == lap)]
            curlap = int(rec.startlap.values[0])
            # one prediction per run for this record -> one sample row each
            samples[:, curlap] = rec.pred_endrank.to_numpy()
            tss[curlap] = rec.endrank.values[0]
        full_samples[carno] = samples
        full_tss[carno] = tss
    return full_samples, full_tss
# In[ ]:
def get_config():
    """
    Snapshot the module-level experiment configuration as a flat list.

    The ordering is fixed and significant -- snapshots taken at different
    points can be compared element-wise to see which settings changed.
    """
    config = [
        _savedata,
        _skip_overwrite,
        _inlap_status,
        _feature_mode,
        _featureCnt,
        freq ,
        _train_len,
        prediction_length,
        context_ratio,
        context_length,
        contextlen,
        dataset,
        epochs,
        gpuid,
        _use_weighted_model,
        trainmodel,
        _use_cate_feature,
        use_feat_static,
        distroutput,
        batch_size,
        loopcnt,
        _test_event,
        testmodel,
        pitmodel,
        year
    ]
    return config
### run
# script entry: configure logging, parse command-line options, and
# validate the single positional <configfile> argument
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
# logging configure
import logging.config
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
# cmd argument parser
usage = 'RankNet-QuickTest.py <configfile> [options]'
parser = OptionParser(usage)
# empty-string / -1 defaults mean "not set on the command line"; only
# explicitly-set options override the config file later
parser.add_option("--forecast_mode", dest="forecast_mode", default="")
parser.add_option("--trainmodel", default='', dest="trainmodel")
parser.add_option("--testmodel", default='', dest="testmodel")
parser.add_option("--joint_train", action="store_true", default=False, dest="joint_train")
parser.add_option("--loopcnt", default=-1,type='int', dest="loopcnt")
parser.add_option("--gpuid", default=-1,type='int', dest="gpuid")
parser.add_option("--pitmodel_bias", default=-1, type='int', dest="pitmodel_bias")
parser.add_option("--year", default='', dest="year")
parser.add_option("--test_event", default='', dest="test_event")
opt, args = parser.parse_args()
print(len(args), opt.joint_train)
#check validation
if len(args) != 1:
    logger.error(globals()['__doc__'] % locals())
    sys.exit(-1)
configfile = args[0]
base=os.path.basename(configfile)
configname = os.path.splitext(base)[0]
WorkRootDir = 'QuickTestOutput'
#configname = 'weighted-noinlap-nopitage-nocate-c60-drank'
#configfile = f'{configname}.ini'
if not os.path.exists(configfile):
    print('config file not exists error:', configfile)
    sys.exit(-1)
if configfile != '':
    # parse the .ini file and bind every experiment setting to the
    # module-level names used throughout the script
    config = configparser.RawConfigParser()
    #config.read(WorkRootDir + '/' + configfile)
    config.read(configfile)
    #set them back
    section = "RankNet-QuickTest"
    _savedata = config.getboolean(section, "_savedata")
    _skip_overwrite = config.getboolean(section, "_skip_overwrite")
    _inlap_status = config.getint(section, "_inlap_status") #0
    _feature_mode = config.getint(section, "_feature_mode") #FEATURE_STATUS
    _featureCnt = config.getint(section, "_featureCnt") #9
    freq = config.get(section, "freq") #"1min"
    _train_len = config.getint(section, "_train_len") #40
    prediction_length = config.getint(section, "prediction_length") #2
    context_ratio = config.getfloat(section, "context_ratio") #0.
    context_length = config.getint(section, "context_length") #40
    dataset= config.get(section, "dataset") #'rank'
    epochs = config.getint(section, "epochs") #1000
    gpuid = config.getint(section, "gpuid") #5
    _use_weighted_model = config.getboolean(section, "_use_weighted_model")
    trainmodel = config.get(section, "trainmodel") #'deepARW-Oracle' if _use_weighted_model else 'deepAR-Oracle'
    _use_cate_feature = config.getboolean(section, "_use_cate_feature")
    distroutput = config.get(section, "distroutput") #'student'
    batch_size = config.getint(section, "batch_size") #32
    loopcnt = config.getint(section, "loopcnt") #2
    _test_event = config.get(section, "_test_event") #'Indy500-2018'
    testmodel = config.get(section, "testmodel") #'oracle'
    pitmodel = config.get(section, "pitmodel") #'oracle'
    year = config.get(section, "year") #'2018'
    # derived settings
    contextlen = context_length
    use_feat_static = _use_cate_feature
    #config1 = get_config()
else:
    print('Warning, please use config file')
    sys.exit(0)
#
# global settings
#
# NOTE(review): every assignment from here down to "#config2 = get_config()"
# re-binds names that were just read from the config file above, so these
# hardcoded values clobber the .ini contents; only the command-line
# overrides further below survive.  Confirm this is intentional.
#_savedata = False
_savedata = True
_skip_overwrite = True
#inlap status =
#  0 , no inlap
#  1 , set previous lap
#  2 , set the next lap
_inlap_status = 0
#
# featuremode in [FEATURE_STATUS, FEATURE_PITAGE]:
#
_feature_mode = FEATURE_LEADERPITCNT
_featureCnt = 9
#
# training parameters
#
freq = "1min"
_train_len = 60
prediction_length = 2
context_ratio = 0.
context_length = 60
contextlen = context_length
dataset='rank'
epochs = 1000
#epochs = 10
gpuid = 5
#'deepAR-Oracle','deepARW-Oracle'
_use_weighted_model = True
trainmodel = 'deepARW-Oracle' if _use_weighted_model else 'deepAR-Oracle'
_use_cate_feature = False
use_feat_static = _use_cate_feature
distroutput = 'student'
batch_size = 32
#
# test parameters
#
loopcnt = 2
_test_event = 'Indy500-2018'
testmodel = 'oracle'
pitmodel = 'oracle'
year = '2018'
#config2 = get_config()
# In[ ]:
# new added parameters
_test_train_len = 40
_joint_train = False
_pitmodel_bias = 0
_forecast_mode = 'shortterm'
#_test_event = 'Indy500-2019'
#year = '2019'
#shortterm, stint
#_forecast_mode = 'stint'
#_forecast_mode = 'shortterm'
# bias of the pitmodel
#_pitmodel_bias = 4
#train model: [deepARW-Oracle, deepAR]
# test the standard deepAR model training and testing
# DeepAR
#trainmodel = 'deepAR'
#testmodel = 'standard'
# Joint
#trainmodel = 'deepAR-multi'
#testmodel = 'joint'
#_joint_train = True
#loopcnt = 2
#load arguments overwites
# command-line options (when explicitly set) take precedence over both the
# config file and the hardcoded defaults above
if opt.forecast_mode != '':
    _forecast_mode = opt.forecast_mode
if opt.trainmodel != '':
    trainmodel = opt.trainmodel
if opt.testmodel != '':
    testmodel = opt.testmodel
if opt.joint_train != False:
    _joint_train = True
if opt.gpuid > 0:
    gpuid = opt.gpuid
if opt.loopcnt > 0:
    loopcnt = opt.loopcnt
if opt.pitmodel_bias >= 0:
    _pitmodel_bias = opt.pitmodel_bias
if opt.year != '':
    year = opt.year
if opt.test_event != '':
    _test_event = opt.test_event
## deduced paramters
if testmodel == 'pitmodel':
    # encode a nonzero bias into the model id, e.g. 'pitmodel4'
    testmodel = 'pitmodel%s'%(_pitmodel_bias if _pitmodel_bias!=0 else '')
#loopcnt = 2
#featurestr = {FEATURE_STATUS:'nopitage',FEATURE_PITAGE:'pitage',FEATURE_LEADERPITCNT:'leaderpitcnt'}
#cur_featurestr = featurestr[_feature_mode]
print('current configfile:', configfile)
cur_featurestr = decode_feature_mode(_feature_mode)
# echo the effective configuration before running
print('feature_mode:', _feature_mode, cur_featurestr)
print('trainmodel:', trainmodel, 'jointtrain:', _joint_train)
print('testmodel:', testmodel)
print('pitmodel:', pitmodel, 'pitmodel bias:', _pitmodel_bias)
print('year:', year, 'test_event:', _test_event)
print('loopcnt:', loopcnt)
print('gpuid:', gpuid)
sys.stdout.flush()
#
# string map
#
# human-readable tags used when composing experiment ids and file names
inlapstr = {0:'noinlap',1:'inlap',2:'outlap'}
weightstr = {True:'weighted',False:'noweighted'}
catestr = {True:'cate',False:'nocate'}
#
# input data parameters
#
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}_v{_featureCnt}_p{_inlap_status}'
_dataset_id = '%s-%s'%(inlapstr[_inlap_status], cur_featurestr)
#
# internal parameters
#
distr_outputs ={'student':StudentTOutput(),
        'negbin':NegativeBinomialOutput()
        }
distr_output = distr_outputs[distroutput]
#
#
#
experimentid = f'{weightstr[_use_weighted_model]}-{inlapstr[_inlap_status]}-{cur_featurestr}-{catestr[_use_cate_feature]}-c{context_length}'
#
#
#
outputRoot = f"{WorkRootDir}/{experimentid}/"
# standard output file names
LAPTIME_DATASET = f'{outputRoot}/laptime_rank_timediff_pit-oracle-{dbid}.pickle'
STAGE_DATASET = f'{outputRoot}/stagedata-{dbid}.pickle'
# year related
SIMULATION_OUTFILE = f'{outputRoot}/{_test_event}/{_forecast_mode}-dfout-{trainmodel}-indy500-{dataset}-{inlapstr[_inlap_status]}-{cur_featurestr}-{testmodel}-l{loopcnt}-alldata.pickle'
EVALUATION_RESULT_DF = f'{outputRoot}/{_test_event}/{_forecast_mode}-evaluation_result_d{dataset}_m{testmodel}.csv'
LONG_FORECASTING_DFS = f'{outputRoot}/{_test_event}/{_forecast_mode}-long_forecasting_dfs_d{dataset}_m{testmodel}.pickle'
FORECAST_FIGS_DIR = f'{outputRoot}/{_test_event}/{_forecast_mode}-forecast-figs-d{dataset}_m{testmodel}/'
# ### 1. make laptime dataset
# In[ ]:
stagedata = {}
global_carids = {}
os.makedirs(outputRoot, exist_ok=True)
os.makedirs(f'{outputRoot}/{_test_event}', exist_ok=True)
#check the dest files first
# reuse cached pickles when present; otherwise rebuild from the raw event
# data and (when _savedata) cache the result
if _skip_overwrite and os.path.exists(LAPTIME_DATASET) and os.path.exists(STAGE_DATASET):
    #
    # load data
    #
    print('Load laptime and stage dataset:',LAPTIME_DATASET, STAGE_DATASET)
    with open(LAPTIME_DATASET, 'rb') as f:
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    with open(STAGE_DATASET, 'rb') as f:
        stagedata = pickle.load(f, encoding='latin1')
else:
    cur_carid = 0
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        print('%s: carno=%d, lapnum=%d'%(event, len(carlist), len(laplist)))
        #build the carid map
        # car numbers recur across events; assign each a stable global id
        for car in carlist:
            if car not in global_carids:
                global_carids[car] = cur_carid
                cur_carid += 1
    laptime_data = get_laptime_dataset(stagedata,inlap_status = _inlap_status)
    if _savedata:
        import pickle
        #stintdf.to_csv('laptime-%s.csv'%year)
        #savefile = outputRoot + f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
        savefile = LAPTIME_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            #pack [global_carids, laptime_data]
            savedata = [global_carids, laptime_data]
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        #savefile = outputRoot + f'stagedata-{dbid}.pickle'
        savefile = STAGE_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            #pack [global_carids, laptime_data]
            savedata = stagedata
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
# ### 2. make gluonts db
# In[ ]:
outdir = outputRoot + _dataset_id
os.makedirs(outdir, exist_ok=True)
# select the target time-series column by dataset type
if dataset == 'laptime':
    subdir = 'laptime-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_LAPTIME
elif dataset == 'timediff':
    subdir = 'timediff-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_TIMEDIFF
elif dataset == 'rank':
    subdir = 'rank-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_RANK
else:
    # NOTE(review): execution continues with subdir/_run_ts unset here
    print('error, dataset not support: ', dataset)
_task_dir = f'{outdir}/{subdir}/'
#
#dbname, train_ds, test_ds = makedbs()
#
useeid = False
interpolate = False
#ipstr = '-ip' if interpolate else '-noip'
ipstr = '%s-%s'%('ip' if interpolate else 'noip', 'eid' if useeid else 'noeid')
jointstr = '-joint' if _joint_train else ''
dbname = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}{jointstr}.pickle'
laptimedb = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}-newlaptimedata.pickle'
#check the dest files first
# reuse the cached gluonts dataset when both pickles already exist
if _skip_overwrite and os.path.exists(dbname) and os.path.exists(laptimedb):
    print('Load Gluonts Dataset:',dbname)
    with open(dbname, 'rb') as f:
        freq, prediction_length, cardinality, train_ds, test_ds = pickle.load(f, encoding='latin1')
        print('.......loaded data, freq=', freq, 'prediction_length=', prediction_length)
    print('Load New Laptime Dataset:',laptimedb)
    with open(laptimedb, 'rb') as f:
        prepared_laptimedata = pickle.load(f, encoding='latin1')
else:
    if useeid:
        cardinality = [len(global_carids), len(laptime_data)]
    else:
        cardinality = [len(global_carids)]
    prepared_laptimedata = prepare_laptimedata(prediction_length, freq, test_event = _test_event,
            train_ratio=0, context_ratio = 0.,shift_len = prediction_length)
    train_ds, test_ds,_,_ = make_dataset_byevent(prepared_laptimedata, prediction_length,freq,
            useeid=useeid, run_ts=_run_ts,
            test_event=_test_event, log_transform =False,
            context_ratio=0, train_ratio = 0)
    if _savedata:
        print('Save Gluonts Dataset:',dbname)
        with open(dbname, 'wb') as f:
            savedata = [freq, prediction_length, cardinality, train_ds, test_ds]
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        print('Save preprocessed laptime Dataset:',laptimedb)
        with open(laptimedb, 'wb') as f:
            pickle.dump(prepared_laptimedata, f, pickle.HIGHEST_PROTOCOL)
# ### 3. train the model
# In[ ]:
# Train the estimator unless a saved checkpoint directory already exists.
id='oracle'
run=1
runid=f'{trainmodel}-{dataset}-all-indy-f1min-t{prediction_length}-e{epochs}-r{run}_{id}_t{prediction_length}'
modelfile = _task_dir + runid
if _skip_overwrite and os.path.exists(modelfile):
    print('Model checkpoint found at:',modelfile)
else:
    #get target dim
    # peek at one training entry to detect multivariate targets
    entry = next(iter(train_ds))
    target_dim = entry['target'].shape
    target_dim = target_dim[0] if len(target_dim) > 1 else 1
    # NOTE(review): '%s' is printed literally here — print() does no %-formatting
    print('target_dim:%s', target_dim)
    estimator = init_estimator(trainmodel, gpuid,
            epochs, batch_size,target_dim, distr_output = distr_output,use_feat_static = use_feat_static)
    predictor = estimator.train(train_ds)
    if _savedata:
        os.makedirs(modelfile, exist_ok=True)
        print('Start to save the model to %s', modelfile)
        predictor.serialize(Path(modelfile))
        print('End of saving the model.')
#
# ### 4. evaluate the model
# In[ ]:
# Run (or load cached) race simulations with the trained model and collect
# accuracy / prediction-risk metrics keyed by the model id string `mid`.
lapmode = _inlap_status
fmode = _feature_mode
runts = dataset
mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], cur_featurestr)
datasetid = outputRoot + _dataset_id
if _skip_overwrite and os.path.exists(SIMULATION_OUTFILE):
    print('Load Simulation Results:',SIMULATION_OUTFILE)
    with open(SIMULATION_OUTFILE, 'rb') as f:
        dfs,acc,ret,pret = pickle.load(f, encoding='latin1')
    print('.......loaded data, ret keys=', ret.keys())
    # init the stint module
    #
    # in test mode, set all train_len = 40 to unify the evaluation results
    #
    init_simulation(datasetid, _test_event, 'rank',stint.COL_RANK,'rank',prediction_length,
            pitmodel=pitmodel, inlapmode=lapmode,featuremode =fmode,
            train_len = _test_train_len, pitmodel_bias= _pitmodel_bias)
else:
    #run simulation
    acc, ret, pret = {}, {}, {}
    #lapmode = _inlap_status
    #fmode = _feature_mode
    #runts = dataset
    #mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], featurestr[fmode])
    # the simulated target column depends on which series the model was trained on
    if runts == 'rank':
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                'rank',stint.COL_RANK,'rank',
                prediction_length, stint.MODE_ORACLE,loopcnt,
                pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
                train_len = _test_train_len, forecastmode = _forecast_mode, joint_train = _joint_train,
                pitmodel_bias= _pitmodel_bias)
    else:
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                'timediff',stint.COL_TIMEDIFF,'timediff2rank',
                prediction_length, stint.MODE_ORACLE,loopcnt,
                pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
                train_len = _test_train_len, forecastmode = _forecast_mode, joint_train = _joint_train,
                pitmodel_bias= _pitmodel_bias)
    if _forecast_mode == 'shortterm':
        # quantile-risk metrics computed over all forecast samples
        allsamples, alltss = get_allsamples(ret[mid], year=year)
        _, pret[mid]= prisk_direct_bysamples(allsamples, alltss)
        print(pret[mid])
    # aggregate the per-run results into one dataframe (mode=1 -> mean)
    dfs={}
    mode=1
    df = get_alldf_mode(ret[mid], year=year,mode=mode)
    name = '%s_%s'%(testmodel, 'mean' if mode==1 else ('mode' if mode==0 else 'median'))
    if year not in dfs:
        dfs[year] = {}
    dfs[year][name] = df
    _trim = 0
    _include_final = True
    _include_stintlen = True
    include_str = '1' if _include_final else '0'
    stint_str = '1' if _include_stintlen else ''
    #simulation_outfile=outputRoot + f'shortterm-dfout-oracle-indy500-{dataset}-{inlapstr[_inlap_status]}-{featurestr[_feature_mode]}-2018-oracle-l{loopcnt}-alldata-weighted.pickle'
    with open(SIMULATION_OUTFILE, 'wb') as f:
        savedata = [dfs,acc,ret,pret]
        pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
#alias
ranknetdf = dfs
ranknet_ret = ret
# ### 5. final evaluation
# In[ ]:
# Produce the final metrics table (accuracy, MAE, 50/90-risk); for short-term
# forecasting the metrics are additionally split by lap type (normal vs
# pit-covered), for stint forecasting they are compared against ML baselines.
if _skip_overwrite and os.path.exists(EVALUATION_RESULT_DF):
    print('Load Evaluation Results:',EVALUATION_RESULT_DF)
    oracle_eval_result = pd.read_csv(EVALUATION_RESULT_DF)
else:
    ##-------------------------------------------------------------------------------
    if _forecast_mode == 'shortterm':
        # get pit laps, pit-covered-laps
        # pitdata[year] = [pitlaps, pitcoveredlaps]
        with open('pitcoveredlaps-g1.pickle', 'rb') as f:
            # The protocol version used is detected automatically, so we do not
            # have to specify it.
            pitdata = pickle.load(f, encoding='latin1')
        #
        # Model,SignAcc,MAE,50-Risk,90-Risk
        #
        cols = ['Year','Model','ExpID','laptype','Top1Acc','MAE','50-Risk','90-Risk']
        plen = prediction_length
        usemeanstr='mean'
        #load data
        # dfs,acc,ret,pret
        retdata = []
        #oracle
        dfx = ret[mid]
        allsamples, alltss = get_allsamples(dfx, year=year)
        #_, pret[mid]= prisk_direct_bysamples(ret[mid][0][1], ret[mid][0][2])
        _, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
        dfout = do_rerank(ranknetdf[year][f'{testmodel}_mean'])
        accret = stint.get_evalret_shortterm(dfout)[0]
        #fsamples, ftss = runs2samples_ex(ranknet_ret[f'oracle-RANK-{year}-inlap-nopitage'],[])
        #_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
        retdata.append([year,f'{testmodel}',configname,'all', accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
        # repeat the evaluation restricted to normal laps / pit-covered laps
        for laptype in ['normal','pit']:
            # select the set
            pitcoveredlaps = pitdata[year][1]
            normallaps = set([x for x in range(1,201)]) - pitcoveredlaps
            if laptype == 'normal':
                sellaps = normallaps
                clearlaps = pitcoveredlaps
            else:
                sellaps = pitcoveredlaps
                clearlaps = normallaps
            # pitcoveredlaps start idx = 1
            startlaps = [x-plen-1 for x in sellaps]
            #sellapidx = np.array([x-1 for x in sellaps])
            clearidx = np.array([x-1 for x in clearlaps])
            print('sellaps:', len(sellaps), 'clearlaps:',len(clearlaps))
            #oracle
            #outfile=f'shortterm-dfout-ranknet-indy500-rank-inlap-nopitage-20182019-oracle-l10-alldata-weighted.pickle'
            #_all = load_dfout_all(outfile)[0]
            #ranknetdf, acc, ret, pret = _all[0],_all[1],_all[2],_all[3]
            dfout = do_rerank(ranknetdf[year][f'{testmodel}_mean'])
            allsamples, alltss = get_allsamples(dfx, year=year)
            allsamples, alltss = clear_samples(allsamples, alltss,clearidx)
            _, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
            dfout = dfout[dfout['startlap'].isin(startlaps)]
            accret = stint.get_evalret_shortterm(dfout)[0]
            print(year, laptype,f'RankNet-{testmodel}',accret[0], accret[1], prisk_vals[1], prisk_vals[2])
            retdata.append([year, f'{testmodel}',configname,laptype, accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
    ##-------------------------------------------------------------------------------
    elif _forecast_mode == 'stint':
        if testmodel == 'oracle':
            datafile=f'stint-dfout-mlmodels-indy500-tr2013_2017-te2018_2019-end1-oracle-t0-tuned.pickle'
        else:
            datafile=f'stint-dfout-mlmodels-indy500-tr2013_2017-te2018_2019-end1-normal-t0-tuned.pickle'
        #preddf = load_dfout(outfile)
        with open(datafile, 'rb') as f:
            preddf = pickle.load(f, encoding='latin1')[0]
        #preddf_oracle = load_dfout(outfile)
        ranknet_ret = ret
        # align prediction rows between the RankNet output and the ML baselines
        errlist = {}
        errcnt, errlist[year] = cmp_df(ranknetdf[year][f'{testmodel}_mean'], preddf[year]['lasso'])
        retdata = []
        #
        # Model,SignAcc,MAE,50-Risk,90-Risk
        #
        cols = ['Year','Model','ExpID','laptype','SignAcc','MAE','50-Risk','90-Risk']
        models = {'currank':'CurRank','rf':'RandomForest','svr_lin':'SVM','xgb':'XGBoost'}
        for clf in ['currank','rf','svr_lin','xgb']:
            print('year:',year,'clf:',clf)
            dfout, accret = eval_sync(preddf[year][clf],errlist[year])
            fsamples, ftss = df2samples_ex(dfout)
            _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
            retdata.append([year,models[clf],configname,'all', accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
        #ml models -oracle
        #for clf in ['rf','svr_lin','xgb']:
        #    print('year:',year,'clf:',clf)
        #    dfout, accret = eval_sync(preddf_oracle[year][clf],errlist[year])
        #    fsamples, ftss = df2samples(dfout)
        #    _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
        #    retdata.append([year,models[clf]+'-Oracle',configname,'all',accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
        dfout, accret = eval_sync(ranknetdf[year][f'{testmodel}_mean'], errlist[year],force2int=True)
        #fsamples, ftss = df2samples(dfout)
        fsamples, ftss = runs2samples(ranknet_ret[mid],errlist[f'{year}'])
        _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
        retdata.append([year,f'{testmodel}',configname,'all',accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
        #dfout, accret = eval_sync(ranknetdf[year]['oracle_mean'], errlist[year],force2int=True)
        ##fsamples, ftss = df2samples(dfout)
        #fsamples, ftss = runs2samples(ranknet_ret[f'oracle-TIMEDIFF-{year}-noinlap-nopitage'],errlist[f'{year}'])
        #_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
        #retdata.append([year,'RankNet-Oracle',accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
    oracle_eval_result = pd.DataFrame(data=retdata, columns=cols)
    if _savedata:
        oracle_eval_result.to_csv(EVALUATION_RESULT_DF)
# ### 6. Draw forecasting results
# In[ ]:
# Collect per-car long-horizon forecast variants (single-run samples/df,
# multi-run mean with and without rerank, multi-run samples) for plotting.
# Only runs for short-term forecasting without joint training.
if _forecast_mode == 'shortterm' and _joint_train == False:
    if _skip_overwrite and os.path.exists(LONG_FORECASTING_DFS):
        fname = LONG_FORECASTING_DFS
        print('Load Long Forecasting Data:',fname)
        with open(fname, 'rb') as f:
            alldata = pickle.load(f, encoding='latin1')
        print('.......loaded data, alldata keys=', alldata.keys())
    else:
        oracle_ret = ret
        mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], cur_featurestr)
        print('eval mid:', mid, f'{testmodel}_ret keys:', ret.keys())
        ## init predictor
        _predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
        oracle_dfout = do_rerank(dfs[year][f'{testmodel}_mean'])
        carlist = set(list(oracle_dfout.carno.values))
        carlist = [int(x) for x in carlist]
        print('carlist:', carlist,'len:',len(carlist))
        #carlist = [13, 7, 3, 12]
        #carlist = [13]
        retdata = {}
        for carno in carlist:
            print("*"*40)
            print('Run models for carno=', carno)
            # create the test_ds first
            test_cars = [carno]
            #train_ds, test_ds, trainset, testset = stint.make_dataset_byevent(events_id[_test_event],
            #                            prediction_length,freq,
            #                            oracle_mode=stint.MODE_ORACLE,
            #                            run_ts = _run_ts,
            #                            test_event = _test_event,
            #                            test_cars=test_cars,
            #                            half_moving_win = 0,
            #                            train_ratio = 0.01)
            train_ds, test_ds, trainset, testset = make_dataset_byevent(prepared_laptimedata, prediction_length,freq,
                    useeid=useeid, run_ts=_run_ts,
                    test_event=_test_event, log_transform =False,
                    context_ratio=0, train_ratio = 0,
                    test_cars = test_cars)
            if (len(testset) <= 10 + prediction_length):
                print('ts too short, skip ', len(testset))
                continue
            #by first run samples
            samples = oracle_ret[mid][0][1][test_cars[0]]
            tss = oracle_ret[mid][0][2][test_cars[0]]
            target_oracle1, tss_oracle1 = long_predict_bysamples('1run-samples', samples, tss)
            #by first run output df(_use_mean = true, already reranked)
            df = oracle_ret[mid][0][0]
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle2, tss_oracle2 = long_predict_bydf(f'{testmodel}-1run-dfout', dfin_oracle)
            #by multi-run mean at oracle_dfout
            df = oracle_dfout
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle3, tss_oracle3 = long_predict_bydf(f'{testmodel}-multimean', dfin_oracle)
            #no rerank
            df = ranknetdf[year][f'{testmodel}_mean']
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle4, tss_oracle4 = long_predict_bydf(f'{testmodel}-norerank-multimean', dfin_oracle)
            #by multiple runs
            target_oracle_multirun, tss_oracle_multirun = get_ranknet_multirun(
                    oracle_ret[mid],
                    test_cars[0],sampleCnt=loopcnt)
            retdata[carno] = [[tss_oracle1,tss_oracle2,tss_oracle3,tss_oracle4,tss_oracle_multirun],
                    [target_oracle1,target_oracle2,target_oracle3,target_oracle4,target_oracle_multirun]]
        alldata = retdata
        if _savedata:
            with open(LONG_FORECASTING_DFS, 'wb') as f:
                pickle.dump(alldata, f, pickle.HIGHEST_PROTOCOL)
# In[ ]:
# Render per-car forecast figures plus one summary figure, then print the
# final evaluation table.
if _forecast_mode == 'shortterm' and _joint_train == False:
    destdir = FORECAST_FIGS_DIR
    if _skip_overwrite and os.path.exists(destdir):
        print('Long Forecasting Figures at:',destdir)
    else:
        with open('stagedata-Indy500_2013_2019_v9_p0.pickle', 'rb') as f:
            stagedata = pickle.load(f, encoding='latin1')
        _alldata, rankdata, _acldata, _flagdata = stagedata[_test_event]
        #destdir = outputRoot + 'oracle-forecast-figs/'
        os.makedirs(destdir, exist_ok=True)
        for carno in alldata:
            plotoracle(alldata, carno, destdir)
        #draw summary result
        outputfile = destdir + f'{configname}'
        plotallcars(alldata, outputfile, drawid = 0)
# final output
pd.set_option("display.max_rows", None, "display.max_columns", None)
print(oracle_eval_result)
| 122,909 | 32.840859 | 189 | py |
rankpredictor | rankpredictor-master/run/22.PaperFinal/notebook/RankNet-QuickTest-3fmodes.py | #!/usr/bin/env python
# coding: utf-8
# ## QuickTest
#
# makedb laptime
# makedb gluonts
# train model
# evaluate model
#
# In[ ]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import random
import mxnet as mx
from mxnet import gluon
import pickle
import json
import copy
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from pathlib import Path
import configparser
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWeightEstimator
import indycar.model.stint_simulator_shortterm_pitmodel as stint
# In[ ]:
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-car, per-lap view of a race timing dataset.

    The input is deduplicated so each (car_number, completed_laps) pair keeps
    its earliest record, then per-car lap-to-lap diffs are computed.

    Args:
        dataset: raw timing DataFrame with at least the columns
            car_number, completed_laps, rank, elapsed_time, track_status,
            lap_status plus the administrative columns dropped below.

    Returns:
        DataFrame with columns ['car_number','completed_laps','time_diff',
        'rank','track_status','lap_status','elapsed_time'], sorted by
        car_number then completed_laps.
    """
    # pick up data with valid rank: earliest record per (car, lap) wins
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
                          "last_pitted_lap","start_position","laps_led"], axis=1)
    carnumber = set(uni_ds['car_number'])
    print('cars:', carnumber)
    print('#cars=', len(carnumber))
    # per-car diffs; the first row of each car (where car_number changes) is
    # forced to 0 so diffs never cross car boundaries
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    # fix: use .loc instead of chained indexing — chained assignment
    # (uni_ds['rank_diff'][mask] = 0) silently fails under pandas copy-on-write
    uni_ds.loc[mask, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[mask, 'time_diff'] = 0
    df = uni_ds[['car_number','completed_laps','time_diff','rank','track_status', 'lap_status','elapsed_time']]
    return df
def make_lapstatus_data(dataset):
    """Return the per-lap track_status series taken from one race finisher.

    Any car that reached the final lap carries the complete flag history,
    so the first finisher found is used as the representative.
    """
    last_lap = max(dataset.completed_laps)
    total_laps = last_lap + 1
    # finishers are the cars whose record reaches the final lap
    completed_car_numbers= dataset[dataset.completed_laps == last_lap].car_number.values
    completed_car_count = len(completed_car_numbers)
    print('count of completed cars:', completed_car_count)
    print('completed cars:', completed_car_numbers)
    # take the first finisher and keep one record per lap
    one_car = dataset[dataset['car_number'] == completed_car_numbers[0]]
    one_car = one_car.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    return one_car[['completed_laps', 'track_status']]
def load_data(event, year=0):
    """Load one race's timing CSV and derive the working views.

    Args:
        event: event name used in the CSV file name.
        year: optional year tag; when > 0 it is appended to the file name.

    Returns:
        (alldata, rankdata, acldata, flagdata) where alldata is the raw frame,
        rankdata is deduplicated per (car, lap), acldata is the per-lap view of
        all cars (see make_cl_data) and flagdata the per-lap track status
        (see make_lapstatus_data).
    """
    #inputfile = '../data/final/C_'+ event +'-' + year + '-final.csv'
    if year>0:
        # fix: year is an int — the original concatenated it directly into the
        # path string, raising TypeError; format it instead
        inputfile = f'../data/final/C_{event}-{year}.csv'
    else:
        inputfile = '../data/final/C_'+ event +'.csv'
    dataset = pd.read_csv(inputfile)
    #dataset.info(verbose=True)
    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1
    # get records for the cars that finish the race
    completed_car_numbers= dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)
    print('count of completed cars:', completed_car_count)
    print('completed cars:', completed_car_numbers)
    #make a copy, then restrict `dataset` to the finishers only
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    cldata = make_cl_data(dataset)
    flagdata = make_lapstatus_data(dataset)
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata, flagdata
def nan_helper(y):
    """Locate NaNs in a 1-d array.

    Returns:
        - a boolean mask flagging the NaN positions, and
        - a converter turning a logical mask into positional indices,
          e.g. for np.interp-based gap filling:

        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    nan_mask = np.isnan(y)
    to_positions = lambda flags: flags.nonzero()[0]
    return nan_mask, to_positions
def get_lap2nextpit(lap_status, maxlap=200):
    """
    input:
        lap_status ; array of 0/1 pit-stop indicators per lap, NaN marks an
                     incomplete race tail
        maxlap ; the max lap number of the race
    output:
        array of the same shape holding, for each lap, the number of laps
        until the next pit stop; laps past the final pit stay NaN
    """
    pit_laps = list(np.where(lap_status == 1)[0])
    # a complete race (no NaN tail) treats the finish line as a final "pit"
    # so the last stint is measured to the end of the race
    if np.sum(np.isnan(lap_status)) == 0:
        pit_laps.append(maxlap)
    gaps = np.zeros_like(lap_status)
    gaps[:] = np.nan
    # guard: no pit stops at all -> everything stays NaN
    if len(pit_laps) == 0:
        return gaps
    cursor = 0
    for lap_no in range(len(lap_status)):
        if lap_no >= pit_laps[cursor]:
            # reached the current pit lap: advance to the following stint
            cursor += 1
            if cursor >= len(pit_laps):
                break
        gaps[lap_no] = pit_laps[cursor] - lap_no
    return gaps
def get_lapdata(acldata):
    """
    input:
        acldata['car_number','completed_laps','time_diff','rank','track_status', 'lap_status','elapsed_time']
        timediff: [car_number, completed_laps] -> elapsed time diff to leader
    output:
        np.array with one row per (car, lap):
        [car_number, lap, time_diff, rank, track_status, lap_status,
         time_behind, elapsed_time]
        where time_behind is the gap to the smallest elapsed_time of that lap.
        Note: mixed str/number content makes the array a string dtype.
    """
    COL_COMPLETED_LAPS = 1
    COL_ELAPSED_TIME = 6
    maxlap = np.max(acldata['completed_laps'].values)
    #'car_number','completed_laps','time_diff','rank','track_status', 'lap_status','time_behind'
    time_behind = []
    for lap in range(1, maxlap+1):
        this_lap = acldata[acldata['completed_laps']==lap][
            ['car_number','completed_laps','time_diff','rank',
             'track_status', 'lap_status','elapsed_time']].values
        # fix: np.float was removed in NumPy 1.24; the builtin float is the
        # documented replacement
        min_elapsed_time = np.nanmin(this_lap[:,COL_ELAPSED_TIME].astype(float))
        #print(f'lap:{lap}, min_elapsed_time:{min_elapsed_time}')
        for row in this_lap:
            car_number = int(row[0])
            time_diff = row[2]
            rank = row[3]
            track_status = row[4]
            lap_status = row[5]
            # gap to the lap leader (smallest elapsed time on this lap)
            timebehind = float(row[COL_ELAPSED_TIME]) - min_elapsed_time
            time_behind.append([car_number, lap, time_diff,rank,track_status, lap_status,
                                timebehind, float(row[COL_ELAPSED_TIME])])
    lapdata = np.array(time_behind)
    return lapdata
# features: laptime, rank, track_status, lap_status, timediff
# Row indices into the per-car feature matrix datalist[car, feature, lap]
# built by get_laptime_dataset.
LAPTIME = 0                  # lap time (per-lap elapsed-time diff)
RANK = 1                     # position on this lap
TRACK_STATUS = 2             # 1 when the lap record carries a 'Y' (yellow) flag
LAP_STATUS = 3               # 1 when the lap record carries a 'P' (pit) flag
TIME_BEHIND = 4              # gap in seconds to the lap leader
CAUTION_LAPS_INSTINT = 5     # caution laps accumulated since the last pit
LAPS_INSTINT = 6             # laps since the last pit stop (pit age)
ELAPSED_TIME = 7             # cumulative elapsed race time
LAP2NEXTPIT = 8              # laps remaining to the next pit (see get_lap2nextpit)
_featureCnt = 9              # number of feature rows allocated per car
def get_laptime_dataset(stagedata, inlap_status = 0):
    """
    #add caution_laps_instint, laps_instint
    Build the per-event feature tensors from the staged race data.

    input:
        stagedata ; {event: (alldata, rankdata, acldata, flagdata)}
        inlap_status ; 0 keeps pit flags as-is; 1 additionally marks the lap
            BEFORE each pit lap as a pit lap; any other non-zero value marks
            the lap AFTER it instead
    output: laptime & rank data
    [(
        eventid,
        carids : rowid -> carno,
        datalist: #car_number x features x #totallaps (padded by Nan)
           entry: [[laptime, rank, track_status, lap_status,
                   caution_laps_instint, laps_instint]]
    )]
    """
    laptime_data = []
    for event in stagedata.keys():
        print(f'start event: {event}')
        laptime_rec = []
        eventid = events_id[event]
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        totalcars = len(carlist)
        totallaps = len(laplist)
        #carnumber -> carid
        carids={key:idx for idx, key in enumerate(carlist)}
        decode_carids={idx:key for idx, key in enumerate(carlist)}
        #init per-car stint counters
        lap_instint = {carids[x]:0 for x in carlist}
        caution_instint = {carids[x]:0 for x in carlist}
        #array: car_number x lap, NaN-padded so incomplete cars stay NaN
        #laptime = np.zeros((totalcars, totallaps-1))
        #rank = np.zeros((totalcars, totallaps-1))
        laptime = np.empty((totalcars, totallaps-1))
        rank = np.empty((totalcars, totallaps-1))
        laptime[:] = np.NaN
        rank[:] = np.NaN
        datalist = np.empty((totalcars, _featureCnt, totallaps-1))
        datalist[:] = np.NaN
        #lapdata = acldata[['car_number','completed_laps',
        #        'time_diff','rank','track_status', 'lap_status','elapsed_time']].to_numpy()
        #'car_number','completed_laps','time_diff','rank','track_status', 'lap_status','time_behind'
        lapdata = get_lapdata(acldata)
        for row in lapdata:
            #completed_laps: skip the lap-0 records
            if int(row[1]) == 0:
                continue
            #add to data array (laps are shifted to 0-based index)
            car_number = carids[int(row[0])]
            completed_laps = int(row[1])-1
            time_diff = float(row[2])
            rank = int(row[3])
            track_status = 1 if row[4]=='Y' else 0
            lap_status = 1 if row[5]=='P' else 0
            time_behind = float(row[6])
            datalist[car_number, LAPTIME, completed_laps] = time_diff
            datalist[car_number, RANK, completed_laps] = rank
            datalist[car_number, TRACK_STATUS, completed_laps] = track_status
            datalist[car_number, LAP_STATUS, completed_laps] = lap_status
            datalist[car_number, TIME_BEHIND, completed_laps] = time_behind
            datalist[car_number, ELAPSED_TIME, completed_laps] = float(row[7])
            #stint status: count laps (and caution laps) since the last pit
            if track_status == 1:
                caution_instint[car_number] += 1
            lap_instint[car_number] += 1
            if lap_status == 1:
                #new stint: reset both counters at a pit lap
                lap_instint[car_number] = 0
                caution_instint[car_number] = 0
                # add inlap feature into lap_Status
                # set the previous lap to inlap status
                # what does it mean?
                if (inlap_status!=0):
                    if inlap_status == 1:
                        # set the previous lap of 'P'
                        if completed_laps > 0:
                            #datalist[car_number, LAP_STATUS, completed_laps-1] = INLAP_STATUS
                            datalist[car_number, LAP_STATUS, completed_laps-1] = 1
                    else:
                        # set the next lap of 'P'
                        if completed_laps +1 < totallaps:
                            #datalist[car_number, LAP_STATUS, completed_laps-1] = INLAP_STATUS
                            datalist[car_number, LAP_STATUS, completed_laps + 1] = 1
            datalist[car_number, LAPS_INSTINT, completed_laps] = lap_instint[car_number]
            datalist[car_number, CAUTION_LAPS_INSTINT, completed_laps] = caution_instint[car_number]
        #update lap2nextpit in datalist
        for caridx in range(datalist.shape[0]):
            lap_status = datalist[caridx, LAP_STATUS, :]
            #pit status
            lap2nextpit = get_lap2nextpit(lap_status)
            datalist[caridx, LAP2NEXTPIT, :] = lap2nextpit
        #add one record
        laptime_data.append([eventid, decode_carids, datalist])
        # push this event into stage dataframe
        print('event=%s, records=%s'%(event, datalist.shape))
    return laptime_data
# In[ ]:
def nan_helper(y):
    """Return (nan_mask, index_fn) for a 1-d array with possible NaNs.

    nan_mask flags the NaN positions; index_fn maps a logical mask to
    positional indices, the usual companion for NaN interpolation:

    >>> nans, x = nan_helper(y)
    >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    return np.isnan(y), (lambda flags: np.nonzero(flags)[0])
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
# COL_* mirror the feature-row indices used when building the tensors.
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSED_TIME= 7
COL_LAP2NEXTPIT = 8
#_featureCnt = 9
# added new features
COL_LEADER_PITCNT = 9        # pit count of cars ranked ahead (see add_leader_cnt)
# FEATURE_* select which dynamic-real covariates go into the gluonts dataset
FEATURE_STATUS = 2           # track + lap status only
FEATURE_PITAGE = 4           # status + laps-in-stint (pit age)
FEATURE_LEADERPITCNT = 8     # status + leader pit count
# MODE_* are bit flags combined into oracle_mode and tested with test_flag()
MODE_ORACLE = 0
MODE_NOLAP = 1               # zero out the lap (pit) status covariate
MODE_NOTRACK = 2             # zero out the track status covariate
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
#MODE_STR={MODE_ORACLE:'oracle', MODE_NOLAP:'nolap',MODE_NOTRACK:'notrack',MODE_TEST:'test'}
#_feature_mode = FEATURE_STATUS
def add_leader_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS):
    """
    Append a 'leader pit count' feature row to mat(car, feature, lap).

    For every lap, cars are ordered by their value in `rank_col`; the new
    feature for a car is the cumulative sum of `pit_col` over the cars ranked
    ahead of it on that lap (excluding the car itself).

    input:
        selmat ; ndarray of shape (cars, features, laps)
        rank_col ; feature row used to order the cars per lap
        pit_col ; feature row holding the 0/1 pit indicator
    output:
        ndarray of shape (cars, features+1, laps); the last row holds the
        leader pit count, the original rows are copied unchanged
    """
    dim1, dim2, dim3 = selmat.shape
    # rerank by the rank_col; idx[:, lap] lists the car rows in rank order
    # (fix: dropped the unused true_rank computation, whose astype(np.float)
    # crashes on NumPy >= 1.24 where the np.float alias was removed)
    idx = np.argsort(selmat[:, rank_col,:], axis=0)
    # gather the pit indicator in rank order, lap by lap
    pits = np.zeros((dim1,dim3))
    for lap in range(dim3):
        col = idx[:, lap]
        pits[:, lap] = selmat[col, pit_col, lap]
    # pits taken by all cars ranked strictly ahead: cumsum minus own value
    leaderCnt = np.cumsum(pits, axis=0) - pits
    #create a new data block with one extra feature row
    newmat = np.zeros((dim1,dim2+1,dim3))
    newmat[:,:dim2,:] = selmat.copy()
    for lap in range(dim3):
        col = idx[:, lap]
        # scatter the counts back from rank order to car-row order
        newmat[col, dim2, lap] = leaderCnt[:, lap]
    return newmat
def prepare_laptimedata(prediction_length, freq,
                        test_event = 'Indy500-2018',
                        train_ratio=0.8,
                        context_ratio = 0.):
    """
    prepare the laptime data for training
    1. remove short ts
    2. rerank the tss
    3. create new features
    input:
        laptime_data ; global var, [(eventid, carids, datalist)]
        prediction_length ; forecast horizon used to decide "too short"
        freq ; unused here, kept for interface symmetry with the dataset maker
        test_event ; events after this one are skipped entirely
        train_ratio/context_ratio ; control the minimum usable ts length
    output:
        data ; new representation of laptime_data (same triple layout, with
        short ts dropped, ranks recomputed and a leader-pit-count feature
        appended by add_leader_cnt)
    """
    _laptime_data = laptime_data.copy()
    test_eventid = events_id[test_event]
    run_ts = COL_RANK
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    new_data = []
    for _data in _laptime_data:
        #skip eid > test_eventid
        if _data[0] > test_eventid:
            print('skip this event:', events[_data[0]])
            break
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            #use global train_len
            train_len = _train_len if not test_mode else _test_train_len
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10
        print(f'before ====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        #rerank due to short ts removed
        #if run_ts == COL_RANK and dorerank == True:
        if True:
            # keep only cars whose (NaN-trimmed) series is long enough
            sel_rows = []
            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                #remove nan(only tails)
                nans, x= nan_helper(rec[run_ts,:])
                nan_count = np.sum(nans)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    print(f'rerank a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                else:
                    sel_rows.append(rowid)
            #get selected matrix
            sel_idx = np.array(sel_rows)
            selmat = _data[2][sel_idx]
            # check the format of _data
            #ipdb.set_trace()
            # recompute the rank among the surviving cars lap by lap
            mask = np.isnan(selmat[:,COL_RANK,:])
            idx = np.argsort(selmat[:,COL_RANK,:], axis=0)
            # fix: np.float was removed in NumPy 1.24; builtin float is equivalent
            true_rank = np.argsort(idx, axis=0).astype(float)
            true_rank[mask] = np.nan
            if test_mode:
                #
                # for historical code mismatch, simulation does not run rerank
                # (test event keeps 1-based ranks)
                #
                _data[2][sel_idx,COL_RANK,:] = true_rank + 1
            else:
                _data[2][sel_idx,COL_RANK,:] = true_rank
            # update the carno dict: new compact row ids -> original car numbers
            new_carids = {}
            for rowid in range(len(sel_idx)):
                carid = sel_idx[rowid]
                carno = _data[1][carid]
                new_carids[rowid] = carno
            # add new feature (leader pit count)
            data2_newfeature = add_leader_cnt(_data[2][sel_idx])
            new_data.append([_data[0], new_carids, data2_newfeature])
    return new_data
def make_dataset_byevent(_laptime_data, prediction_length, freq,
                         useeid = False,
                         run_ts=COL_LAPTIME,
                         test_event = 'Indy500-2018',
                         use_global_dict = True,
                         oracle_mode = MODE_ORACLE,
                         half_moving_win = True,
                         train_ratio=0.8,
                         log_transform = False,
                         context_ratio = 0.,
                         dorerank = True,
                         test_cars = []
                         ):
    """
    split the ts to train and test part by the ratio
    oracle_mode: false to simulate prediction in real by
       set the covariates of track and lap status as nan in the testset

    Non-test events go entirely to the train set; the test event contributes
    one test entry per rolling end position (step -1) and, when
    context_ratio != 0, an initial context slice to the train set.
    Returns (train_ds, test_ds, train_set, test_set) where the *_ds are
    gluonts ListDataset wrappers over the raw lists.
    """
    #global setting: which covariate set to emit (see FEATURE_* constants)
    feature_mode = _feature_mode
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []
    totalTSCnt = 0
    totalTSLen = 0
    test_eventid = events_id[test_event]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            #use global train_len
            train_len = _train_len
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10
        print(f'after ====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            totalTSCnt += 1
            totalTSLen += totallen
            if ( totallen < train_len + prediction_length):
                print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            #check carno in test_cars, testmode only
            if len(test_cars)>0 and carno not in test_cars:
                continue
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # selection of features: MODE_NOTRACK / MODE_NOLAP blank out covariates
            if test_flag(oracle_mode, MODE_NOTRACK):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                #train real features, keyed by the feature_mode in use
                real_features = {
                    FEATURE_STATUS:[rec[COL_TRACKSTATUS,:],rec[COL_LAPSTATUS,:]],
                    FEATURE_PITAGE:[rec[COL_TRACKSTATUS,:],rec[COL_LAPSTATUS,:],rec[COL_LAPS_INSTINT,:]],
                    FEATURE_LEADERPITCNT:[rec[COL_TRACKSTATUS,:],rec[COL_LAPSTATUS,:],rec[COL_LEADER_PITCNT,:]]
                }
                # all go to train set
                _train.append({'target': target_val,
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': real_features[feature_mode]
                               })
            else:
                # reset train_len
                if context_ratio != 0.:
                    # all go to train set
                    #add [0, context_len] to train set
                    #train real features
                    real_features = {
                        FEATURE_STATUS:[rec[COL_TRACKSTATUS,:context_len],rec[COL_LAPSTATUS,:context_len]],
                        FEATURE_PITAGE:[rec[COL_TRACKSTATUS,:context_len],rec[COL_LAPSTATUS,:context_len],rec[COL_LAPS_INSTINT,:context_len]],
                        FEATURE_LEADERPITCNT:[rec[COL_TRACKSTATUS,:context_len],rec[COL_LAPSTATUS,:context_len],rec[COL_LEADER_PITCNT,:context_len]]
                    }
                    # all go to train set
                    _train.append({'target': target_val[:context_len],
                                   'start': start,
                                   'feat_static_cat': static_cat,
                                   'feat_dynamic_real': real_features[feature_mode]
                                   })
                # testset
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                step = -1
                for endpos in range(totallen, context_len+prediction_length,
                                    step):
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    #train real features
                    real_features = {
                        FEATURE_STATUS:[track_rec,lap_rec],
                        FEATURE_PITAGE:[track_rec,lap_rec,pitage_rec],
                        FEATURE_LEADERPITCNT:[track_rec,lap_rec,rec[COL_LEADER_PITCNT,:endpos]]
                    }
                    _test.append({'target': rec[run_ts,:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': real_features[feature_mode]
                                  })
                    test_rec_cnt += 1
            #add one ts
            print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
        train_set.extend(_train)
        test_set.extend(_test)
    print(f'train len:{len(train_set)}, test len:{len(test_set)}, totsl TsCnt:{totalTSCnt}, total ts len:{totalTSLen}')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)
    return train_ds, test_ds, train_set, test_set
# In[ ]:
def init_estimator(model, gpuid, epochs=100, batch_size = 32,
        target_dim = 3, distr_output = None, use_feat_static = True):
    """
    Build the forecasting estimator/predictor selected by `model`.

    Relies on module-level globals: prediction_length, context_length,
    cardinality, freq and logger.

    Args:
        model: model name; one of 'deepAR', 'deepARW', 'deepAR-Oracle',
            'deepARW-Oracle', 'deepAR-nocarid', 'deepAR-multi', 'simpleFF',
            'deepFactor', 'deepState', 'ets', 'prophet', 'arima', 'naive'.
        gpuid: gpu id; a negative value selects the cpu context.
        epochs: number of training epochs for the gluonts trainers.
        batch_size: trainer batch size.
        target_dim: output dimension, used only by 'deepAR-multi'.
        distr_output: gluonts DistributionOutput for the deepAR variants.
        use_feat_static: whether to use the static categorical feature
            (car id); the '-Oracle' variants branch on this.

    Returns:
        A gluonts estimator, or a ready-to-use predictor for the baseline
        models ('ets', 'prophet', 'arima', 'naive').

    Exits the process with status -1 for unknown model names.
    """
    # select the mxnet device context; negative gpuid means cpu
    if int(gpuid) < 0:
        ctx = "cpu"
    else:
        ctx = "gpu(%s)"%gpuid

    # plain deepAR with static car-id feature only
    if model == 'deepAR':
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            distr_output = distr_output,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
        )
    # weighted variant of deepAR, same configuration
    elif model == 'deepARW':
        estimator = DeepARWEstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            distr_output = distr_output,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
        )
    # oracle: additionally consumes dynamic real features (track/lap status)
    elif model == 'deepAR-Oracle':
        if use_feat_static:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                                batch_size = batch_size,
                                epochs=epochs,
                                learning_rate=1e-3,
                                num_batches_per_epoch=100
                               )
            )
        else:
            # no static features -> cardinality deliberately not passed
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                                batch_size = batch_size,
                                epochs=epochs,
                                learning_rate=1e-3,
                                num_batches_per_epoch=100
                               )
            )
    # weighted oracle variant
    elif model == 'deepARW-Oracle':
        if use_feat_static:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                                batch_size = batch_size,
                                epochs=epochs,
                                learning_rate=1e-3,
                                #hybridize=False,
                                num_batches_per_epoch=100
                               )
            )
        else:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                                batch_size = batch_size,
                                epochs=epochs,
                                learning_rate=1e-3,
                                #hybridize=False,
                                num_batches_per_epoch=100
                               )
            )
    # deepAR with dynamic real features; static usage driven by caller
    elif model == 'deepAR-nocarid':
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=use_feat_static,
            cardinality=cardinality,
            use_feat_dynamic_real=True,
            distr_output = distr_output,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
        )
    # multivariate target (target_dim outputs)
    elif model == 'deepAR-multi':
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=use_feat_static,
            #cardinality=cardinality,
            use_feat_dynamic_real=True,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           ),
            distr_output=MultivariateGaussianOutput(dim=target_dim),
        )
    # simple feed-forward baseline
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            hybridize=False,
                            num_batches_per_epoch=100
                           )
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
        )
    elif model == 'deepState':
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                            batch_size = batch_size,
                            epochs=epochs,
                            learning_rate=1e-3,
                            num_batches_per_epoch=100
                           )
        )
    # classic baselines: these return ready predictors, no training needed
    elif model == 'ets':
        estimator = RForecastPredictor(method_name='ets',freq= freq, prediction_length = prediction_length)
    elif model == 'prophet':
        estimator = ProphetPredictor(freq= freq, prediction_length = prediction_length)
    elif model == 'arima':
        estimator = RForecastPredictor(method_name='arima',freq= freq, prediction_length = prediction_length, trunc_length = 200)
    elif model == 'naive':
        estimator = NaivePredictor(freq= freq, prediction_length = prediction_length)
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)

    return estimator
# In[ ]:
#
# simulation engine general
#
def init_simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
        featuremode = stint.FEATURE_STATUS,
        pitmodel = 0,
        inlapmode=0,
        train_len = 40):
    """
    Configure the `stint` simulation module (sets module globals only).

    input:
        prepared_laptimedata ; global

    Args:
        datasetid: dataset identifier; part of the model path
            <_dataset_id>/<_task_id>-<trainid>/.
        testevent: event used for testing, e.g. 'Indy500-2019'.
        taskid: the trained model's task (rank, laptime).
        runts: target time series column (COL_LAPTIME, COL_RANK).
        expid: experiment id (rank, laptime, laptime2rank, timediff2rank...).
        predictionlen: NOTE(review) unused here; the prediction length is
            passed to load_model separately by the caller.
        featuremode: feature set for dynamic real features.
        pitmodel: pit model selector forwarded to stint.init.
        inlapmode: in-lap status handling mode.
        train_len: training window length.
    """
    #
    # configuration
    #
    # model path: <_dataset_id>/<_task_id>-<trainid>/
    #_dataset_id = 'indy2013-2018-nocarid'
    stint._inlap_status = inlapmode
    stint.init(pitmodel)

    # todo: add into stint code
    #here add new laptime_data with new features
    #
    stint.set_laptimedata(prepared_laptimedata)
    stint._dataset_id = datasetid
    stint._test_event = testevent
    #_test_event = 'Indy500-2019'
    stint._feature_mode = featuremode
    stint._context_ratio = 0.

    stint._task_id = taskid # rank,laptime, the trained model's task
    stint._run_ts = runts #COL_LAPTIME,COL_RANK
    stint._exp_id=expid #rank, laptime, laptim2rank, timediff2rank...

    stint._use_mean = True
    stint._train_len = train_len
def simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
        datamode, loopcnt, featuremode = stint.FEATURE_STATUS,
        pitmodel = 0, model = 'oracle', inlapmode=0, train_len = 40):
    """
    Configure `stint`, load a trained model and run `loopcnt` short-term
    simulations, reporting the mean accuracy over runs.

    input:
        prepared_laptimedata ; global
        epochs ; global (forwarded to stint.load_model)

    Args:
        datasetid/testevent/taskid/runts/expid/featuremode/pitmodel/
        inlapmode/train_len: see init_simulation (same configuration).
        predictionlen: forecast horizon, forwarded to load_model and
            run_simulation_shortterm.
        datamode: data mode for run_simulation_shortterm.
        loopcnt: number of simulation repetitions.
        model: trained model name, e.g. 'oracle'.

    Returns:
        (b, ret2): b is the (loopcnt x metrics) accuracy array; ret2 maps
        run index -> run_simulation_shortterm result tuple
        (df, full_samples, full_tss).
    """
    #
    # configuration
    #
    # model path: <_dataset_id>/<_task_id>-<trainid>/
    #_dataset_id = 'indy2013-2018-nocarid'
    stint._inlap_status = inlapmode
    stint.init(pitmodel)

    # todo: add into stint code
    #here add new laptime_data with new features
    #
    stint.set_laptimedata(prepared_laptimedata)
    #stint.set_laptimedata(laptime_data)
    stint._dataset_id = datasetid
    stint._test_event = testevent
    #_test_event = 'Indy500-2019'
    stint._feature_mode = featuremode
    stint._context_ratio = 0.

    stint._task_id = taskid # rank,laptime, the trained model's task
    stint._run_ts = runts #COL_LAPTIME,COL_RANK
    stint._exp_id=expid #rank, laptime, laptim2rank, timediff2rank...

    stint._use_mean = True
    stint._train_len = train_len

    # load the trained predictor (uses the module-level `epochs` global)
    predictor = stint.load_model(predictionlen, model,trainid='indy500',epochs = epochs, exproot='./')

    ret2 = {}
    for i in range(loopcnt):
        #df, full_samples, full_tss
        ret2[i] = stint.run_simulation_shortterm(predictor, predictionlen, stint.freq, datamode=datamode)

    # evaluate every run, then print the column-wise mean accuracy
    acc = []
    for i in ret2.keys():
        df = ret2[i][0]
        _x = stint.get_evalret_shortterm(df)
        acc.append(_x)
    b = np.array(acc)
    print(np.mean(b, axis=0))

    #save keys
    #stint._pitmodel.save_keys('pitmodel-keys.pickle')
    return b, ret2
def long_predict(predictor, sampleCnt = 100):
    """
    use the farest samples only

    Stitch the rolling-window forecasts over the global `test_ds` into one
    long forecast: from each window only the last (farthest) predicted lap
    is kept.

    input:
        test_ds ; global var
        predictor

    Args:
        predictor: trained gluonts predictor.
        sampleCnt: number of sample paths per window.

    Returns:
        (target, tss[0]): a SampleForecast whose sample matrix spans the
        whole horizon (unfilled laps are NaN) and the observed series.
    """
    def get_start(idx):
        # minutes between this window's start and the series start;
        # laps are encoded as 1-minute steps
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60

    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=predictor,  # predictor
        num_samples=sampleCnt,  # number of sample paths we want for evaluation
    )

    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    start_time, row = next(tss[0].iterrows())
    # windows were generated with a negative step, so the last forecast
    # starts earliest -- TODO confirm against the dataset builder
    first_start = get_start(-1)
    last_start = get_start(0)
    print(first_start, last_start)

    import copy
    target = copy.deepcopy(forecasts[-1])

    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)

    # one wide sample matrix covering every window
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan

    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        # keep only the farthest predicted lap of each window
        newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]

    target.samples = newsamples

    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target, tss[0]
def get_alldf(dfx, year=2018):
    """Concatenate the per-run result DataFrames stored in `dfx`.

    Args:
        dfx: dict mapping run id -> tuple whose first element is the
            run's result DataFrame (as produced by the simulation loop).
        year: unused; kept for backward compatibility with existing
            callers such as get_alldf_mode.

    Returns:
        pandas.DataFrame: the row-wise concatenation of all runs; with a
        single run the original frame is returned as-is (not copied).

    Raises:
        IndexError: if `dfx` is empty.
    """
    # was a manual append loop shadowing the builtin `id`
    retdfs = [dfx[run_id][0] for run_id in dfx.keys()]
    return pd.concat(retdfs) if len(retdfs) > 1 else retdfs[0]
def get_alldf_mode(dfx, year=2018,mode=0):
    """
    Aggregate repeated simulation runs into one prediction per
    (carno, startlap) record.

    mode:
        0; mode
        1; mean
        2; median

    Args:
        dfx: dict run id -> tuple whose first element is a result
            DataFrame (aggregated via get_alldf).
        year: forwarded to get_alldf (unused there; kept for symmetry).
        mode: how to combine the runs' pred_endrank values (see above).

    Returns:
        DataFrame with one row per (carno, startlap); pred_endrank,
        pred_diff and pred_sign are recomputed from the aggregate.
    """
    dfall = get_alldf(dfx, year=year)

    cars = set(dfall.carno.values)
    # startlaps observed for each car
    startlaps = {}
    for car in cars:
        startlaps[car] = set(dfall[dfall['carno']==car].startlap.values)

    retdf = []
    for car in cars:
        for startlap in startlaps[car]:
            # all runs' rows for this (car, startlap)
            dfrec = dfall[(dfall['carno']==car) & (dfall['startlap']==startlap)]

            #get mode
            if mode == 0:
                pred_endrank = stats.mode(dfrec.pred_endrank.values).mode[0]
                #pred_endlap = stats.mode(dfrec.pred_endlap.values).mode[0]
            elif mode == 1:
                #use mean
                pred_endrank = np.mean(dfrec.pred_endrank.values)
                #pred_endlap = np.mean(dfrec.pred_endlap.values)
            elif mode == 2:
                #use median
                pred_endrank = np.median(dfrec.pred_endrank.values)
                #pred_endlap = np.median(dfrec.pred_endlap.values)

            # positional columns (see column list below): 2=startrank,
            # 6=pred_endrank, 7=pred_diff, 8=pred_sign
            firstrec = dfrec.to_numpy()[0,:]
            firstrec[6] = pred_endrank
            # NOTE(review): diff is taken against column 2 (startrank)
            # here, while do_rerank diffs against endrank -- verify intent
            firstrec[7] = pred_endrank - firstrec[2]
            if firstrec[7] == 0:
                firstrec[8] = 0
            elif firstrec[7] > 0:
                firstrec[8] = 1
            else:
                firstrec[8] = -1

            #endlap, pred_endlap
            retdf.append(firstrec)

    #dfout = pd.concat(retdf)
    dfout = pd.DataFrame(retdf, columns =['carno', 'startlap', 'startrank',
                            'endrank', 'diff', 'sign',
                            'pred_endrank', 'pred_diff', 'pred_sign',
                            #'endlap','pred_endlap'
                            ])
    print('df size:', len(dfout))
    return dfout
def get_allsamples(dfx, year=2018):
    """Collapse per-run sample matrices into one (runcnt x lapcnt) matrix
    per car, where each row holds that run's per-lap mean forecast.

    Args:
        dfx: dict run id -> (df, forecast, tss) tuple; forecast maps
            carno -> (samplecnt x lapcnt) sample matrix and tss maps
            carno -> observed series. Run ids are used as row indices,
            so they are assumed to be 0..runcnt-1 -- TODO confirm.
        year: unused; kept for interface symmetry with get_alldf.

    Returns:
        (full_samples, full_tss): full_samples maps carno ->
        (runcnt x lapcnt) matrix of per-run mean forecasts; full_tss is
        the first run's observed-series dict (shared across runs).
    """
    runs = list(dfx.keys())
    runcnt = len(runs)

    full_tss = dfx[runs[0]][2]
    carlist = list(full_tss.keys())
    samplecnt, lapcnt = dfx[runs[0]][1][carlist[0]].shape
    print('samplecnt:', samplecnt, 'lapcnt:', lapcnt, 'runcnt:', runcnt)

    # empty samples: one row per run for every car
    full_samples = {carno: np.zeros((runcnt, lapcnt)) for carno in carlist}

    for runid in runs:
        # one run; nanmean over the sample axis -> per-lap mean forecast
        forecast = dfx[runid][1]
        for carno in carlist:
            full_samples[carno][runid, :] = np.nanmean(forecast[carno], axis=0)
        #if carno==3 and runid == 0:
        #    print('forecast:',forecast_mean)

    return full_samples, full_tss
#straight implementation of prisk
def quantile_loss(target, quantile_forecast, q):
    """Doubled pinball (quantile) loss at level `q`, ignoring NaNs.

    Straight implementation of the gluonts quantile-loss term:
    2 * sum(|(forecast - target) * (1{target <= forecast} - q)|).
    """
    residual = quantile_forecast - target
    weight = (target <= quantile_forecast) - q
    return 2.0 * np.nansum(np.abs(residual * weight))
def abs_target_sum(target):
    """Denominator of the weighted quantile loss: sum(|target|), NaN-safe."""
    absolute = np.abs(target)
    return np.nansum(absolute)
def prisk(full_samples, full_tss, verbose = False):
    """
    Compute the gluonts weighted quantile losses (P-risk) for the stitched
    per-car sample paths using the gluonts Evaluator.

    Args:
        full_samples: dict carno -> (samplecnt x lapcnt) sample matrix.
        full_tss: dict carno -> observed target series (same lap axis).
        verbose: dump the full aggregate metrics as json.

    Returns:
        The gluonts aggregate metrics dict.
    """
    carlist = full_tss.keys()

    tss = []
    forecasts = []
    forecasts_mean = []
    freq = '1min'
    start = pd.Timestamp("01-01-2019", freq=freq)

    for car in carlist:
        testcar = car
        # laps before index 12 are excluded from evaluation
        fc = SampleForecast(samples = full_samples[testcar][:, 12:], freq=freq, start_date=start + 12)
        # degenerate 1-sample forecast holding the per-lap sample mean
        samples = np.mean(full_samples[testcar][:, 12:], axis =0, keepdims=True)
        fc_mean = SampleForecast(samples = samples, freq=freq, start_date=start + 12)
        index = pd.date_range(start='2019-01-01 00:00:00', freq = 'T', periods = len(full_tss[testcar]))
        ts = pd.DataFrame(index = index, data = full_tss[testcar])
        tss.append(ts)
        forecasts.append(fc)
        forecasts_mean.append(fc_mean)

    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    # NOTE(review): forecasts_mean is built but never evaluated
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(tss))
    if verbose:
        print(json.dumps(agg_metrics, indent=4))
    print(agg_metrics["wQuantileLoss[0.1]"], agg_metrics["wQuantileLoss[0.5]"],agg_metrics["wQuantileLoss[0.9]"])
    return agg_metrics
def prisk_direct_bysamples2(full_samples, full_tss, quantiles=(0.1, 0.5, 0.9), startid=12, verbose=False):
    """
    Compute the weighted quantile loss (P-risk) directly from sample paths.

    target: endrank
    forecast: pred_endrank
    item_id: <carno, startlap>

    Args:
        full_samples: dict carno -> (samplecnt x lapcnt) sample matrix.
        full_tss: dict carno -> observed target series (same lap axis).
        quantiles: quantile levels to evaluate. Default changed from a
            mutable list to an equivalent immutable tuple (anti-pattern
            fix; behavior unchanged -- callers may still pass lists).
        startid: first lap index included in the evaluation window.
        verbose: print per-car diagnostics (car 3) and raw loss matrices.

    Returns:
        (agg_metrics, aggrisk): dict of 'wQuantileLoss[q]' values and the
        matching numpy vector.
    """
    carlist = list(full_tss.keys())
    prisk = np.zeros((len(carlist), len(quantiles)))
    target_sum = np.zeros(len(carlist))
    aggrisk = np.zeros(len(quantiles))

    for carid, carno in enumerate(carlist):
        # for this car
        forecast = full_samples[carno]
        target = full_tss[carno]

        # len(quantiles) x lapcnt quantile forecasts over the sample axis
        quantile_forecasts = np.quantile(forecast, quantiles, axis=0)
        for idx, q in enumerate(quantiles):
            q_forecast = quantile_forecasts[idx]
            prisk[carid, idx] = quantile_loss(target[startid:], q_forecast[startid:], q)
        target_sum[carid] = abs_target_sum(target[startid:])

        if verbose and carno == 3:
            print('target:', target[startid:])
            print('forecast:', q_forecast[startid:])
            print('target_sum:', target_sum[carid])
            print('quantile_forecasts:', quantile_forecasts[:, startid:])

    # aggregate: total loss over all cars, normalized by total |target|
    prisk_sum = np.nansum(prisk, axis=0)
    if verbose:
        print('prisk:', prisk)
        print('prisk_sum:', prisk_sum)
        print('target_sum:', target_sum)

    total = np.sum(target_sum)  # hoisted out of the per-quantile loop
    for idx in range(len(quantiles)):
        aggrisk[idx] = np.divide(prisk_sum[idx], total)

    agg_metrics = {f'wQuantileLoss[{q}]': aggrisk[idx]
                   for idx, q in enumerate(quantiles)}
    print(agg_metrics.values())
    return agg_metrics, aggrisk
# In[ ]:
def prisk_direct_bysamples(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    Weighted quantile loss (P-risk) computed directly from <samples, tss>
    (equal to the gluonts implementation).

    target: endrank; forecast: pred_endrank; item_id: <carno, startlap>.
    Laps before `startid` are excluded. Returns (agg_metrics, aggrisk).
    """
    cars = list(full_tss.keys())
    ncars, nq = len(cars), len(quantiles)
    losses = np.zeros((ncars, nq))
    denom = np.zeros(ncars)
    aggrisk = np.zeros(nq)

    for row, car in enumerate(cars):
        samples = full_samples[car]
        series = full_tss[car]

        # quantile forecasts over the sample axis: len(quantiles) x lapcnt
        qf = np.quantile(samples, quantiles, axis=0)
        for col, q in enumerate(quantiles):
            q_forecast = qf[col]
            losses[row, col] = quantile_loss(series[startid:], q_forecast[startid:], q)
        denom[row] = abs_target_sum(series[startid:])

        if verbose == True and car == 3:
            print('target:', series[startid:])
            print('forecast:', q_forecast[startid:])
            print('target_sum:', denom[row])
            print('quantile_forecasts:', qf[:, startid:])

    # aggregate loss per quantile, normalized by the total |target|
    loss_sum = np.nansum(losses, axis=0)
    if verbose == True:
        print('prisk:', losses)
        print('prisk_sum:', loss_sum)
        print('target_sum:', denom)

    for col in range(nq):
        aggrisk[col] = np.divide(loss_sum[col], np.sum(denom))

    agg_metrics = {}
    for col, q in enumerate(quantiles):
        agg_metrics[f'wQuantileLoss[{q}]'] = aggrisk[col]
    print(agg_metrics.values())
    return agg_metrics, aggrisk
def clear_samples(full_samples, full_tss, clearidx):
    """
    Return deep copies of (full_samples, full_tss) with the laps listed in
    `clearidx` masked out as NaN for every car; the inputs are untouched.
    """
    import copy
    masked_samples = copy.deepcopy(full_samples)
    masked_tss = copy.deepcopy(full_tss)

    for car in full_tss.keys():
        # NaN the selected lap columns of the sample matrix ...
        masked_samples[car][:, clearidx] = np.nan
        # ... and the matching entries of the observed series
        masked_tss[car][clearidx] = np.nan

    return masked_samples, masked_tss
def do_rerank(dfout, short=True):
    """
    Re-rank the predicted finishing positions globally within each
    start lap.

    The model's pred_endrank output can be a float; for every distinct
    startlap the cars are ordered by their predicted value and
    pred_endrank is replaced by the resulting 0-based ordinal, after
    which pred_diff and pred_sign are recomputed against endrank.

    Args:
        dfout: DataFrame with columns carno, startlap, startrank,
            endrank, diff, sign, pred_endrank, pred_diff, pred_sign
            (plus endlap, pred_endlap when short is False).
        short: when True (default) the result omits the trailing
            endlap / pred_endlap columns, matching a 9-column input.

    Returns:
        DataFrame of ints with the re-ranked predictions.
    """
    cols=['carno','startlap','startrank','endrank','diff','sign','pred_endrank','pred_diff','pred_sign','endlap','pred_endlap']
    # was: {x:id for id,x in enumerate(cols)} -- shadowed builtin `id`
    colid = {name: pos for pos, name in enumerate(cols)}

    print('rerank...')
    laps = set(dfout.startlap.values)
    chunks = []
    for lap in laps:
        df = dfout[dfout['startlap']==lap].to_numpy()
        # double argsort turns predicted scores into 0-based ranks
        idx = np.argsort(df[:,colid['pred_endrank']], axis=0)
        true_rank = np.argsort(idx, axis=0)
        df[:,colid['pred_endrank']] = true_rank

        # recompute the prediction-vs-truth diff and its sign
        df[:,colid['pred_diff']] = df[:,colid['pred_endrank']] - df[:,colid['endrank']]
        for rec in df:
            if rec[colid['pred_diff']] == 0:
                rec[colid['pred_sign']] = 0
            elif rec[colid['pred_diff']] > 0:
                rec[colid['pred_sign']] = 1
            else:
                rec[colid['pred_sign']] = -1

        chunks.append(df)

    # was a manual `if len(dfs)==0` vstack accumulation
    data = np.vstack(chunks)
    keep = cols[:-2] if short else cols
    return pd.DataFrame(data.astype(int), columns=keep)
# In[ ]:
def long_predict_bymloutput_multirun(output, dfin, sampleCnt=100):
    """
    Build one long forecast from pooled multi-run ML predictions.

    input:
        test_ds      ; global
        _predictor   ; global

    Args:
        output: label, used only by the commented-out plotting call.
        dfin: DataFrame with `startlap` and `pred_endrank` columns,
            holding one row per run for each start lap.
        sampleCnt: number of gluonts sample paths to request.

    Returns:
        (target, tss[0]): a forecast whose samples are the pooled ML
        predictions (+1 so ranks start at 1 for visualization) and the
        observed series.
    """
    def get_start(idx):
        # minutes (= laps) between this forecast's start and series start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60

    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=_predictor,  # predictor
        num_samples=sampleCnt,  # number of sample paths we want for evaluation
    )

    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)

    import copy
    target = copy.deepcopy(forecasts[-1])

    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)

    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan

    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]

        # get prediction from ml output
        # pos = laps
        # 1 ... 10 | 11 <- start pos in forecasts
        # 0 ... 9 | 10 <- 9 is the startlap
        #
        startlap = start_pos - 2
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values
            #pred_val = _rec.pred_endrank.values
            #make sure shape match, 100 samples
            # one value per pooled run fills the sample axis; assumes the
            # number of pooled runs equals nsample -- TODO confirm
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)

    target.samples = newsamples
    print('multirun target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bymloutput(output, dfin):
    """
    Build one long forecast from single-run ML predictions.

    input:
        test_ds      ; global
        _predictor   ; global

    Args:
        output: label, used only by the commented-out plotting call.
        dfin: DataFrame with `startlap` and `pred_endrank` columns; only
            the first matching row per start lap is used.

    Returns:
        (target, tss[0]): a forecast whose samples all hold the single
        ML prediction (+1 so ranks start at 1 for visualization) and the
        observed series.
    """
    def get_start(idx):
        # minutes (= laps) between this forecast's start and series start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60

    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=_predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )

    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)

    import copy
    target = copy.deepcopy(forecasts[-1])

    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)

    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan

    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]

        # get prediction from ml output
        # pos = laps
        # 1 ... 10 | 11 <- start pos in forecasts
        # 0 ... 9 | 10 <- 9 is the startlap
        #
        startlap = start_pos - 2
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values[0]
            #pred_val = _rec.pred_endrank.values
            #make sure shape match, 100 samples
            # scalar broadcast across all 100 sample paths
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)

    target.samples = newsamples
    print('target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bysamples(output, samples, tss):
    """
    use the farest samples only

    Build a long forecast directly from a precomputed sample matrix by
    slicing it to the span covered by the rolling-window forecasts.

    input:
        samples
        tss

    NOTE(review): the `tss` parameter is immediately shadowed by the
    series returned from make_evaluation_predictions below -- verify
    whether the parameter is still needed.

    Returns:
        (target, tss[0]): the sliced sample forecast (+1 so ranks start
        at 1 for visualization) and the observed series.
    """
    def get_start(idx):
        # minutes (= laps) between this forecast's start and series start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60

    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=_predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )

    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print(first_start, last_start)

    import copy
    target = copy.deepcopy(forecasts[-1])

    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)

    #sample array size: last_start - first_start + npredict
    arraysize = last_start - first_start + npredict

    #error here
    #target.samples = samples[:,-len(forecasts)-1:] + 1
    #target.samples = samples[:, 10 + npredict:] + 1
    # +1: ranks start from 1 for visualization
    target.samples = samples[:, first_start:first_start + arraysize] + 1

    print('long_predict_bysamples==>target samples shape:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target, tss[0]
#
# different idx format to bymloutput
#
def long_predict_bydf(output, dfin):
    """
    Build one long forecast from ML output in a DataFrame.

    Same as long_predict_bymloutput but with a different startlap index
    format: here startlap = start_pos - 1 (vs - 2 there).

    input:
        test_ds      ; global
        _predictor   ; global

    Returns:
        (target, tss[0]): forecast samples filled from dfin's
        pred_endrank (+1 so ranks start at 1) and the observed series.
    """
    def get_start(idx):
        # minutes (= laps) between this forecast's start and series start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60

    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor= _predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )

    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)

    import copy
    target = copy.deepcopy(forecasts[-1])

    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)

    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan

    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]

        # get prediction from ml output
        # pos = laps
        # 1 ... 10 | 11 <- start pos in forecasts
        # 0 ... 9 | 10 <- 9 is the startlap
        #
        startlap = start_pos - 1
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values[0]
            #pred_val = _rec.pred_endrank.values
            #make sure shape match, 100 samples
            # scalar broadcast across all sample paths
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)

    target.samples = newsamples
    print('target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def get_ranknet_multirun(retdata, testcar, sampleCnt=100):
    """
    Gather every run's predictions for one car and build a long forecast
    from the pooled RankNet output.

    Args:
        retdata: dict run id -> result tuple whose first element is a
            per-run prediction DataFrame.
        testcar: car number to extract.
        sampleCnt: forwarded to long_predict_bymloutput_multirun.

    Returns:
        (target_ranknet, tss_ranknet) from long_predict_bymloutput_multirun.
    """
    per_run = [retdata[run_id][0] for run_id in retdata.keys()]
    pooled = pd.concat([df[df['carno'] == testcar] for df in per_run])
    print('dfin_ranknet size:', len(pooled))

    # shift to the 0-based indexing the ml-output plotting code expects
    for col in ('startlap', 'startrank', 'endrank'):
        pooled[col] = pooled[col] - 1

    return long_predict_bymloutput_multirun('ranknet-rank', pooled, sampleCnt=sampleCnt)
# In[ ]:
def ploth(ts_entry, forecast_entry, pits,caution, pitstop,outputfile,
        colors = ['r','g','m'],
        plabels= ['observed','svr','arima','ranknet'],
        ylabel = 'RANK'):
    """
    Draw stacked subplots comparing the observed series against each
    forecast, annotated with pit stops and race status, and save to
    '<outputfile>.pdf'.

    Args:
        ts_entry: list of observed series (one per subplot).
        forecast_entry: list of gluonts forecasts, same length.
        pits: pit-stop array; column 0 = lap, column 1 = rank at pit,
            column 2 = laptime at pit.
        caution, pitstop: per-lap status arrays for add_status.
        outputfile: title and pdf file stem.
        colors: one plot color per forecast.
        plabels: labels; plabels[0] is the observed curve, so it needs
            len(forecast_entry)+1 entries.
        ylabel: 'RANK' or a laptime label; switches y-limits and the pit
            marker column.
    """
    #plot_length = int(forecast_entry[0].samples.shape[1] *1.2)
    #plot_length = forecast_entry[0].samples.shape[1] + 10
    #prediction_intervals = (50.0, 90.0)
    prediction_intervals = [90.0]
    #legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]

    figcnt = len(forecast_entry)
    #fig, axs = plt.subplots(figcnt,1, figsize=(8,6))
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))

    #colors = ['r','g','m']
    #plabels = ['observed','svr','arima','ranknet']
    for idx in range(figcnt):
        ax = plt.subplot(figcnt, 1, idx+1)
        #ax = plt.subplot(1, figcnt, idx+1)
        #ts_entry.iloc[-plot_length:,0].plot(ax=axs, linewidth=1) # plot the time series
        #ts_entry.iloc[-plot_length:,0].plot(ax=axs[idx], linewidth=1) # plot the time series
        #plot_length = int(forecast_entry[idx].samples.shape[1] *1.2)

        # observed curve
        ts_entry[idx].iloc[:,0].plot(linewidth=1, color='b',
                marker='*', alpha=0.7, zorder=-1, label=plabels[0])

        # currank: the observed curve shifted 2 laps to show the rank
        # known at prediction time
        sv = ts_entry[idx].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq='1min') + 2
        date_index = pd.date_range(start, periods = len(sv)-2, freq='1min')
        df2 = pd.DataFrame(sv[:-2], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
                marker='+', alpha=0.7, zorder=-1, label='CurRank')

        #for idx in range(len(forecast_entry)):
        #    forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='g')
        # forecast with its 90% prediction interval
        forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals,
                color=colors[idx],label=plabels[idx+1], zorder=10)
        #forecast_entry[1].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='b')
        #forecast_entry[2].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='r')

        #add mean line, compare with median
        #if forecast_entry[idx].samples.shape[0] > 1:
        if idx>3:
            mean_forecast = copy.deepcopy(forecast_entry[idx])
            mean_forecast.samples = np.mean(mean_forecast.samples, axis=0).reshape((1,-1))
            mean_forecast.copy_dim(0).plot(prediction_intervals=prediction_intervals,
                    color='g',label='use-mean', zorder=10)

        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        #if idx==0:
        ax.set_ylabel(ylabel)
        if idx==0:
            plt.title(outputfile)

        # relabel x ticks as 1-based lap numbers every 5 laps
        locs, labels = plt.xticks()
        #plt.xticks(locs, range(len(locs)))
        start_loc = locs[0]
        offset = range(0, 200, 5)
        #new_locs = range(start_loc , start_loc+200, 10)
        new_locs = [start_loc + x for x in offset]
        #new_labels = [str(x-start_loc + 1) for x in new_locs]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)

        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[idx])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            # NOTE(review): `xl` here carries over from the previous
            # subplot iteration -- verify this is intended
            xlim_h = len(ts_entry[idx])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))

        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        #ax.set_xlim((80,110))

        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)

        #add racestatus
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)

    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcar(carno):
    """
    Plot the first five rank forecasts for one car via ploth.

    input:
        alldata, rankdata; global data
    """
    #target_svr, target_rf,target_arima, target_oracle, target_ranknet_1run = savedata[carno]
    #target_oracle(by longpredict), tss_oracle_multirun,tss_ranknet_multirun
    tsss, targets = alldata[carno]
    pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, rankdata)
    print(np.where(pitstop==1))
    # plabels has one extra leading entry for the observed curve
    ploth(tsss[:5], targets[:5], pits, caution, pitstop,
            'ranknet-rf-rank-forecast-%d'%carno,
            colors = ['y','c','g','m','r'],
            plabels= ['observed','SVR','RF','Arima','RrankNet-Oracle','RrankNet-MLP'])
def plotcar_laptime(carno):
    """
    Plot the laptime forecasts for one car via ploth (ylabel='LapTime').

    input:
        alldata, rankdata; global data
    """
    #target_svr, target_rf,target_arima, target_oracle, target_ranknet_1run = savedata[carno]
    #target_oracle(by longpredict), tss_oracle_multirun,tss_ranknet_multirun
    tsss, targets = alldata[carno]
    pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, rankdata)
    print(np.where(pitstop==1))
    ploth(tsss, targets, pits, caution, pitstop,
            'ranknet-oracle-laptime-forecast-%d'%carno,
            colors = ['m','r'],
            plabels= ['observed','RrankNet-Oracle','RrankNet-MLP'],
            ylabel='LapTime')
def plotrank(outputfile, mode='RANK' ):
    """
    Plot the observed rank (or laptime) curve for every car in `alldata`,
    one subplot per car, and save to '<outputfile>.pdf'.

    input:
        alldata, rankdata; global data

    Args:
        outputfile: pdf file stem.
        mode: 'RANK' plots ranks; any other value plots laptimes.
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))

    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        #target_svr, target_rf,target_arima, target_oracle, target_ranknet_1run = savedata[carno]
        #target_oracle(by longpredict), tss_oracle_multirun,tss_ranknet_multirun
        # tsss/targets are unpacked but only the race status is plotted here
        tsss, targets = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, rankdata)
        print(np.where(pitstop==1))
        ax = plt.subplot(figcnt, 1, idx+1)
        if mode == 'RANK':
            ax.plot(ranks, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='Rank')
            ax.set_ylim((-5,+35))
            # pit markers at (lap-1, rank at pit)
            ax.plot(pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            #add racestatus
            add_status(ax,0, caution, pitstop)
        else:
            ax.plot(laptimes, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='LapTime')
            ax.set_ylim((30,140))
            # pit markers at (lap-1, laptime at pit)
            ax.plot(pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            #add racestatus
            add_status(ax,0, caution, pitstop,y=32, height=5)
        ax.set_xlim((0,200))
        ax.set_ylabel('car-%d'%carno)

    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcarx(carno):
    """
    Like plotcar, but replaces the third forecast with the weighted-oracle
    result from `oracledata` before plotting.

    input:
        alldata, oracledata, rankdata; global data
    """
    #target_svr, target_rf,target_arima, target_oracle, target_ranknet_1run = savedata[carno]
    #target_oracle(by longpredict), tss_oracle_multirun,tss_ranknet_multirun
    tsss, targets = alldata[carno]
    oracle_tss, oracle_targets = oracledata[carno]
    # swap in the weighted-oracle forecast at index 2
    tsss[2] = oracle_tss[1]
    targets[2] = oracle_targets[1]
    pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, rankdata)
    print(np.where(pitstop==1))
    ploth(tsss[:5], targets[:5], pits, caution, pitstop,
            'ranknet-rf-rank-forecast-%d'%carno,
            colors = ['y','c','g','m','r'],
            plabels= ['observed','SVR','RF','Weighted-Oracle','RrankNet-Oracle','RrankNet-MLP'])
def plotoracle(alldata, carno, destdir):
    """
    Plot the oracle-variant forecasts for one car and write the figure to
    '<destdir>ranknet-oracle-forecast-<carno>.pdf'.

    input:
        rankdata; global data

    Args:
        alldata: dict carno -> (ts list, forecast list).
        carno: car number to plot.
        destdir: output directory prefix (must end with a separator).
    """
    outputfile = destdir + 'ranknet-oracle-forecast-%d'%carno
    #target_svr, target_rf,target_arima, target_oracle, target_ranknet_1run = savedata[carno]
    #target_oracle(by longpredict), tss_oracle_multirun,tss_ranknet_multirun
    tsss, targets = alldata[carno]
    pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, rankdata)
    print(np.where(pitstop==1))
    ploth(tsss, targets, pits, caution, pitstop,
            outputfile,
            colors = ['y','c','g','m','r'],
            plabels= ['observed','1run-samples','1run-df','multimean','norerank-multimean','mrun-samples'])
def plotallcars(alldata, outputfile, drawid = 0,
        colors = ['g','c','m','r','y'],
        plabels= ['observed','1run-samples','1run-df','multimean','norerank-multimean','mrun-samples'],
        ylabel='RANK'):
    """
    plot a single fig for all cars

    Draws one subplot per car (stacked vertically): the observed series, a
    prediction_length-shifted "CurRank" baseline, the forecast with its 90%
    interval, pit-stop markers, and a colored race-status strip.

    input:
        prediction_length,freq ; global var
        alldata, rankdata; global data
        drawid : long prediction result index in alldata[carno] to draw
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    prediction_intervals = [90.0]
    #legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    font = {'family': 'serif',
            'color': 'darkred',
            'weight': 'normal',
            'size': 12,
            }
    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        #target_svr, target_rf,target_arima, target_oracle, target_ranknet_1run = savedata[carno]
        #target_oracle(by longpredict), tss_oracle_multirun,tss_ranknet_multirun
        ts_entry, forecast_entry = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, rankdata)
        print(np.where(pitstop==1))
        ax = plt.subplot(figcnt, 1, idx+1)
        # observed
        ts_entry[drawid].iloc[:,0].plot(linewidth=1, color='b',
                marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # currank: the observed series re-indexed prediction_length laps later
        sv = ts_entry[drawid].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq=freq) + prediction_length
        date_index = pd.date_range(start, periods = len(sv)-prediction_length, freq=freq)
        df2 = pd.DataFrame(sv[:-prediction_length], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
                marker='+', alpha=0.7, zorder=-1, label='CurRank')
        #forecast
        forecast_entry[drawid].copy_dim(0).plot(prediction_intervals=prediction_intervals,
                color=colors[drawid],label=plabels[drawid+1], zorder=10)
        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        ax.set_ylabel(ylabel)
        # relabel the datetime x ticks as lap numbers (1, 6, 11, ...)
        locs, labels = plt.xticks()
        #plt.xticks(locs, range(len(locs)))
        start_loc = locs[0]
        offset = range(0, 200, 5)
        #new_locs = range(start_loc , start_loc+200, 10)
        new_locs = [start_loc + x for x in offset]
        #new_labels = [str(x-start_loc + 1) for x in new_locs]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[drawid])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            # NOTE(review): this branch reads xl assigned in a *previous*
            # loop iteration (xl is never set here); it works only when all
            # subplots share the same x origin -- confirm this is intended.
            xlim_h = len(ts_entry[drawid])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        #plt.title(outputfile)
        plt.text(xl + xlim_h - 15, 35, f'car-{carno}',fontdict=font)
        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        #ax.set_xlim((80,110))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)
        #add racestatus: pit-stop triangles plus the colored lap strip
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            # non-rank series: plot pit laptime column and move the strip up
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def get_racestatus(carno, rankdata):
    """
    Extract per-lap race-status arrays for a single car.

    Returns a 6-tuple:
        pits      ; rows of [completed_laps, rank, last_laptime, time_behind_leader]
                    for laps where the car pitted
        cautions  ; same row layout, for laps under caution
        caution   ; 0/1 caution flag per lap, lap 0 dropped
        pitstop   ; 0/1 pit flag per lap, lap 0 dropped
        ranks     ; per-lap rank column, lap 0 dropped
        laptimes  ; per-lap laptime column, lap 0 dropped
    """
    car_rows = rankdata[rankdata['car_number'] == carno]
    #
    # completed_laps start from 0
    # in array mode completed_laps=1 should indexed by 0
    #
    lapinfo = car_rows[['completed_laps', 'rank', 'last_laptime', 'time_behind_leader']].values
    pitstop = np.array([1 if status == 'P' else 0 for status in car_rows['lap_status'].values])
    caution = np.array([1 if status == 'Y' else 0 for status in car_rows['track_status'].values])
    # rows where the corresponding flag fires
    pits = lapinfo[np.where(pitstop == 1)]
    cautions = lapinfo[np.where(caution == 1)]
    ranks = car_rows[['rank']].values
    laptimes = car_rows[['last_laptime']].values
    # drop lap 0 from the per-lap series so index 0 corresponds to lap 1
    return pits, cautions, caution[1:], pitstop[1:], ranks[1:], laptimes[1:]
# Fill colors used by add_status() for the per-lap race-status strip
# (red = pit stop, yellow = caution, green = normal racing).
#red = '#ff8080'
red = 'red'
#yellow = '#8080ff'
yellow = 'yellow'
#green = '#80ff80'
green = 'green'
def add_status(axs, xl, caution, pitstop, maxlap= 200, y=-4, height=2):
    """
    Paint one colored cell per lap along the x axis of a subplot.

    input:
        axs               ; target axes
        xl                ; x coordinate of lap 0 (left xlim of the axes)
        caution, pitstop  ; per-lap 0/1 race-status arrays
        y, height         ; vertical placement of the strip
    Pit stop (red) takes precedence over caution (yellow); green otherwise.
    The strip length is bounded by the shorter of the two status arrays.
    """
    lapcnt = min(len(caution), len(pitstop))
    for lap in range(lapcnt):
        if pitstop[lap] == 1:
            cell_color = red
        elif caution[lap] == 1:
            cell_color = yellow
        else:
            cell_color = green
        cell = plt.Rectangle((lap + xl - 0.5, y), 1, height, fc=cell_color, ec=cell_color)
        axs.add_patch(cell)
# In[ ]:
def get_config():
    """
    Snapshot the module-level experiment settings as a flat list.

    Returns the current values of the configuration globals (data, training
    and test parameters) in a fixed order, for recording/comparing runs.
    """
    config = [
        _savedata,
        _skip_overwrite,
        _inlap_status,
        _feature_mode,
        _featureCnt,
        freq ,
        _train_len,
        prediction_length,
        context_ratio,
        context_length,
        contextlen,
        dataset,
        epochs,
        gpuid,
        _use_weighted_model,
        trainmodel,
        _use_cate_feature,
        use_feat_static,
        distroutput,
        batch_size,
        loopcnt,
        _test_event,
        testmodel,
        pitmodel,
        year
    ]
    return config
# ## run
# In[ ]:
# ---- command-line handling: exactly one argument, the .ini config file
if len(sys.argv) != 2:
    print('usage: RankNet-QuickTest.py <configfile>')
    sys.exit(0)
configfile = sys.argv[1]
base=os.path.basename(configfile)
configname = os.path.splitext(base)[0]
WorkRootDir = 'QuickTestOutput'
#configname = 'weighted-noinlap-nopitage-nocate-c60-drank'
#configfile = f'{configname}.ini'
# NOTE(review): a missing config file is only reported, not fatal; the
# config.read() below then loads nothing and the get*() calls would raise
# NoSectionError -- consider exiting on this branch.
if not os.path.exists(configfile):
    print('config file not exists error:', configfile)
if configfile != '':
    config = configparser.RawConfigParser()
    #config.read(WorkRootDir + '/' + configfile)
    config.read(configfile)
    #set them back (trailing comments show typical values)
    section = "RankNet-QuickTest"
    _savedata = config.getboolean(section, "_savedata")
    _skip_overwrite = config.getboolean(section, "_skip_overwrite")
    _inlap_status = config.getint(section, "_inlap_status") #0
    _feature_mode = config.getint(section, "_feature_mode") #FEATURE_STATUS
    _featureCnt = config.getint(section, "_featureCnt") #9
    freq = config.get(section, "freq") #"1min"
    _train_len = config.getint(section, "_train_len") #40
    prediction_length = config.getint(section, "prediction_length") #2
    context_ratio = config.getfloat(section, "context_ratio") #0.
    context_length = config.getint(section, "context_length") #40
    dataset= config.get(section, "dataset") #'rank'
    epochs = config.getint(section, "epochs") #1000
    gpuid = config.getint(section, "gpuid") #5
    _use_weighted_model = config.getboolean(section, "_use_weighted_model")
    trainmodel = config.get(section, "trainmodel") #'deepARW-Oracle' if _use_weighted_model else 'deepAR-Oracle'
    _use_cate_feature = config.getboolean(section, "_use_cate_feature")
    distroutput = config.get(section, "distroutput") #'student'
    batch_size = config.getint(section, "batch_size") #32
    loopcnt = config.getint(section, "loopcnt") #2
    _test_event = config.get(section, "_test_event") #'Indy500-2018'
    testmodel = config.get(section, "testmodel") #'oracle'
    pitmodel = config.get(section, "pitmodel") #'oracle'
    year = config.get(section, "year") #'2018'
    # derived settings
    contextlen = context_length
    use_feat_static = _use_cate_feature
    #config1 = get_config()
else:
    print('Warning, please use config file')
    sys.exit(0)
#
# global settings
#
# NOTE(review): everything from here to the next cell marker unconditionally
# overwrites the values just read from the config file above (leftover from
# the notebook this script was converted from) -- confirm the clobbering is
# intended before relying on the .ini contents.
#_savedata = False
_savedata = True
_skip_overwrite = True
#inlap status =
# 0 , no inlap
# 1 , set previous lap
# 2 , set the next lap
_inlap_status = 0
#
# featuremode in [FEATURE_STATUS, FEATURE_PITAGE]:
#
_feature_mode = FEATURE_LEADERPITCNT
_featureCnt = 9
#
# training parameters
#
freq = "1min"
_train_len = 60
prediction_length = 2
context_ratio = 0.
context_length = 60
contextlen = context_length
dataset='rank'
epochs = 1000
#epochs = 10
gpuid = 5
#'deepAR-Oracle','deepARW-Oracle'
_use_weighted_model = True
trainmodel = 'deepARW-Oracle' if _use_weighted_model else 'deepAR-Oracle'
_use_cate_feature = False
use_feat_static = _use_cate_feature
distroutput = 'student'
batch_size = 32
#
# test parameters
#
loopcnt = 2
_test_event = 'Indy500-2018'
testmodel = 'oracle'
pitmodel = 'oracle'
year = '2018'
#config2 = get_config()
# In[ ]:
# new added parameters
# train length used during testing (kept fixed to unify evaluation results)
_test_train_len = 40
# In[ ]:
#
# string map
#
# human-readable tags used to compose experiment and file names
inlapstr = {0:'noinlap',1:'inlap',2:'outlap'}
featurestr = {FEATURE_STATUS:'nopitage',FEATURE_PITAGE:'pitage',FEATURE_LEADERPITCNT:'leaderpitcnt'}
weightstr = {True:'weighted',False:'noweighted'}
catestr = {True:'cate',False:'nocate'}
#
# input data parameters
#
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}_v{_featureCnt}_p{_inlap_status}'
_dataset_id = '%s-%s'%(inlapstr[_inlap_status], featurestr[_feature_mode])
#
# internal parameters
#
distr_outputs ={'student':StudentTOutput(),
                'negbin':NegativeBinomialOutput()
               }
distr_output = distr_outputs[distroutput]
#
# experiment id encodes the main switches so each run lands in its own folder
#
experimentid = f'{weightstr[_use_weighted_model]}-{inlapstr[_inlap_status]}-{featurestr[_feature_mode]}-{catestr[_use_cate_feature]}-c{context_length}'
#
#
#
outputRoot = f"{WorkRootDir}/{experimentid}/"
# standard output file names
LAPTIME_DATASET = f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
STAGE_DATASET = f'stagedata-{dbid}.pickle'
EVALUATION_RESULT_DF = f'evaluation_result_d{dataset}.csv'
LONG_FORECASTING_DFS = f'long_forecasting_dfs_d{dataset}.pickle'
FORECAST_FIGS_DIR = f'forecast-figs-d{dataset}/'
# ### 1. make laptime dataset
# In[ ]:
stagedata = {}
global_carids = {}
os.makedirs(outputRoot, exist_ok=True)
#check the dest files first: reuse the pickled datasets when overwrite is skipped
if _skip_overwrite and os.path.exists(outputRoot + LAPTIME_DATASET) and os.path.exists(outputRoot + STAGE_DATASET):
    #
    # load data
    #
    print('Load laptime and stage dataset:',outputRoot + LAPTIME_DATASET, outputRoot + STAGE_DATASET)
    with open(outputRoot + LAPTIME_DATASET, 'rb') as f:
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    with open(outputRoot + STAGE_DATASET, 'rb') as f:
        stagedata = pickle.load(f, encoding='latin1')
else:
    # build everything from the raw per-event CSVs
    cur_carid = 0
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        print('%s: carno=%d, lapnum=%d'%(event, len(carlist), len(laplist)))
        #build the carid map (car numbers repeat across events; ids are global)
        for car in carlist:
            if car not in global_carids:
                global_carids[car] = cur_carid
                cur_carid += 1
    laptime_data = get_laptime_dataset(stagedata,inlap_status = _inlap_status)
    if _savedata:
        import pickle
        #stintdf.to_csv('laptime-%s.csv'%year)
        #savefile = outputRoot + f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
        savefile = outputRoot + LAPTIME_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            #pack [global_carids, laptime_data]
            savedata = [global_carids, laptime_data]
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        #savefile = outputRoot + f'stagedata-{dbid}.pickle'
        savefile = outputRoot + STAGE_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            #pack [global_carids, laptime_data]
            savedata = stagedata
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
# ### 2. make gluonts db
# In[ ]:
outdir = outputRoot + _dataset_id
os.makedirs(outdir, exist_ok=True)
# choose which time-series row feeds the model
if dataset == 'laptime':
    subdir = 'laptime-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_LAPTIME
elif dataset == 'timediff':
    subdir = 'timediff-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_TIMEDIFF
elif dataset == 'rank':
    subdir = 'rank-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_RANK
else:
    # NOTE(review): execution continues with subdir/_run_ts undefined here
    print('error, dataset not support: ', dataset)
_task_dir = f'{outdir}/{subdir}/'
#
#dbname, train_ds, test_ds = makedbs()
#
useeid = False
interpolate = False
#ipstr = '-ip' if interpolate else '-noip'
ipstr = '%s-%s'%('ip' if interpolate else 'noip', 'eid' if useeid else 'noeid')
dbname = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}.pickle'
laptimedb = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}-newlaptimedata.pickle'
#check the dest files first
if _skip_overwrite and os.path.exists(dbname) and os.path.exists(laptimedb):
    print('Load Gluonts Dataset:',dbname)
    with open(dbname, 'rb') as f:
        freq, prediction_length, cardinality, train_ds, test_ds = pickle.load(f, encoding='latin1')
    print('.......loaded data, freq=', freq, 'prediction_length=', prediction_length)
    print('Load New Laptime Dataset:',laptimedb)
    with open(laptimedb, 'rb') as f:
        prepared_laptimedata = pickle.load(f, encoding='latin1')
else:
    # cardinality of the categorical (static) features: carid [+ eventid]
    if useeid:
        cardinality = [len(global_carids), len(laptime_data)]
    else:
        cardinality = [len(global_carids)]
    prepared_laptimedata = prepare_laptimedata(prediction_length, freq, test_event = _test_event,
            train_ratio=0, context_ratio = 0.)
    train_ds, test_ds,_,_ = make_dataset_byevent(prepared_laptimedata, prediction_length,freq,
            useeid=useeid, run_ts=_run_ts,
            test_event=_test_event, log_transform =False,
            context_ratio=0, train_ratio = 0)
    if _savedata:
        print('Save Gluonts Dataset:',dbname)
        with open(dbname, 'wb') as f:
            savedata = [freq, prediction_length, cardinality, train_ds, test_ds]
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        with open(laptimedb, 'wb') as f:
            pickle.dump(prepared_laptimedata, f, pickle.HIGHEST_PROTOCOL)
# ### 3. train the model
# In[ ]:
id='oracle'
run=1
# runid doubles as the on-disk checkpoint directory name
runid=f'{trainmodel}-{dataset}-all-indy-f1min-t{prediction_length}-e{epochs}-r{run}_{id}_t{prediction_length}'
modelfile = _task_dir + runid
if _skip_overwrite and os.path.exists(modelfile):
    print('Model checkpoint found at:',modelfile)
else:
    #get target dim from the first training entry (multivariate vs univariate)
    entry = next(iter(train_ds))
    target_dim = entry['target'].shape
    target_dim = target_dim[0] if len(target_dim) > 1 else 1
    print('target_dim:%s', target_dim)
    estimator = init_estimator(trainmodel, gpuid,
            epochs, batch_size,target_dim, distr_output = distr_output,use_feat_static = use_feat_static)
    predictor = estimator.train(train_ds)
    if _savedata:
        os.makedirs(modelfile, exist_ok=True)
        print('Start to save the model to %s', modelfile)
        predictor.serialize(Path(modelfile))
        print('End of saving the model.')
#
# ### 4. evaluate the model
# In[ ]:
lapmode = _inlap_status
fmode = _feature_mode
runts = dataset
# mid keys the result dicts for this model/year/feature combination
mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], featurestr[fmode])
datasetid = outputRoot + _dataset_id
simulation_outfile=outputRoot + f'shortterm-dfout-oracle-indy500-{dataset}-{inlapstr[_inlap_status]}-{featurestr[_feature_mode]}-2018-oracle-l{loopcnt}-alldata-weighted.pickle'
if _skip_overwrite and os.path.exists(simulation_outfile):
    print('Load Simulation Results:',simulation_outfile)
    with open(simulation_outfile, 'rb') as f:
        dfs,acc,ret,pret = pickle.load(f, encoding='latin1')
    print('.......loaded data, ret keys=', ret.keys())
    # init the stint module
    #
    # in test mode, set all train_len = 40 to unify the evaluation results
    #
    init_simulation(datasetid, _test_event, 'rank',stint.COL_RANK,'rank',prediction_length,
            pitmodel=pitmodel, inlapmode=lapmode,featuremode =fmode,
            train_len = _test_train_len)
else:
    #run simulation
    acc, ret, pret = {}, {}, {}
    #lapmode = _inlap_status
    #fmode = _feature_mode
    #runts = dataset
    #mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], featurestr[fmode])
    if runts == 'rank':
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                'rank',stint.COL_RANK,'rank',
                prediction_length, stint.MODE_ORACLE,loopcnt,
                pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
                train_len = _test_train_len)
    else:
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                'timediff',stint.COL_TIMEDIFF,'timediff2rank',
                prediction_length, stint.MODE_ORACLE,loopcnt,
                pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
                train_len = _test_train_len)
    # quantile-risk metrics over all sampled trajectories
    allsamples, alltss = get_allsamples(ret[mid], year=year)
    _, pret[mid]= prisk_direct_bysamples(allsamples, alltss)
    print(pret[mid])
    dfs={}
    mode=1
    df = get_alldf_mode(ret[mid], year=year,mode=mode)
    name = '%s_%s'%(testmodel, 'mean' if mode==1 else ('mode' if mode==0 else 'median'))
    if year not in dfs:
        dfs[year] = {}
    dfs[year][name] = df
    _trim = 0
    _include_final = True
    _include_stintlen = True
    include_str = '1' if _include_final else '0'
    stint_str = '1' if _include_stintlen else ''
    #simulation_outfile=outputRoot + f'shortterm-dfout-oracle-indy500-{dataset}-{inlapstr[_inlap_status]}-{featurestr[_feature_mode]}-2018-oracle-l{loopcnt}-alldata-weighted.pickle'
    with open(simulation_outfile, 'wb') as f:
        savedata = [dfs,acc,ret,pret]
        pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
#alias
ranknetdf = dfs
# ### 5. final evaluation
# In[ ]:
if _skip_overwrite and os.path.exists(outputRoot + EVALUATION_RESULT_DF):
    print('Load Evaluation Results:',outputRoot + EVALUATION_RESULT_DF)
    oracle_eval_result = pd.read_csv(outputRoot + EVALUATION_RESULT_DF)
else:
    # get pit laps, pit-covered-laps
    # pitdata[year] = [pitlaps, pitcoveredlaps]
    with open('pitcoveredlaps-g1.pickle', 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        pitdata = pickle.load(f, encoding='latin1')
    #
    # Model,SignAcc,MAE,50-Risk,90-Risk
    #
    cols = ['Year','Model','ExpID','laptype','SignAcc','MAE','50-Risk','90-Risk']
    plen = prediction_length
    usemeanstr='mean'
    #load data
    # dfs,acc,ret,pret
    retdata = []
    #oracle: first an overall row across all laps
    dfx = ret[mid]
    allsamples, alltss = get_allsamples(dfx, year=year)
    #_, pret[mid]= prisk_direct_bysamples(ret[mid][0][1], ret[mid][0][2])
    _, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
    dfout = do_rerank(ranknetdf[year]['oracle_mean'])
    accret = stint.get_evalret_shortterm(dfout)[0]
    #fsamples, ftss = runs2samples_ex(ranknet_ret[f'oracle-RANK-{year}-inlap-nopitage'],[])
    #_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
    retdata.append([year,'Oracle',configname,'all', accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
    # then one row per lap type (normal racing laps vs pit-covered laps)
    for laptype in ['normal','pit']:
        # select the set
        pitcoveredlaps = pitdata[year][1]
        normallaps = set([x for x in range(1,201)]) - pitcoveredlaps
        if laptype == 'normal':
            sellaps = normallaps
            clearlaps = pitcoveredlaps
        else:
            sellaps = pitcoveredlaps
            clearlaps = normallaps
        # pitcoveredlaps start idx = 1
        startlaps = [x-plen-1 for x in sellaps]
        #sellapidx = np.array([x-1 for x in sellaps])
        clearidx = np.array([x-1 for x in clearlaps])
        print('sellaps:', len(sellaps), 'clearlaps:',len(clearlaps))
        #oracle
        #outfile=f'shortterm-dfout-ranknet-indy500-rank-inlap-nopitage-20182019-oracle-l10-alldata-weighted.pickle'
        #_all = load_dfout_all(outfile)[0]
        #ranknetdf, acc, ret, pret = _all[0],_all[1],_all[2],_all[3]
        dfout = do_rerank(ranknetdf[year]['oracle_mean'])
        allsamples, alltss = get_allsamples(dfx, year=year)
        # blank out the laps of the *other* type before computing prisk
        allsamples, alltss = clear_samples(allsamples, alltss,clearidx)
        _, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
        dfout = dfout[dfout['startlap'].isin(startlaps)]
        accret = stint.get_evalret_shortterm(dfout)[0]
        print(year, laptype,'RankNet-Oracle',accret[0], accret[1], prisk_vals[1], prisk_vals[2])
        retdata.append([year, 'Oracle',configname,laptype, accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
    oracle_eval_result = pd.DataFrame(data=retdata, columns=cols)
    if _savedata:
        oracle_eval_result.to_csv(outputRoot + EVALUATION_RESULT_DF)
# ### 6. Draw forecasting results
# In[ ]:
if _skip_overwrite and os.path.exists(outputRoot + LONG_FORECASTING_DFS):
    fname = outputRoot + LONG_FORECASTING_DFS
    print('Load Long Forecasting Data:',fname)
    with open(fname, 'rb') as f:
        alldata = pickle.load(f, encoding='latin1')
    print('.......loaded data, alldata keys=', alldata.keys())
else:
    oracle_ret = ret
    mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], featurestr[fmode])
    print('eval mid:', mid, 'oracle_ret keys:', ret.keys())
    ## init predictor
    _predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
    oracle_dfout = do_rerank(dfs[year]['oracle_mean'])
    carlist = set(list(oracle_dfout.carno.values))
    carlist = [int(x) for x in carlist]
    print('carlist:', carlist,'len:',len(carlist))
    #carlist = [13, 7, 3, 12]
    #carlist = [13]
    retdata = {}
    for carno in carlist:
        print("*"*40)
        print('Run models for carno=', carno)
        # create the test_ds first
        test_cars = [carno]
        #train_ds, test_ds, trainset, testset = stint.make_dataset_byevent(events_id[_test_event],
        #                            prediction_length,freq,
        #                            oracle_mode=stint.MODE_ORACLE,
        #                            run_ts = _run_ts,
        #                            test_event = _test_event,
        #                            test_cars=test_cars,
        #                            half_moving_win = 0,
        #                            train_ratio = 0.01)
        train_ds, test_ds, trainset, testset = make_dataset_byevent(prepared_laptimedata, prediction_length,freq,
                useeid=useeid, run_ts=_run_ts,
                test_event=_test_event, log_transform =False,
                context_ratio=0, train_ratio = 0,
                test_cars = test_cars)
        if (len(testset) <= 10 + prediction_length):
            print('ts too short, skip ', len(testset))
            continue
        #by first run samples
        samples = oracle_ret[mid][0][1][test_cars[0]]
        tss = oracle_ret[mid][0][2][test_cars[0]]
        target_oracle1, tss_oracle1 = long_predict_bysamples('1run-samples', samples, tss)
        #by first run output df(_use_mean = true, already reranked)
        df = oracle_ret[mid][0][0]
        dfin_oracle = df[df['carno']==test_cars[0]]
        target_oracle2, tss_oracle2 = long_predict_bydf('oracle-1run-dfout', dfin_oracle)
        #by multi-run mean at oracle_dfout
        df = oracle_dfout
        dfin_oracle = df[df['carno']==test_cars[0]]
        target_oracle3, tss_oracle3 = long_predict_bydf('oracle-multimean', dfin_oracle)
        #no rerank
        # NOTE(review): the year key is hard-coded to '2018' here while the
        # rest of this cell uses the `year` variable -- confirm intended.
        df = ranknetdf['2018']['oracle_mean']
        dfin_oracle = df[df['carno']==test_cars[0]]
        target_oracle4, tss_oracle4 = long_predict_bydf('oracle-norerank-multimean', dfin_oracle)
        #by multiple runs
        target_oracle_multirun, tss_oracle_multirun = get_ranknet_multirun(
                oracle_ret[mid],
                test_cars[0],sampleCnt=loopcnt)
        retdata[carno] = [[tss_oracle1,tss_oracle2,tss_oracle3,tss_oracle4,tss_oracle_multirun],
                [target_oracle1,target_oracle2,target_oracle3,target_oracle4,target_oracle_multirun]]
    alldata = retdata
    if _savedata:
        with open(outputRoot + LONG_FORECASTING_DFS, 'wb') as f:
            pickle.dump(alldata, f, pickle.HIGHEST_PROTOCOL)
# In[ ]:
# ---- render per-car figures plus the all-cars summary figure
destdir = outputRoot + FORECAST_FIGS_DIR
if _skip_overwrite and os.path.exists(destdir):
    print('Long Forecasting Figures at:',destdir)
else:
    # rankdata for race-status overlays comes from the pre-built stage pickle
    with open('stagedata-Indy500_2013_2019_v9_p0.pickle', 'rb') as f:
        stagedata = pickle.load(f, encoding='latin1')
    _alldata, rankdata, _acldata, _flagdata = stagedata['Indy500-2018']
    #destdir = outputRoot + 'oracle-forecast-figs/'
    os.makedirs(destdir, exist_ok=True)
    for carno in alldata:
        plotoracle(alldata, carno, destdir)
    #draw summary result
    outputfile = destdir + f'{configname}'
    plotallcars(alldata, outputfile, drawid = 0)
print(outputRoot)
print(oracle_eval_result)
| 95,347 | 32.478933 | 189 | py |
rankpredictor | rankpredictor-master/run/22.PaperFinal/notebook/RankNet-QuickTest-beforejoint.py | #!/usr/bin/env python
# coding: utf-8
# ## QuickTest
#
# makedb laptime
# makedb gluonts
# train model
# evaluate model
#
# In[ ]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import random
import mxnet as mx
from mxnet import gluon
import pickle
import json
import copy
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from pathlib import Path
import configparser
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWeightEstimator
#import indycar.model.stint_simulator_shortterm_pitmodel as stint
import indycar.model.quicktest_simulator as stint
# In[ ]:
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """
    Build the per-car, per-lap "completed laps" view of a raw timing dataset.

    Keeps the first record per (car_number, completed_laps) ordered by
    elapsed_time, drops bookkeeping columns, and adds per-lap deltas that
    reset to 0 on each car's first row.

    Returns a DataFrame with columns:
        car_number, completed_laps, time_diff, rank, track_status,
        lap_status, elapsed_time
    """
    # pick up data with valid rank: order by elapsed time (stable via the
    # original row index) and keep the first record per (car, lap)
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    #uni_ds = uni_ds.drop(["unique_id", "best_lap", "current_status", "track_status", "lap_status",
    #        "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
    #        "last_pitted_lap","start_position","laps_led"], axis=1)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
            "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
            "last_pitted_lap","start_position","laps_led"], axis=1)
    carnumber = set(uni_ds['car_number'])
    print('cars:', carnumber)
    print('#cars=', len(carnumber))
    # per-lap deltas; mask marks each car's first row (car number changed
    # from the previous row), where the diff must be reset to 0
    mask = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    # BUGFIX: use .loc instead of chained indexing (uni_ds['rank_diff'][mask] = 0),
    # which raises SettingWithCopyWarning and silently fails under pandas
    # copy-on-write semantics
    uni_ds.loc[mask, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[mask, 'time_diff'] = 0
    #df = uni_ds[['car_number','completed_laps','rank','elapsed_time','rank_diff','time_diff']]
    #df = uni_ds[['car_number','completed_laps','rank',
    #        'rank_diff','time_diff',"current_status", "track_status", "lap_status",'elapsed_time']]
    df = uni_ds[['car_number','completed_laps','time_diff','rank','track_status', 'lap_status','elapsed_time']]
    return df
def make_lapstatus_data(dataset):
    """
    Return the per-lap track-status series taken from one race finisher.

    A car that recorded the final lap is assumed to have seen every lap, so
    its track_status column describes the whole race. Returns a DataFrame
    with columns ['completed_laps', 'track_status'].
    """
    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1
    # cars that recorded the final lap are the race finishers
    finishers = dataset[dataset.completed_laps == final_lap].car_number.values
    print('count of completed cars:', len(finishers))
    print('completed cars:', finishers)
    # take the first finisher, one row per lap
    one_finisher = dataset[dataset['car_number'] == finishers[0]]
    one_finisher = one_finisher.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    return one_finisher[['completed_laps', 'track_status']]
def load_data(event, year=0):
    """
    Load one event's timing CSV and derive the standard data views.

    input:
        event ; event name, e.g. 'Indy500-2018'
        year  ; optional int; when > 0 it is appended to the file name

    output: (alldata, rankdata, acldata, flagdata)
        alldata  ; raw records for all cars
        rankdata ; deduplicated records ordered by elapsed time
        acldata  ; per-car per-lap view of all cars (make_cl_data)
        flagdata ; per-lap track-status series (make_lapstatus_data)
    Note: cldata (finishers-only view) is computed but not returned,
    preserving the original return contract.
    """
    #inputfile = '../data/final/C_'+ event +'-' + year + '-final.csv'
    if year > 0:
        # BUGFIX: year is an int here (it is compared with > 0); concatenating
        # it directly into the path raised TypeError, so format it explicitly
        inputfile = '../data/final/C_' + event + '-' + str(year) + '.csv'
    else:
        inputfile = '../data/final/C_' + event + '.csv'
    #outputprefix = year +'-' + event + '-'
    dataset = pd.read_csv(inputfile)
    #dataset.info(verbose=True)
    final_lap = max(dataset.completed_laps)
    total_laps = final_lap + 1
    # get records for the cars that finish the race
    completed_car_numbers = dataset[dataset.completed_laps == final_lap].car_number.values
    completed_car_count = len(completed_car_numbers)
    print('count of completed cars:', completed_car_count)
    print('completed cars:', completed_car_numbers)
    #make a copy before filtering down to finishers
    alldata = dataset.copy()
    dataset = dataset[dataset['car_number'].isin(completed_car_numbers)]
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    cldata = make_cl_data(dataset)
    flagdata = make_lapstatus_data(dataset)
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata, flagdata
def nan_helper(y):
    """Helper to handle indices and logical indices of NaNs.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, logical indices of NaNs
        - index, a function, with signature indices= index(logical_indices),
          to convert logical indices of NaNs to 'equivalent' indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x= nan_helper(y)
        >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
    """
    nan_mask = np.isnan(y)

    def to_indices(logical):
        # translate a boolean mask into positional indices
        return logical.nonzero()[0]

    return nan_mask, to_indices
def get_lap2nextpit(lap_status, maxlap=200):
    """
    For every lap, compute how many laps remain until the next pit stop.

    input:
        lap_status ; array of 0/1 indicating pitstops for each lap, nan means incomplete race
        maxlap     ; the max lap number of the race
    output:
        array of the lap gap to the next pit for each lap; NaN where no
        upcoming pit is known (tail of an incomplete race)

    For cars that finished (no NaNs), the finish line at `maxlap` is treated
    as a virtual final pit so the last stint also gets a countdown.
    """
    pit_laps = list(np.where(lap_status == 1)[0])
    if np.sum(np.isnan(lap_status)) == 0:
        # complete car: append the virtual end-of-race "pit"
        pit_laps.append(maxlap)
    gaps = np.zeros_like(lap_status)
    gaps[:] = np.nan
    # guard: no pit and no finish -> everything stays NaN
    if not pit_laps:
        return gaps
    nxt = 0
    for lap in range(len(lap_status)):
        if lap >= pit_laps[nxt]:
            # reached the current pit lap; count down to the following one
            nxt += 1
            if nxt >= len(pit_laps):
                break
        gaps[lap] = pit_laps[nxt] - lap
    return gaps
def get_lapdata(acldata):
"""
input:
acldata['car_number','completed_laps','time_diff','rank','track_status', 'lap_status','elapsed_time']
timediff: [car_number, completed_laps] -> elapsed time diff to leader
output:
lapdata = acldata[['car_number','completed_laps',
'time_diff','rank','track_status', 'lap_status','time_behind']].to_numpy()
"""
COL_COMPLETED_LAPS = 1
COL_ELAPSED_TIME = 6
maxlap = np.max(acldata['completed_laps'].values)
#'car_number','completed_laps','time_diff','rank','track_status', 'lap_status','time_behind'
time_behind = []
for lap in range(1, maxlap+1):
this_lap = acldata[acldata['completed_laps']==lap][
['car_number','completed_laps','time_diff','rank',
'track_status', 'lap_status','elapsed_time']].values
min_elapsed_time = np.nanmin(this_lap[:,COL_ELAPSED_TIME].astype(np.float))
#print(f'lap:{lap}, min_elapsed_time:{min_elapsed_time}')
for row in this_lap:
car_number = int(row[0])
time_diff = row[2]
rank = row[3]
track_status = row[4]
lap_status = row[5]
timebehind = float(row[COL_ELAPSED_TIME]) - min_elapsed_time
#
time_behind.append([car_number, lap, time_diff,rank,track_status, lap_status,
timebehind, float(row[COL_ELAPSED_TIME])])
#return
lapdata = np.array(time_behind)
return lapdata
# features: laptime, rank, track_status, lap_status, timediff
# Row indices of the per-car feature matrix built by get_laptime_dataset()
# (datalist[car, FEATURE, lap]).
LAPTIME = 0
RANK = 1
TRACK_STATUS = 2
LAP_STATUS = 3
TIME_BEHIND = 4
CAUTION_LAPS_INSTINT = 5
LAPS_INSTINT = 6
ELAPSED_TIME = 7
LAP2NEXTPIT = 8
# total number of feature rows per car
_featureCnt = 9
def get_laptime_dataset(stagedata, inlap_status = 0):
    """
    #add caution_laps_instint, laps_instint
    Build the per-event numpy feature cube from the per-event dataframes.

    input: (alldata, rankdata, acldata, flagdata) per event in stagedata;
        inlap_status: 0 = no inlap marking, 1 = also flag the lap before a
        pit as 'P', 2 = also flag the lap after a pit as 'P'
    output: laptime & rank data
        [(
        eventid,
        carids : rowid -> carno,
        datalist: #car_number x features x #totallaps (padded by Nan)
            entry: [[laptime, rank, track_status, lap_status,
                     caution_laps_instint, laps_instint]]
        )]

    Relies on module globals: events_id, _featureCnt and the feature-row
    index constants (LAPTIME, RANK, ...).
    """
    laptime_data = []
    for event in stagedata.keys():
        print(f'start event: {event}')
        laptime_rec = []
        eventid = events_id[event]
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        totalcars = len(carlist)
        totallaps = len(laplist)
        #carnumber -> carid (row index) and back
        carids={key:idx for idx, key in enumerate(carlist)}
        decode_carids={idx:key for idx, key in enumerate(carlist)}
        #init per-car stint counters
        lap_instint = {carids[x]:0 for x in carlist}
        caution_instint = {carids[x]:0 for x in carlist}
        #array: car_number x lap
        #laptime = np.zeros((totalcars, totallaps-1))
        #rank = np.zeros((totalcars, totallaps-1))
        laptime = np.empty((totalcars, totallaps-1))
        rank = np.empty((totalcars, totallaps-1))
        laptime[:] = np.NaN
        rank[:] = np.NaN
        # NOTE(review): the laptime/rank arrays above are never used as
        # arrays; `rank` is rebound to a scalar inside the loop below.
        # NaN padding marks laps a car never completed.
        datalist = np.empty((totalcars, _featureCnt, totallaps-1))
        datalist[:] = np.NaN
        #lapdata = acldata[['car_number','completed_laps',
        #        'time_diff','rank','track_status', 'lap_status','elapsed_time']].to_numpy()
        #'car_number','completed_laps','time_diff','rank','track_status', 'lap_status','time_behind'
        lapdata = get_lapdata(acldata)
        for row in lapdata:
            #completed_laps: skip the lap-0 (grid) record
            if int(row[1]) == 0:
                continue
            #add to data array (lap N is stored at column N-1)
            car_number = carids[int(row[0])]
            completed_laps = int(row[1])-1
            time_diff = float(row[2])
            rank = int(row[3])
            track_status = 1 if row[4]=='Y' else 0
            lap_status = 1 if row[5]=='P' else 0
            time_behind = float(row[6])
            datalist[car_number, LAPTIME, completed_laps] = time_diff
            datalist[car_number, RANK, completed_laps] = rank
            datalist[car_number, TRACK_STATUS, completed_laps] = track_status
            datalist[car_number, LAP_STATUS, completed_laps] = lap_status
            datalist[car_number, TIME_BEHIND, completed_laps] = time_behind
            datalist[car_number, ELAPSED_TIME, completed_laps] = float(row[7])
            #stint status
            if track_status == 1:
                caution_instint[car_number] += 1
            lap_instint[car_number] += 1
            if lap_status == 1:
                #new stint: counters reset at a pit stop
                lap_instint[car_number] = 0
                caution_instint[car_number] = 0
                # add inlap feature into lap_Status
                # set the previous lap to inlap status
                # what does it mean?
                if (inlap_status!=0):
                    if inlap_status == 1:
                        # set the previous lap of 'P'
                        if completed_laps > 0:
                            #datalist[car_number, LAP_STATUS, completed_laps-1] = INLAP_STATUS
                            datalist[car_number, LAP_STATUS, completed_laps-1] = 1
                    else:
                        # set the next lap of 'P'
                        if completed_laps +1 < totallaps:
                            #datalist[car_number, LAP_STATUS, completed_laps-1] = INLAP_STATUS
                            datalist[car_number, LAP_STATUS, completed_laps + 1] = 1
            datalist[car_number, LAPS_INSTINT, completed_laps] = lap_instint[car_number]
            datalist[car_number, CAUTION_LAPS_INSTINT, completed_laps] = caution_instint[car_number]
        #update lap2nextpit in datalist
        for caridx in range(datalist.shape[0]):
            lap_status = datalist[caridx, LAP_STATUS, :]
            #pit status -> countdown-to-next-pit feature row
            lap2nextpit = get_lap2nextpit(lap_status)
            datalist[caridx, LAP2NEXTPIT, :] = lap2nextpit
        #add one record
        laptime_data.append([eventid, decode_carids, datalist])
        # push this event into stage dataframe
        print('event=%s, records=%s'%(event, datalist.shape))
    return laptime_data
# In[ ]:
def nan_helper(y):
    """Locate NaNs in a 1d numpy array.

    Returns
    -------
    mask : boolean array, True where ``y`` is NaN
    to_positions : callable mapping a logical (boolean) index array to
        positional indices, handy for interpolation of the gaps:

        >>> nans, pos = nan_helper(y)
        >>> y[nans] = np.interp(pos(nans), pos(~nans), y[~nans])
    """
    mask = np.isnan(y)

    def to_positions(logical):
        # convert a boolean mask into the positions where it is True
        return logical.nonzero()[0]

    return mask, to_positions
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
#
# Feature-row indices into the per-event data matrix [car, feature, lap].
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2      # 1 = caution ('Y') lap
COL_LAPSTATUS=3        # 1 = pit ('P') lap
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
COL_ELAPSED_TIME= 7
COL_LAP2NEXTPIT = 8
#_featureCnt = 9
# added new features
COL_LEADER_PITCNT = 9        # pits made by better-ranked cars this lap
COL_TOTAL_PITCNT = 10        # pits made by the whole field this lap
COL_SHIFT_TRACKSTATUS = 11   # track status shifted left (oracle/lookahead)
COL_SHIFT_LAPSTATUS = 12
COL_SHIFT_LEADER_PITCNT = 13
COL_SHIFT_TOTAL_PITCNT = 14
COL_LASTFEATURE = 14
# dynamically extended space in simulation
# (scratch rows used to save/restore state during simulation runs)
COL_TRACKSTATUS_SAVE = COL_LASTFEATURE+1
COL_LAPSTATUS_SAVE = COL_LASTFEATURE+2
COL_CAUTION_LAPS_INSTINT_SAVE = COL_LASTFEATURE+3
COL_LAPS_INSTINT_SAVE= COL_LASTFEATURE+4
COL_ENDPOS = COL_LASTFEATURE+5

# Bit flags selecting which covariates get_real_features() emits;
# combine with bitwise OR and test with test_flag().
FEATURE_STATUS = 2
FEATURE_PITAGE = 4
FEATURE_LEADER_PITCNT = 8
FEATURE_TOTAL_PITCNT = 16
FEATURE_SHIFT_TRACKSTATUS = 32
FEATURE_SHIFT_LAPSTATUS = 64
FEATURE_SHIFT_LEADER_PITCNT = 128
FEATURE_SHIFT_TOTAL_PITCNT = 256

# flag -> (long name, one-char code) used by decode_feature_mode()
_feature2str= {
        FEATURE_STATUS : ("FEATURE_STATUS",'S'),
        FEATURE_PITAGE : ("FEATURE_PITAGE",'A'),
        FEATURE_LEADER_PITCNT : ("FEATURE_LEADER_PITCNT",'L'),
        FEATURE_TOTAL_PITCNT : ("FEATURE_TOTAL_PITCNT",'T'),
        FEATURE_SHIFT_TRACKSTATUS : ("FEATURE_SHIFT_TRACKSTATUS",'Y'),
        FEATURE_SHIFT_LAPSTATUS : ("FEATURE_SHIFT_LAPSTATUS",'P'),
        FEATURE_SHIFT_LEADER_PITCNT : ("FEATURE_SHIFT_LEADER_PITCNT",'L'),
        FEATURE_SHIFT_TOTAL_PITCNT : ("FEATURE_SHIFT_TOTAL_PITCNT",'T')
        }

# Bit flags for oracle_mode in make_dataset_byevent(): which covariates
# are zeroed out / how the test set is built.
MODE_ORACLE = 0
MODE_NOLAP = 1
MODE_NOTRACK = 2
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
#MODE_STR={MODE_ORACLE:'oracle', MODE_NOLAP:'nolap',MODE_NOTRACK:'notrack',MODE_TEST:'test'}
#_feature_mode = FEATURE_STATUS
def decode_feature_mode(feature_mode):
    """Print the long names of the enabled feature flags and return a
    compact code string (one char per flag, '0' when disabled)."""
    long_names = []
    code_chars = []
    for flag, (long_name, short_char) in _feature2str.items():
        if test_flag(feature_mode, flag):
            long_names.append(long_name)
            code_chars.append(short_char)
        else:
            code_chars.append('0')
    print(' '.join(long_names))
    return ''.join(code_chars)
def add_leader_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS, shift_len = 0,
                  dest_col = COL_LEADER_PITCNT,
                  verbose = False):
    """
    add a new feature into mat(car, feature, lap)

    For every lap the cars are ordered by rank and each car receives the
    cumulative number of pit stops made by the cars ranked ahead of it
    (an exclusive cumulative sum over the rank-ordered pit indicators).

    input:
        selmat   : laptime_data array [car, feature, lap]
        rank_col : feature row holding the rank used for ordering
        pit_col  : feature row holding the pit-stop indicator (0/1)
        shift_len: lag (in laps) between the rank ordering and the pit data
        dest_col : feature row to overwrite; -1 appends a new feature row
        verbose  : dump intermediate matrices for debugging
    return:
        matrix with the feature filled in; selmat itself in update mode,
        a widened copy when dest_col == -1
    """
    dim1, dim2, dim3 = selmat.shape

    # order the cars by rank for every lap
    # (fix: removed dead `true_rank = ....astype(np.float)` line; np.float
    #  was removed in NumPy 1.24 and the value was never used)
    idx = np.argsort(selmat[:, rank_col,:], axis=0)

    # gather pit indicators in rank order, lagged by shift_len laps
    pits = np.zeros((dim1,dim3))
    for lap in range(shift_len, dim3):
        col = idx[:, lap-shift_len]
        pits[:, lap] = selmat[col, pit_col, lap]

    # pits made by strictly better-ranked cars: exclusive cumulative sum
    leaderCnt = np.nancumsum(pits, axis=0) - pits

    if verbose:
        print('pits:\n')
        print(pits[:,190:])
        print('leaderCnt raw:\n')
        print(leaderCnt[:,190:])

    #remove nans
    nanidx = np.isnan(leaderCnt)
    leaderCnt[nanidx] = 0
    if verbose:
        print('leaderCnt after remove nan:\n')
        print(leaderCnt[:,190:])

    if dest_col == -1:
        #create a new data matrix one feature row wider
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: write into selmat in place
        newmat = selmat

    # scatter the rank-ordered counts back to the original car rows
    for lap in range(dim3):
        col = idx[:, lap]
        newmat[col, dest_col, lap] = leaderCnt[:, lap]

    # sync length to the rank row: restore the NaN tail of each car's ts
    for rec in newmat:
        nans = np.isnan(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            rec[dest_col, -nan_count:] = np.nan

    return newmat
def add_allpit_cnt(selmat, rank_col=COL_RANK, pit_col=COL_LAPSTATUS,
                  dest_col = COL_TOTAL_PITCNT,verbose = False):
    """
    add a new feature into mat(car, feature, lap)
    total pits in a lap

    Every car row receives the same vector: the number of pit stops made
    by the whole field on each lap.

    input:
        selmat   : laptime_data array [car, feature, lap]
        rank_col : feature row holding the rank (used to find the NaN tail)
        pit_col  : feature row holding the pit-stop indicator (0/1)
        dest_col : feature row to overwrite; -1 appends a new feature row
        verbose  : dump intermediate vectors for debugging
    return:
        matrix with the feature filled in; selmat itself in update mode,
        a widened copy when dest_col == -1
    """
    dim1, dim2, dim3 = selmat.shape

    # total pit count of the whole field for each lap -> vector of len dim3
    totalCnt = np.nansum(selmat[:, pit_col, :], axis=0).reshape((-1))

    if verbose:
        print('pits:\n')
        # bugfix: the original printed an undefined name `pits` here,
        # raising NameError whenever verbose=True
        print(selmat[:, pit_col, 190:])
        print('totalCnt raw:\n')
        print(totalCnt[190:])

    #remove nans
    nanidx = np.isnan(totalCnt)
    totalCnt[nanidx] = 0
    if verbose:
        print('totalCnt after remove nan:\n')
        print(totalCnt[190:])

    if dest_col == -1:
        #create a new data matrix one feature row wider
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: write into selmat in place
        newmat = selmat

    # broadcast the field-wide count to every car row
    for car in range(dim1):
        newmat[car, dest_col, :] = totalCnt

    # sync length to the rank row: restore the NaN tail of each car's ts
    for rec in newmat:
        nans = np.isnan(rec[rank_col,:])
        nan_count = np.sum(nans)
        if nan_count > 0:
            rec[dest_col, -nan_count:] = np.nan
    return newmat
def add_shift_feature(selmat, rank_col=COL_RANK, shift_col=COL_LAPSTATUS, shift_len = 2,
                  dest_col = -1,verbose = False):
    """
    add a new feature into mat(car, feature, lap)
    shift features left in a lap

    warning: these are oracle features, be careful not to let future rank
    positions leaking

    input:
        selmat   : laptime_data array [car, feature, lap]
        rank_col : feature row holding the rank (defines the valid laps)
        shift_col: feature row to shift left by shift_len laps
        shift_len: how many laps of lookahead the shifted copy provides
        dest_col : feature row to overwrite; -1 appends a new feature row
        verbose  : unused, kept for interface compatibility
    return:
        matrix with the feature filled in; selmat itself in update mode,
        a widened copy when dest_col == -1
    """
    dim1, dim2, dim3 = selmat.shape
    if dest_col == -1:
        #create a new data matrix one feature row wider
        newmat = np.zeros((dim1,dim2+1,dim3))
        dest_col = dim2
        newmat[:,:dim2,:] = selmat.copy()
    else:
        #update mode: write into selmat in place
        newmat = selmat

    for car in range(dim1):
        # empty status (NaN) by default
        newmat[car, dest_col, :] = np.nan
        # valid laps are the laps where the rank row is not NaN
        # (cleanup: the original also called nan_helper() and computed a
        #  nan_count that were never used)
        rec = selmat[car]
        valid = ~np.isnan(rec[rank_col,:])
        recnnz = rec[shift_col, valid]
        reclen = len(recnnz)
        # shifted copy: lap t gets the value of lap t+shift_len; the last
        # shift_len valid laps are left zero-filled
        newmat[car, dest_col, :reclen] = 0
        newmat[car, dest_col, :reclen-shift_len] = recnnz[shift_len:]
    return newmat
def prepare_laptimedata(prediction_length, freq,
                     test_event = 'Indy500-2018',
                     train_ratio=0.8,
                     context_ratio = 0.,
                     shift_len = -1):
    """
    prepare the laptime data for training
    1. remove short ts
    2. rerank the tss
    3. create new features

    input:
        laptime_data ; global var [eventid, carids, datalist] per event
        prediction_length, freq, train_ratio, context_ratio ; split params
        shift_len ; lookahead used by the shift features (-1 -> prediction_length)
    output:
        data ; new representation of laptime_data (events up to test_event)
    """
    _laptime_data = laptime_data.copy()
    test_eventid = events_id[test_event]
    run_ts = COL_RANK

    # check shift len
    if shift_len < 0:
        shift_len = prediction_length
    print('prepare_laptimedata shift len:', shift_len)

    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    new_data = []
    for _data in _laptime_data:
        #skip eid > test_eventid
        if _data[0] > test_eventid:
            print('skip this event:', events[_data[0]])
            break

        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False

        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            #use global train_len
            train_len = _train_len if not test_mode else _test_train_len

        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10

        print(f'before ====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')

        #rerank due to short ts removed
        #if run_ts == COL_RANK and dorerank == True:
        if True:
            sel_rows = []
            # use to check the dimension of features
            input_feature_cnt = _data[2].shape[1]
            if input_feature_cnt < COL_LASTFEATURE + 1:
                print('create new features mode, feature_cnt:', input_feature_cnt)
            else:
                print('update features mode, feature_cnt:', input_feature_cnt)

            for rowid in range(_data[2].shape[0]):
                # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
                rec = _data[2][rowid].copy()
                #remove nan(only tails)
                rec = rec[:, ~np.isnan(rec[run_ts,:])]
                totallen = rec.shape[1]
                if ( totallen < train_len + prediction_length):
                    print(f'rerank a short ts: carid={_data[1][rowid]},len={totallen}')
                    continue
                else:
                    sel_rows.append(rowid)

            #get selected matrix
            sel_idx = np.array(sel_rows)
            selmat = _data[2][sel_idx]

            # rerank among the surviving cars, lap by lap
            mask = np.isnan(selmat[:,COL_RANK,:])
            idx = np.argsort(selmat[:,COL_RANK,:], axis=0)
            # fix: astype(np.float) -> astype(float); the np.float alias
            # was removed in NumPy 1.24 and raised AttributeError
            true_rank = np.argsort(idx, axis=0).astype(float)
            true_rank[mask] = np.nan

            if test_mode:
                #
                # for historical code mismatch, simulation does not run rerank
                #
                _data[2][sel_idx,COL_RANK,:] = true_rank + 1
            else:
                _data[2][sel_idx,COL_RANK,:] = true_rank

            # update the carno dict
            new_carids = {}
            for rowid in range(len(sel_idx)):
                carid = sel_idx[rowid]
                carno = _data[1][carid]
                new_carids[rowid] = carno

            # add new features
            # add leaderPitCnt
            if _data[0]==0:
                verbose = True
            else:
                verbose = False
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_LEADER_PITCNT
            data2_intermediate = add_leader_cnt(_data[2][sel_idx], shift_len = shift_len, dest_col=dest_col, verbose = verbose)

            # add totalPit
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_TOTAL_PITCNT
            data2_intermediate = add_allpit_cnt(data2_intermediate, dest_col=dest_col)

            #
            # add shift features, a fixed order, see the MACROS
            #COL_SHIFT_TRACKSTATUS = 11
            #COL_SHIFT_LAPSTATUS = 12
            #COL_SHIFT_LEADER_PITCNT = 13
            #COL_SHIFT_TOTAL_PITCNT = 14
            #
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TRACKSTATUS
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                    shift_col=COL_TRACKSTATUS, shift_len = shift_len)
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LAPSTATUS
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                    shift_col=COL_LAPSTATUS, shift_len = shift_len)
            # leader_pitcnt can not be shift, target leaking, just do not use it
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_LEADER_PITCNT
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                    shift_col=COL_LEADER_PITCNT, shift_len = shift_len)
            dest_col = -1 if input_feature_cnt < COL_LASTFEATURE + 1 else COL_SHIFT_TOTAL_PITCNT
            data2_intermediate = add_shift_feature(data2_intermediate, dest_col=dest_col,
                    shift_col=COL_TOTAL_PITCNT, shift_len = shift_len)

        # final
        data2_newfeature = data2_intermediate
        new_data.append([_data[0], new_carids, data2_newfeature])

    return new_data
def get_real_features(feature_mode, rec, endpos):
    """
    Build the list of real-valued covariate vectors selected by feature_mode.

    Each enabled flag contributes its feature row(s), truncated to the first
    `endpos` laps (endpos <= 0 means the full length). FEATURE_STATUS
    contributes both the track-status and lap-status rows; every other flag
    maps to a single row. The output order is fixed by the table below.
    """
    #check endpos
    if endpos <= 0:
        endpos = rec.shape[1]

    # (flag, feature rows contributed by that flag), in output order
    flag_to_rows = [
        (FEATURE_STATUS, (COL_TRACKSTATUS, COL_LAPSTATUS)),
        (FEATURE_PITAGE, (COL_LAPS_INSTINT,)),
        (FEATURE_LEADER_PITCNT, (COL_LEADER_PITCNT,)),
        (FEATURE_TOTAL_PITCNT, (COL_TOTAL_PITCNT,)),
        (FEATURE_SHIFT_TRACKSTATUS, (COL_SHIFT_TRACKSTATUS,)),
        (FEATURE_SHIFT_LAPSTATUS, (COL_SHIFT_LAPSTATUS,)),
        (FEATURE_SHIFT_LEADER_PITCNT, (COL_SHIFT_LEADER_PITCNT,)),
        (FEATURE_SHIFT_TOTAL_PITCNT, (COL_SHIFT_TOTAL_PITCNT,)),
    ]

    features = []
    for flag, rows in flag_to_rows:
        if test_flag(feature_mode, flag):
            for row in rows:
                features.append(rec[row, :endpos])
    return features
def make_dataset_byevent(_laptime_data, prediction_length, freq,
                     useeid = False,
                     run_ts=COL_LAPTIME,
                     test_event = 'Indy500-2018',
                     use_global_dict = True,
                     oracle_mode = MODE_ORACLE,
                     half_moving_win = True,
                     train_ratio=0.8,
                     log_transform = False,
                     context_ratio = 0.,
                     dorerank = True,
                     test_cars = []
                ):
    """
    split the ts to train and test part by the ratio

    Builds gluonts ListDatasets from the per-event data matrices:
    every event except `test_event` goes entirely into the train set;
    the test event produces rolling-window test records (step = -1 lap)
    plus, when context_ratio != 0, an initial context slice for training.

    oracle_mode: false to simulate prediction in real by
            set the covariates of track and lap status as nan in the testset
    Relies on module globals: _feature_mode, events, events_id,
    global_carids, _train_len, _test_train_len.
    """
    #global setting
    feature_mode = _feature_mode
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []
    totalTSCnt = 0
    totalTSLen = 0
    test_eventid = events_id[test_event]
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        train_len = int(np.max(ts_len) * train_ratio)
        if train_len == 0:
            #use global train_len
            train_len = _train_len if not test_mode else _test_train_len
        if context_ratio != 0.:
            # add this part to train set
            context_len = int(np.max(ts_len) * context_ratio)
        else:
            context_len = prediction_length*2
        if context_len < 10:
            context_len = 10
        print(f'after ====event:{events[_data[0]]}, prediction_len={prediction_length},train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)},context_len={context_len}')
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            totalTSCnt += 1
            totalTSLen += totallen
            if ( totallen < train_len + prediction_length):
                print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                # map event-local row id -> car number -> global car id
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            #check carno in test_cars, testmode only
            if len(test_cars)>0 and carno not in test_cars:
                continue
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            #first, get target a copy
            # target can be COL_XXSTATUS
            target_val = rec[run_ts,:].copy().astype(np.float32)
            if log_transform:
                target_val = np.log(target_val + 1.0)
            # selection of features: zero out the covariates the mode disables
            if test_flag(oracle_mode, MODE_NOTRACK):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                real_features = get_real_features(feature_mode, rec, -1)
                _train.append({'target': target_val,
                               'start': start,
                               'feat_static_cat': static_cat,
                               'feat_dynamic_real': real_features
                              })
            else:
                # reset train_len
                if context_ratio != 0.:
                    # all go to train set
                    #add [0, context_len] to train set
                    # all go to train set
                    _train.append({'target': target_val[:context_len],
                                   'start': start,
                                   'feat_static_cat': static_cat,
                                   'feat_dynamic_real': get_real_features(feature_mode, rec, context_len)
                                  })
                # testset
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                step = -1
                for endpos in range(totallen, context_len+prediction_length,
                                    step):
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    pitage_rec = rec[COL_LAPS_INSTINT, :endpos].copy()
                    real_features = get_real_features(feature_mode, rec, endpos)
                    _test.append({'target': rec[run_ts,:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': real_features
                                 })
                    test_rec_cnt += 1
            #check feature cnt
            featureCnt = len(real_features)
            #add one ts
            print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt},featureCnt:{featureCnt}')
        train_set.extend(_train)
        test_set.extend(_test)
    print(f'train len:{len(train_set)}, test len:{len(test_set)}, totsl TsCnt:{totalTSCnt}, total ts len:{totalTSLen}')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)
    return train_ds, test_ds, train_set, test_set
# In[ ]:
def init_estimator(model, gpuid, epochs=100, batch_size = 32,
        target_dim = 3, distr_output = None, use_feat_static = True):
    """
    Build a gluonts estimator/predictor for the given model name.

    Relies on module globals: prediction_length, context_length,
    cardinality, freq, logger. gpuid < 0 selects CPU, otherwise gpu(gpuid).
    Unknown model names log an error and exit the process.
    """
    if int(gpuid) < 0:
        ctx = "cpu"
    else:
        ctx = "gpu(%s)"%gpuid

    if model == 'deepAR':
        # plain DeepAR with static car-id category only
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            distr_output = distr_output,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                    batch_size = batch_size,
                    epochs=epochs,
                    learning_rate=1e-3,
                    num_batches_per_epoch=100
                   )
        )
    elif model == 'deepARW':
        # weighted DeepAR variant
        estimator = DeepARWEstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            distr_output = distr_output,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                    batch_size = batch_size,
                    epochs=epochs,
                    learning_rate=1e-3,
                    num_batches_per_epoch=100
                   )
        )
    elif model == 'deepAR-Oracle':
        # DeepAR with the oracle dynamic-real covariates enabled
        if use_feat_static:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                        batch_size = batch_size,
                        epochs=epochs,
                        learning_rate=1e-3,
                        num_batches_per_epoch=100
                       )
            )
        else:
            estimator = DeepAREstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                        batch_size = batch_size,
                        epochs=epochs,
                        learning_rate=1e-3,
                        num_batches_per_epoch=100
                       )
            )
    elif model == 'deepARW-Oracle':
        # weighted DeepAR with oracle covariates
        if use_feat_static:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                        batch_size = batch_size,
                        epochs=epochs,
                        learning_rate=1e-3,
                        #hybridize=False,
                        num_batches_per_epoch=100
                       )
            )
        else:
            estimator = DeepARWeightEstimator(
                prediction_length=prediction_length,
                context_length= context_length,
                use_feat_static_cat=use_feat_static,
                #cardinality=cardinality,
                use_feat_dynamic_real=True,
                distr_output = distr_output,
                freq=freq,
                trainer=Trainer(ctx=ctx,
                        batch_size = batch_size,
                        epochs=epochs,
                        learning_rate=1e-3,
                        #hybridize=False,
                        num_batches_per_epoch=100
                       )
            )
    elif model == 'deepAR-nocarid':
        # car id category intentionally disabled via use_feat_static
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=use_feat_static,
            cardinality=cardinality,
            use_feat_dynamic_real=True,
            distr_output = distr_output,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                    batch_size = batch_size,
                    epochs=epochs,
                    learning_rate=1e-3,
                    num_batches_per_epoch=100
                   )
        )
    elif model == 'deepAR-multi':
        # multivariate target of dimension target_dim
        estimator = DeepAREstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            use_feat_static_cat=use_feat_static,
            #cardinality=cardinality,
            use_feat_dynamic_real=True,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                    batch_size = batch_size,
                    epochs=epochs,
                    learning_rate=1e-3,
                    num_batches_per_epoch=100
                   ),
            distr_output=MultivariateGaussianOutput(dim=target_dim),
        )
    elif model == 'simpleFF':
        estimator = SimpleFeedForwardEstimator(
            num_hidden_dimensions=[10],
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                    batch_size = batch_size,
                    epochs=epochs,
                    learning_rate=1e-3,
                    hybridize=False,
                    num_batches_per_epoch=100
                   )
        )
    elif model == 'deepFactor':
        estimator = DeepFactorEstimator(
            prediction_length=prediction_length,
            context_length= context_length,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                    batch_size = batch_size,
                    epochs=epochs,
                    learning_rate=1e-3,
                    num_batches_per_epoch=100
                   )
        )
    elif model == 'deepState':
        estimator = DeepStateEstimator(
            prediction_length=prediction_length,
            use_feat_static_cat=True,
            cardinality=cardinality,
            freq=freq,
            trainer=Trainer(ctx=ctx,
                    batch_size = batch_size,
                    epochs=epochs,
                    learning_rate=1e-3,
                    num_batches_per_epoch=100
                   )
        )
    # the remaining names are training-free baseline predictors
    elif model == 'ets':
        estimator = RForecastPredictor(method_name='ets',freq= freq, prediction_length = prediction_length)
    elif model == 'prophet':
        estimator = ProphetPredictor(freq= freq, prediction_length = prediction_length)
    elif model == 'arima':
        estimator = RForecastPredictor(method_name='arima',freq= freq, prediction_length = prediction_length, trunc_length = 200)
    elif model == 'naive':
        estimator = NaivePredictor(freq= freq, prediction_length = prediction_length)
    else:
        logger.error('model %s not support yet, quit', model)
        sys.exit(-1)
    return estimator
# In[ ]:
#
# simulation engine general
#
def init_simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
                   featuremode = stint.FEATURE_STATUS,
                   pitmodel = 0,
                   inlapmode=0,
                   train_len = 40,test_train_len=40):
    """
    Configure the `stint` simulation module for a run.

    Pushes the prepared lap-time data and all run parameters into the
    stint module's globals; performs no computation itself.

    input:
        prepared_laptimedata ; global
    """
    #
    # configurataion
    #
    # model path: <_dataset_id>/<_task_id>-<trainid>/
    #_dataset_id = 'indy2013-2018-nocarid'
    stint._inlap_status = inlapmode
    stint.init(pitmodel)

    # todo: add into stint code
    #here add new laptime_data with new features
    #
    stint.set_laptimedata(prepared_laptimedata)

    stint._dataset_id = datasetid
    stint._test_event = testevent
    #_test_event = 'Indy500-2019'

    stint._feature_mode = featuremode
    stint._context_ratio = 0.

    stint._task_id = taskid   # rank,laptime, the trained model's task
    stint._run_ts = runts     #COL_LAPTIME,COL_RANK
    stint._exp_id=expid       #rank, laptime, laptim2rank, timediff2rank...

    stint._use_mean = True
    stint._train_len = train_len
    stint._test_train_len = test_train_len
def simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
              datamode, loopcnt, featuremode = stint.FEATURE_STATUS,
              pitmodel = 0, model = 'oracle', inlapmode=0, train_len = 40,test_train_len=40):
    """
    Run `loopcnt` short-term simulations and evaluate each run.

    input:
        prepared_laptimedata ; global
    return:
        b    ; np.array of per-run evaluation results (mean printed)
        ret2 ; dict run-id -> (df, full_samples, full_tss)
    """
    # The configuration prologue was a line-for-line duplicate of
    # init_simulation(); delegate so the two can never drift apart.
    init_simulation(datasetid, testevent, taskid, runts, expid, predictionlen,
                    featuremode=featuremode, pitmodel=pitmodel,
                    inlapmode=inlapmode, train_len=train_len,
                    test_train_len=test_train_len)

    predictor = stint.load_model(predictionlen, model,trainid='indy500',epochs = epochs, exproot='./')

    ret2 = {}
    for i in range(loopcnt):
        #df, full_samples, full_tss
        ret2[i] = stint.run_simulation_shortterm(predictor, predictionlen, stint.freq, datamode=datamode)

    # evaluate every run and print the average metrics
    acc = []
    for i in ret2.keys():
        df = ret2[i][0]
        _x = stint.get_evalret_shortterm(df)
        acc.append(_x)
    b = np.array(acc)
    print(np.mean(b, axis=0))

    #save keys
    #stint._pitmodel.save_keys('pitmodel-keys.pickle')
    return b, ret2
def long_predict(predictor, sampleCnt = 100):
    """
    use the farest samples only

    Runs rolling-window evaluation over the global test_ds and stitches
    the LAST predicted step of every window into one long sample matrix,
    so each lap is predicted at the maximum horizon.

    input:
        test_ds   ; global var
        predictor
    return:
        (stitched SampleForecast, ground-truth ts of the first series)
    """
    def get_start(idx):
        # offset (in minutes) of forecast idx from the series start
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60

    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=predictor,  # predictor
        num_samples=sampleCnt,  # number of sample paths we want for evaluation
    )

    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')

    start_time, row = next(tss[0].iterrows())
    # forecasts are ordered newest-first: [-1] is the earliest window
    first_start = get_start(-1)
    last_start = get_start(0)
    print(first_start, last_start)

    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)

    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        # keep only the farthest-horizon step of each window
        newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]

    target.samples = newsamples

    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target, tss[0]
def get_alldf(dfx, year=2018):
    """Concatenate the per-run result dataframes stored in dfx.

    dfx maps a run id to a tuple whose first element is the run's
    dataframe. Returns the single dataframe itself when there is only
    one run. `year` is unused, kept for interface compatibility.
    """
    #dfx = ret[f'{model}-RANK-{year}-inlap-nopitage']
    #dfx = ret[f'{model}-TIMEDIFF-{year}-noinlap-nopitage']
    frames = [dfx[run_id][0] for run_id in dfx.keys()]
    return pd.concat(frames) if len(frames) > 1 else frames[0]
def get_alldf_mode(dfx, year=2018,mode=0):
    """
    Aggregate the per-run predictions of each (carno, startlap) pair
    into a single record using the chosen statistic.

    mode:
       0; mode
       1; mean
       2; median
    """
    dfall = get_alldf(dfx, year=year)

    cars = set(dfall.carno.values)
    startlaps = {}
    for car in cars:
        startlaps[car] = set(dfall[dfall['carno']==car].startlap.values)

    retdf = []
    for car in cars:
        for startlap in startlaps[car]:
            # all runs' rows for this (car, startlap)
            dfrec = dfall[(dfall['carno']==car) & (dfall['startlap']==startlap)]

            #get mode
            if mode == 0:
                pred_endrank = stats.mode(dfrec.pred_endrank.values).mode[0]
                #pred_endlap = stats.mode(dfrec.pred_endlap.values).mode[0]
            elif mode == 1:
                #use mean
                pred_endrank = np.mean(dfrec.pred_endrank.values)
                #pred_endlap = np.mean(dfrec.pred_endlap.values)
            elif mode == 2:
                #use mean
                pred_endrank = np.median(dfrec.pred_endrank.values)
                #pred_endlap = np.median(dfrec.pred_endlap.values)

            # take the first run's row and overwrite the prediction columns:
            # 2=startrank, 6=pred_endrank, 7=pred_diff, 8=pred_sign
            firstrec = dfrec.to_numpy()[0,:]
            firstrec[6] = pred_endrank
            firstrec[7] = pred_endrank - firstrec[2]
            if firstrec[7] == 0:
                firstrec[8] = 0
            elif firstrec[7] > 0:
                firstrec[8] = 1
            else:
                firstrec[8] = -1

            #endlap, pred_endlap
            retdf.append(firstrec)

    #dfout = pd.concat(retdf)
    dfout = pd.DataFrame(retdf, columns =['carno', 'startlap', 'startrank',
                                          'endrank', 'diff', 'sign',
                                          'pred_endrank', 'pred_diff', 'pred_sign',
                                          #'endlap','pred_endlap'
                                         ])
    print('df size:', len(dfout))
    return dfout
def get_allsamples(dfx, year=2018):
    """Collapse each run's forecast samples to its per-lap mean trajectory.

    dfx maps a run id (0..runcnt-1) to a tuple whose element [1] is a
    dict carno -> sample matrix (samples x laps) and element [2] is the
    ground-truth dict. Returns (carno -> (runs x laps) mean matrix,
    ground truth of the first run). `year` is unused.
    """
    run_ids = list(dfx.keys())
    runcnt = len(run_ids)

    full_tss = dfx[run_ids[0]][2]
    cars = list(full_tss.keys())
    samplecnt, lapcnt = dfx[run_ids[0]][1][cars[0]].shape
    print('sacmplecnt:', samplecnt, 'lapcnt:', lapcnt, 'runcnt:', runcnt)

    # one row per run for every car
    full_samples = {carno: np.zeros((runcnt, lapcnt)) for carno in cars}

    for run_id in run_ids:
        forecast = dfx[run_id][1]
        for carno in cars:
            # mean over the sample paths of this run
            full_samples[carno][run_id, :] = np.nanmean(forecast[carno], axis=0)

    return full_samples, full_tss
#straight implementation of prisk
def quantile_loss(target, quantile_forecast, q):
    """Pinball (quantile) loss summed over the series, ignoring NaNs.

    Matches the gluonts convention:
        2 * sum(|(q_hat - y) * (1[y <= q_hat] - q)|)
    """
    hit = (target <= quantile_forecast)
    pointwise = np.abs((quantile_forecast - target) * (hit - q))
    return 2.0 * np.nansum(pointwise)
def abs_target_sum(target):
    """Sum of the absolute target values, ignoring NaNs."""
    return np.nansum(np.absolute(target))
def prisk(full_samples, full_tss, verbose = False):
    """
    Compute weighted quantile losses (p-risk) with the gluonts Evaluator.

    Wraps each car's sample matrix (laps >= 12 only) in a SampleForecast
    and its ground truth in a minute-indexed DataFrame, then evaluates
    at the 0.1/0.5/0.9 quantiles. Returns the aggregated metrics dict.
    """
    carlist = full_tss.keys()
    tss = []
    forecasts = []
    forecasts_mean = []
    freq = '1min'
    # NOTE(review): `start + 12` relies on freq-aware Timestamp integer
    # arithmetic from old pandas versions — confirm the pinned pandas.
    start = pd.Timestamp("01-01-2019", freq=freq)
    for car in carlist:
        testcar = car
        # forecast over laps 12.. only (warm-up laps excluded)
        fc = SampleForecast(samples = full_samples[testcar][:, 12:], freq=freq, start_date=start + 12)
        samples = np.mean(full_samples[testcar][:, 12:], axis =0, keepdims=True)
        fc_mean = SampleForecast(samples = samples, freq=freq, start_date=start + 12)
        index = pd.date_range(start='2019-01-01 00:00:00', freq = 'T', periods = len(full_tss[testcar]))
        ts = pd.DataFrame(index = index, data = full_tss[testcar])
        tss.append(ts)
        forecasts.append(fc)
        forecasts_mean.append(fc_mean)
    evaluator = Evaluator(quantiles=[0.1, 0.5, 0.9])
    agg_metrics, item_metrics = evaluator(iter(tss), iter(forecasts), num_series=len(tss))
    if verbose:
        print(json.dumps(agg_metrics, indent=4))
    print(agg_metrics["wQuantileLoss[0.1]"], agg_metrics["wQuantileLoss[0.5]"],agg_metrics["wQuantileLoss[0.9]"])
    return agg_metrics
def prisk_direct_bysamples2(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    target: endrank
    forecast: pred_endrank
    item_id: <carno, startlap>

    The body of this function was a line-for-line duplicate of
    prisk_direct_bysamples(); delegate to the canonical implementation
    so the two can never drift apart. Kept for callers that use this name.
    """
    return prisk_direct_bysamples(full_samples, full_tss, quantiles=quantiles,
                                  startid=startid, verbose=verbose)
# In[ ]:
def prisk_direct_bysamples(full_samples, full_tss, quantiles=[0.1,0.5,0.9], startid = 12, verbose=False):
    """
    calculate prisk by <samples, tss> directly (equal to gluonts implementation)

    For every car, the per-quantile pinball losses are summed over laps
    startid.. and normalized by the total absolute target.

    target: endrank
    forecast: pred_endrank
    item_id: <carno, startlap>

    return: (metrics dict 'wQuantileLoss[q]' -> value, raw aggrisk array)
    """
    carlist = full_tss.keys()

    # per-car, per-quantile pinball losses and per-car target mass
    prisk = np.zeros((len(carlist), len(quantiles)))
    target_sum = np.zeros((len(carlist)))
    aggrisk = np.zeros((len(quantiles)))

    for carid, carno in enumerate(carlist):
        # for this car
        forecast = full_samples[carno]
        target = full_tss[carno]

        #calc quantiles
        # len(quantiles) x 1
        quantile_forecasts = np.quantile(forecast, quantiles, axis=0)
        for idx, q in enumerate(quantiles):
            q_forecast = quantile_forecasts[idx]
            prisk[carid, idx] = quantile_loss(target[startid:], q_forecast[startid:], q)
        target_sum[carid] = abs_target_sum(target[startid:])

        if verbose==True and carno==3:
            print('target:', target[startid:])
            print('forecast:', q_forecast[startid:])
            print('target_sum:', target_sum[carid])
            print('quantile_forecasts:', quantile_forecasts[:,startid:])

    #agg
    #aggrisk = np.mean(prisk, axis=0)
    prisk_sum = np.nansum(prisk, axis=0)
    if verbose==True:
        print('prisk:',prisk)
        print('prisk_sum:',prisk_sum)
        print('target_sum:',target_sum)

    # normalize by the total absolute target mass (gluonts wQuantileLoss)
    for idx, q in enumerate(quantiles):
        aggrisk[idx] = np.divide(prisk_sum[idx], np.sum(target_sum))

    agg_metrics = {}
    for idx, q in enumerate(quantiles):
        agg_metrics[f'wQuantileLoss[{q}]'] = aggrisk[idx]
    print(agg_metrics.values())
    return agg_metrics, aggrisk
def clear_samples(full_samples, full_tss, clearidx):
    """
    Return deep copies of <full_samples, full_tss> with the laps listed
    in ``clearidx`` masked out (set to NaN) for every car.

    Args:
        full_samples: dict carno -> (nsamples x nlaps) sample matrix
        full_tss: dict carno -> ground-truth series
        clearidx: lap indices to blank out
    Returns:
        (ret_samples, ret_tss): masked copies; the inputs are untouched.
    """
    import copy
    masked_samples = copy.deepcopy(full_samples)
    masked_tss = copy.deepcopy(full_tss)
    for carno in masked_tss.keys():
        # blank the selected lap columns in every sample path,
        # and the matching entries of the ground truth
        masked_samples[carno][:, clearidx] = np.nan
        masked_tss[carno][clearidx] = np.nan
    return masked_samples, masked_tss
def do_rerank(dfout, short=True):
    """
    Re-sort the predicted end ranks globally within each start lap.

    The raw model output for the target can be float; for each startlap
    the pred_endrank values are replaced by their 0-based rank order and
    pred_diff / pred_sign are recomputed from the new ranks.

    Columns handled: carno, startlap, startrank, endrank, diff, sign,
    pred_endrank, pred_diff, pred_sign, endlap, pred_endlap (the last two
    only when short=False).

    Args:
        dfout: prediction dataframe in the column layout above.
        short: when True the input/output carries 9 columns (no endlap /
            pred_endlap).
    Returns:
        A new integer dataframe with reranked predictions.
    """
    cols=['carno','startlap','startrank','endrank','diff','sign','pred_endrank','pred_diff','pred_sign','endlap','pred_endlap']
    col = {name: pos for pos, name in enumerate(cols)}
    print('rerank...')
    stacked = None
    for lap in set(dfout.startlap.values):
        block = dfout[dfout['startlap']==lap].to_numpy()
        # double argsort turns the float predictions into their rank order
        order = np.argsort(block[:, col['pred_endrank']], axis=0)
        block[:, col['pred_endrank']] = np.argsort(order, axis=0)
        # recompute the diff/sign targets from the reranked predictions
        block[:, col['pred_diff']] = block[:, col['pred_endrank']] - block[:, col['endrank']]
        for row in block:
            delta = row[col['pred_diff']]
            row[col['pred_sign']] = 0 if delta == 0 else (1 if delta > 0 else -1)
        stacked = block if stacked is None else np.vstack((stacked, block))
    out_cols = cols[:-2] if short else cols
    return pd.DataFrame(stacked.astype(int), columns = out_cols)
# In[ ]:
def long_predict_bymloutput_multirun(output, dfin, sampleCnt=100):
    """
    Stitch per-startlap ML predictions from multiple runs into one long
    sample-based forecast over the whole race.

    Relies on the module-level globals ``test_ds`` and ``_predictor`` to
    obtain the gluonts forecast time grid; the forecast *values* come
    from ``dfin``.

    Args:
        output: output tag (currently unused inside this function).
        dfin: dataframe with 'startlap' and 'pred_endrank' columns; for
            each startlap the rows from the multiple runs fill the sample
            dimension (their count must match sampleCnt — presumably
            guaranteed by the caller; verify).
        sampleCnt: number of sample paths requested for evaluation.
    Returns:
        (target, tss[0]): the stitched SampleForecast and the observed
        time series.
    """
    def get_start(idx):
        # offset (in minutes, == laps at freq '1min') of forecast idx's
        # start relative to the start of the observed series
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=_predictor,  # predictor
        num_samples=sampleCnt,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # forecasts are ordered newest-first: index -1 is the earliest window
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # one wide sample matrix covering the whole race; NaN where no prediction
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
        # get prediction from ml output
        # pos = laps
        # 1 ... 10 | 11 <- start pos in forecasts
        # 0 ... 9 | 10 <- 9 is the startlap
        #
        # NOTE(review): the -2 offset maps the forecast start position to
        # dfin's 'startlap' indexing (the caller get_ranknet_multirun
        # already shifts startlap by -1) — confirm the alignment.
        startlap = start_pos - 2
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values
            #pred_val = _rec.pred_endrank.values
            #make sure shape match, 100 samples
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)
    target.samples = newsamples
    print('multirun target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bymloutput(output, dfin):
    """
    Stitch per-startlap predictions from a single ML run into one long
    sample-based forecast over the whole race.

    Single-run variant of long_predict_bymloutput_multirun: only the
    first row per startlap is used, so each lap gets one broadcast value
    across all 100 sample paths. Relies on the module-level globals
    ``test_ds`` and ``_predictor``.

    Args:
        output: output tag (currently unused inside this function).
        dfin: dataframe with 'startlap' and 'pred_endrank' columns.
    Returns:
        (target, tss[0]): the stitched SampleForecast and the observed
        time series.
    """
    def get_start(idx):
        # offset (in minutes, == laps at freq '1min') of forecast idx's
        # start relative to the start of the observed series
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=_predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    # forecasts are ordered newest-first: index -1 is the earliest window
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # one wide sample matrix covering the whole race; NaN where no prediction
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
        # get prediction from ml output
        # pos = laps
        # 1 ... 10 | 11 <- start pos in forecasts
        # 0 ... 9 | 10 <- 9 is the startlap
        #
        # NOTE(review): -2 offset here vs -1 in long_predict_bydf —
        # confirm which indexing convention each caller's dfin uses.
        startlap = start_pos - 2
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values[0]
            #pred_val = _rec.pred_endrank.values
            #make sure shape match, 100 samples
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)
    target.samples = newsamples
    print('target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def long_predict_bysamples(output, samples, tss):
    """
    Build a long forecast directly from a precomputed sample matrix.

    use the farest samples only

    Note: the ``tss`` parameter is immediately shadowed by the series
    regenerated from the module-level globals ``test_ds``/``_predictor``;
    only ``samples`` contributes forecast values.

    Args:
        output: output tag (unused inside this function).
        samples: sample matrix aligned to race laps (0-based values;
            +1 is applied for visualization).
        tss: unused — overwritten below.
    Returns:
        (target, tss[0]): SampleForecast wrapping the samples slice and
        the observed time series.
    """
    def get_start(idx):
        # offset (in minutes, == laps at freq '1min') of forecast idx's
        # start relative to the start of the observed series
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor=_predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print(first_start, last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    #sample array size: last_start - first_start + npredict
    arraysize = last_start - first_start + npredict
    #error here
    #target.samples = samples[:,-len(forecasts)-1:] + 1
    #target.samples = samples[:, 10 + npredict:] + 1
    target.samples = samples[:, first_start:first_start + arraysize] + 1
    print('long_predict_bysamples==>target samples shape:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target, tss[0]
#
# different idx format to bymloutput
#
def long_predict_bydf(output, dfin):
    """
    Stitch per-startlap predictions from a dataframe into one long
    sample-based forecast.

    Identical to long_predict_bymloutput except for the startlap
    indexing: here startlap = start_pos - 1 (vs -2 there). Relies on the
    module-level globals ``test_ds`` and ``_predictor``.

    Args:
        output: output tag (currently unused inside this function).
        dfin: dataframe with 'startlap' and 'pred_endrank' columns.
    Returns:
        (target, tss[0]): the stitched SampleForecast and the observed
        time series.
    """
    def get_start(idx):
        # offset (in minutes, == laps at freq '1min') of forecast idx's
        # start relative to the start of the observed series
        td = forecasts[idx].start_date - start_time
        return td.days*24*60 + td.seconds//60
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,  # test dataset
        predictor= _predictor,  # predictor
        num_samples=100,  # number of sample paths we want for evaluation
    )
    forecasts = list(forecast_it)
    tss = list(ts_it)
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    start_time, row = next(tss[0].iterrows())
    first_start = get_start(-1)
    last_start = get_start(0)
    print('first start:', first_start, 'last start:', last_start)
    import copy
    target = copy.deepcopy(forecasts[-1])
    #100, 10
    nsample, npredict = target.samples.shape
    print('sampel# x predictlen: ', nsample, npredict)
    # one wide sample matrix covering the whole race; NaN where no prediction
    newsamples = np.zeros((nsample, last_start - first_start + npredict))
    newsamples[:,:] = np.nan
    for idx in range(len(forecasts)):
        #copy samples
        start_pos = get_start(idx)
        pos = start_pos - first_start + npredict - 1
        #copy sample to block
        #newsamples[:, pos:pos + npredict] = forecasts[idx].samples
        #newsamples[:, pos + npredict - 1] = forecasts[idx].samples[:,-1]
        # get prediction from ml output
        # pos = laps
        # 1 ... 10 | 11 <- start pos in forecasts
        # 0 ... 9 | 10 <- 9 is the startlap
        #
        # NOTE: -1 offset here, differs from long_predict_bymloutput (-2)
        startlap = start_pos - 1
        #print('start pos:', start_pos, 'pos:',pos, 'startlap:', startlap)
        _rec = dfin[dfin['startlap']== startlap]
        if len(_rec) > 0:
            # rank start from 1 for visualization
            pred_val = _rec.pred_endrank.values[0]
            #pred_val = _rec.pred_endrank.values
            #make sure shape match, 100 samples
            #newsamples[:, pos + npredict - 1] = pred_val + 1
            newsamples[:, pos] = pred_val + 1
            #print('startlap:', startlap, 'predrank:', pred_val)
    target.samples = newsamples
    print('target samples:', target.samples.shape)
    #plot_prob_forecasts_ex([tss[0]],[target],output)
    return target,tss[0]
def get_ranknet_multirun(retdata, testcar, sampleCnt=100):
    """
    Collect the per-run ranknet outputs for one car and stitch them into
    a single long-horizon forecast.

    Args:
        retdata: dict run_id -> (df, ...) simulation outputs.
        testcar: car number to select.
        sampleCnt: sample-path count forwarded to the stitcher.
    Returns:
        (target, tss) from long_predict_bymloutput_multirun.
    """
    selected = [retdata[run_id][0] for run_id in retdata.keys()]
    per_car = [frame[frame['carno']==testcar] for frame in selected]
    dfin = pd.concat(per_car)
    print('dfin_ranknet size:', len(dfin))
    # shift to the 0-based lap/rank indexing the ml-model format expects
    for column in ('startlap', 'startrank', 'endrank'):
        dfin[column] = dfin[column] - 1
    return long_predict_bymloutput_multirun('ranknet-rank', dfin, sampleCnt=sampleCnt)
# In[ ]:
def ploth(ts_entry, forecast_entry, pits,caution, pitstop,outputfile,
        colors = ['r','g','m'],
        plabels= ['observed','svr','arima','ranknet'],
        ylabel = 'RANK'):
    """
    Plot observed series plus one forecast fan per entry, one subplot per
    forecast, with pit-stop markers and a per-lap race-status strip; the
    figure is shown and saved as '<outputfile>.pdf'.

    Args:
        ts_entry: list of observed time series (pandas), one per subplot.
        forecast_entry: list of gluonts forecast objects, same length.
        pits: rows [completed_laps, rank, last_laptime, ...] for pit laps.
        caution, pitstop: 0/1 per-lap status arrays.
        outputfile: figure title and output file stem.
        colors: line color per forecast entry.
        plabels: legend labels; plabels[0] is the observed series.
        ylabel: 'RANK' or a laptime label; switches axis limits and which
            pits column is plotted.
    """
    #plot_length = int(forecast_entry[0].samples.shape[1] *1.2)
    #plot_length = forecast_entry[0].samples.shape[1] + 10
    #prediction_intervals = (50.0, 90.0)
    prediction_intervals = [90.0]
    #legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    figcnt = len(forecast_entry)
    #fig, axs = plt.subplots(figcnt,1, figsize=(8,6))
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    #colors = ['r','g','m']
    #plabels = ['observed','svr','arima','ranknet']
    for idx in range(figcnt):
        ax = plt.subplot(figcnt, 1, idx+1)
        #ax = plt.subplot(1, figcnt, idx+1)
        #ts_entry.iloc[-plot_length:,0].plot(ax=axs, linewidth=1) # plot the time series
        #ts_entry.iloc[-plot_length:,0].plot(ax=axs[idx], linewidth=1) # plot the time series
        #plot_length = int(forecast_entry[idx].samples.shape[1] *1.2)
        # observed series
        ts_entry[idx].iloc[:,0].plot(linewidth=1, color='b',
                marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # currank: the observed series shifted forward by 2 laps
        sv = ts_entry[idx].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq='1min') + 2
        date_index = pd.date_range(start, periods = len(sv)-2, freq='1min')
        df2 = pd.DataFrame(sv[:-2], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
                marker='+', alpha=0.7, zorder=-1, label='CurRank')
        #for idx in range(len(forecast_entry)):
        #    forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='g')
        forecast_entry[idx].copy_dim(0).plot(prediction_intervals=prediction_intervals,
                color=colors[idx],label=plabels[idx+1], zorder=10)
        #forecast_entry[1].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='b')
        #forecast_entry[2].copy_dim(0).plot(prediction_intervals=prediction_intervals, color='r')
        #add mean line, compare with median
        #if forecast_entry[idx].samples.shape[0] > 1:
        if idx>3:
            mean_forecast = copy.deepcopy(forecast_entry[idx])
            mean_forecast.samples = np.mean(mean_forecast.samples, axis=0).reshape((1,-1))
            mean_forecast.copy_dim(0).plot(prediction_intervals=prediction_intervals,
                    color='g',label='use-mean', zorder=10)
        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        #if idx==0:
        ax.set_ylabel(ylabel)
        if idx==0:
            plt.title(outputfile)
        # relabel x ticks so laps are numbered from 1 every 5 laps
        locs, labels = plt.xticks()
        #plt.xticks(locs, range(len(locs)))
        start_loc = locs[0]
        offset = range(0, 200, 5)
        #new_locs = range(start_loc , start_loc+200, 10)
        new_locs = [start_loc + x for x in offset]
        #new_labels = [str(x-start_loc + 1) for x in new_locs]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[idx])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            # NOTE(review): reuses xl from a previous loop iteration;
            # safe because the first branch always runs for idx 0.
            xlim_h = len(ts_entry[idx])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        #ax.set_xlim((80,110))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)
        #add racestatus: pit markers + the colored status strip
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcar(carno):
    """
    Plot rank forecasts for one car.

    Reads the module-level ``alldata`` (carno -> (tss list, forecast
    list)) and ``rankdata``.
    """
    series, predictions = alldata[carno]
    pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, rankdata)
    print(np.where(pitstop==1))
    ploth(series[:5], predictions[:5], pits, caution, pitstop,
            'ranknet-rf-rank-forecast-%d'%carno,
            colors = ['y','c','g','m','r'],
            plabels= ['observed','SVR','RF','Arima','RrankNet-Oracle','RrankNet-MLP'])
def plotcar_laptime(carno):
    """
    Plot laptime forecasts for one car.

    Reads the module-level ``alldata`` (carno -> (tss list, forecast
    list)) and ``rankdata``.
    """
    series, predictions = alldata[carno]
    pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, rankdata)
    print(np.where(pitstop==1))
    ploth(series, predictions, pits, caution, pitstop,
            'ranknet-oracle-laptime-forecast-%d'%carno,
            colors = ['m','r'],
            plabels= ['observed','RrankNet-Oracle','RrankNet-MLP'],
            ylabel='LapTime')
def plotrank(outputfile, mode='RANK' ):
    """
    Plot the observed rank (or laptime) curve for every car in the
    module-level ``alldata``, one subplot per car, with pit markers and
    the race-status strip; saved as '<outputfile>.pdf'.

    Args:
        outputfile: output file stem.
        mode: 'RANK' plots the rank series; anything else plots laptimes
            (different y-limits, pits column and strip placement).
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        #target_svr, target_rf,target_arima, target_oracle, target_ranknet_1run = savedata[carno]
        #target_oracle(by longpredict), tss_oracle_multirun,tss_ranknet_multirun
        tsss, targets = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, rankdata)
        print(np.where(pitstop==1))
        ax = plt.subplot(figcnt, 1, idx+1)
        if mode == 'RANK':
            ax.plot(ranks, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='Rank')
            ax.set_ylim((-5,+35))
            ax.plot(pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            #add racestatus
            add_status(ax,0, caution, pitstop)
        else:
            ax.plot(laptimes, linewidth=1, color='b',marker='*', alpha=0.7, zorder=-1, label='LapTime')
            ax.set_ylim((30,140))
            ax.plot(pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            #add racestatus
            add_status(ax,0, caution, pitstop,y=32, height=5)
        ax.set_xlim((0,200))
        ax.set_ylabel('car-%d'%carno)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def plotcarx(carno):
    """
    Plot rank forecasts for one car, splicing the oracle run from the
    module-level ``oracledata`` into slot 2 of ``alldata``'s entries.

    Reads the module-level ``alldata``, ``oracledata`` and ``rankdata``.
    """
    series, predictions = alldata[carno]
    oracle_series, oracle_predictions = oracledata[carno]
    # replace slot 2 with the weighted-oracle run
    series[2] = oracle_series[1]
    predictions[2] = oracle_predictions[1]
    pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, rankdata)
    print(np.where(pitstop==1))
    ploth(series[:5], predictions[:5], pits, caution, pitstop,
            'ranknet-rf-rank-forecast-%d'%carno,
            colors = ['y','c','g','m','r'],
            plabels= ['observed','SVR','RF','Weighted-Oracle','RrankNet-Oracle','RrankNet-MLP'])
def plotoracle(alldata, carno, destdir):
    """
    Plot the oracle long-forecast variants for one car.

    Args:
        alldata: dict carno -> (tss list, forecast list).
        carno: car number to plot.
        destdir: directory prefix for the output figure.
    Reads the module-level ``rankdata``.
    """
    figfile = destdir + 'ranknet-oracle-forecast-%d'%carno
    series, predictions = alldata[carno]
    pits, cautions, caution, pitstop, ranks, laptimes = get_racestatus(carno, rankdata)
    print(np.where(pitstop==1))
    ploth(series, predictions, pits, caution, pitstop,
            figfile,
            colors = ['y','c','g','m','r'],
            plabels= ['observed','1run-samples','1run-df','multimean','norerank-multimean','mrun-samples'])
def plotallcars(alldata, outputfile, drawid = 0,
        colors = ['g','c','m','r','y'],
        plabels= ['observed','1run-samples','1run-df','multimean','norerank-multimean','mrun-samples'],
        ylabel='RANK'):
    """
    Plot a single figure for all cars, one subplot per car, showing the
    observed series, the shifted 'CurRank' series and one selected
    long-forecast entry; saved as '<outputfile>.pdf'.

    Uses the module-level globals ``prediction_length``, ``freq`` and
    ``rankdata``.

    Args:
        alldata: dict carno -> (tss list, forecast list).
        outputfile: output file stem.
        drawid: index of the long-prediction result in alldata[carno]
            to draw.
        colors / plabels: per-forecast styling; plabels[0] is observed.
        ylabel: 'RANK' or a laptime label (switches limits and pits col).
    """
    figcnt = len(alldata)
    fig, axs = plt.subplots(1, figcnt, figsize=(12,3*figcnt))
    prediction_intervals = [90.0]
    #legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals][::-1]
    legend = ["observations", "median prediction"] + [f"{k}% prediction interval" for k in prediction_intervals]
    font = {'family': 'serif',
            'color': 'darkred',
            'weight': 'normal',
            'size': 12,
            }
    carlist = list(alldata.keys())
    for idx, carno in enumerate(carlist):
        #target_svr, target_rf,target_arima, target_oracle, target_ranknet_1run = savedata[carno]
        #target_oracle(by longpredict), tss_oracle_multirun,tss_ranknet_multirun
        ts_entry, forecast_entry = alldata[carno]
        pits, cautions, caution, pitstop,ranks,laptimes = get_racestatus(carno, rankdata)
        print(np.where(pitstop==1))
        ax = plt.subplot(figcnt, 1, idx+1)
        # observed
        ts_entry[drawid].iloc[:,0].plot(linewidth=1, color='b',
                marker='*', alpha=0.7, zorder=-1, label=plabels[0])
        # currank: observed series shifted forward by prediction_length
        sv = ts_entry[drawid].iloc[:,0].to_numpy()
        start = pd.Timestamp("01-01-2019", freq=freq) + prediction_length
        date_index = pd.date_range(start, periods = len(sv)-prediction_length, freq=freq)
        df2 = pd.DataFrame(sv[:-prediction_length], index=date_index)
        df2.iloc[:,0].plot(linewidth=0.5, color='k',
                marker='+', alpha=0.7, zorder=-1, label='CurRank')
        #forecast
        forecast_entry[drawid].copy_dim(0).plot(prediction_intervals=prediction_intervals,
                color=colors[drawid],label=plabels[drawid+1], zorder=10)
        if idx == figcnt-1:
            ax.set_xlabel('Lap')
        ax.set_ylabel(ylabel)
        # relabel x ticks so laps are numbered from 1 every 5 laps
        locs, labels = plt.xticks()
        #plt.xticks(locs, range(len(locs)))
        start_loc = locs[0]
        offset = range(0, 200, 5)
        #new_locs = range(start_loc , start_loc+200, 10)
        new_locs = [start_loc + x for x in offset]
        #new_labels = [str(x-start_loc + 1) for x in new_locs]
        new_labels = [str(x+1) for x in offset]
        plt.xticks(new_locs, new_labels)
        if figcnt==1 or idx < figcnt -1:
            print('xlim:', plt.xlim())
            xl, xr = plt.xlim()
            xlim_h = len(ts_entry[drawid])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        elif idx == figcnt - 1:
            # NOTE(review): reuses xl from a previous loop iteration;
            # safe because the first branch always runs for idx 0.
            xlim_h = len(ts_entry[drawid])
            #xlim_h = 100
            ax.set_xlim((xl+0,xl+xlim_h))
        #plt.title(outputfile)
        plt.text(xl + xlim_h - 15, 35, f'car-{carno}',fontdict=font)
        if ylabel=='RANK':
            ax.set_ylim((-5,+40))
        else:
            ax.set_ylim((25,175))
        #ax.set_xlim((80,110))
        ax.set_zorder(-1)
        plt.grid(which="both", zorder=-1)
        ax.set_axisbelow(True)
        l=plt.legend(prop={'size': 10},loc='upper left')
        l.set_zorder(0.6)
        #add racestatus: pit markers + the colored status strip
        if ylabel=='RANK':
            ax.plot(xl+pits[:,0]-1,pits[:,1],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop)
        else:
            ax.plot(xl+pits[:,0]-1,pits[:,2],'^',color='r', label='PitStop', linewidth=2,alpha=0.7, zorder=-1)
            add_status(ax,xl, caution, pitstop,y=27, height=3)
    plt.show()
    fig.tight_layout()
    fig.savefig(outputfile + '.pdf')
def get_racestatus(carno, rankdata):
    """
    Extract the race-status series for one car from the rank dataframe.

    completed_laps starts at 0; the per-lap return values drop the first
    row so index 0 corresponds to completed_laps == 1.

    Args:
        carno: car number to select.
        rankdata: dataframe with car_number, completed_laps, rank,
            last_laptime, time_behind_leader, lap_status ('P' = pit) and
            track_status ('Y' = caution) columns.
    Returns:
        pits: [completed_laps, rank, last_laptime, time_behind_leader]
            rows of the pit-stop laps (full series, no row dropped)
        cautions: same layout for the caution laps (full series)
        caution, pitstop: 0/1 per-lap arrays (first row dropped)
        ranks, laptimes: per-lap column arrays (first row dropped)
    """
    car = rankdata[rankdata['car_number']==carno]
    data = car[['completed_laps','rank','last_laptime','time_behind_leader']].values
    # encode the status letters as 0/1 flags
    pitstop = np.array([1 if x=='P' else 0 for x in car[['lap_status']].values])
    caution = np.array([1 if x=='Y' else 0 for x in car[['track_status']].values])
    pits = data[np.where(pitstop == 1)]
    cautions = data[np.where(caution == 1)]
    ranks = car[['rank']].values
    laptimes = car[['last_laptime']].values
    # drop the completed_laps==0 row from the per-lap series
    return pits, cautions, caution[1:], pitstop[1:], ranks[1:], laptimes[1:]
# Status-strip colors used by add_status (hex alternatives kept for reference).
#red = '#ff8080'
red = 'red'        # pit-stop lap
#yellow = '#8080ff'
yellow = 'yellow'  # caution (yellow-flag) lap
#green = '#80ff80'
green = 'green'    # normal (green-flag) lap
def add_status(axs,xl, caution, pitstop, maxlap= 200, y=-4, height=2):
    """
    Draw the per-lap race-status strip on an axes: one 1-wide rectangle
    per lap, green for normal laps, yellow for caution laps, red for
    pit-stop laps (pit wins when both flags are set).

    Args:
        axs: target matplotlib axes.
        xl: x offset (left edge of the axes' data range).
        caution, pitstop: 0/1 per-lap status arrays.
        maxlap: upper bound on the number of laps to draw.
        y: bottom y coordinate of the strip.
        height: strip height in data units.
    """
    # Fix: honor the caller's maxlap cap but never run past the data.
    # (Previously the parameter was unconditionally overwritten by
    # min(len(caution), len(pitstop)) and therefore ignored.)
    maxlap = min(maxlap, len(caution), len(pitstop))
    for lap in range(maxlap):
        fc = green
        if caution[lap] == 1:
            fc = yellow
        if pitstop[lap] == 1:
            fc = red  # pit stop overrides caution
        ec = fc
        rectangle = plt.Rectangle((lap+xl-0.5,y), 1, height, fc=fc,ec=ec)
        #plt.gca().add_patch(rectangle)
        axs.add_patch(rectangle)
# In[ ]:
def get_config():
    """
    Snapshot the module-level experiment settings as a flat list.

    The order is significant: callers compare two snapshots positionally
    (e.g. config file values vs. hard-coded defaults).
    """
    return [
        _savedata,
        _skip_overwrite,
        _inlap_status,
        _feature_mode,
        _featureCnt,
        freq,
        _train_len,
        prediction_length,
        context_ratio,
        context_length,
        contextlen,
        dataset,
        epochs,
        gpuid,
        _use_weighted_model,
        trainmodel,
        _use_cate_feature,
        use_feat_static,
        distroutput,
        batch_size,
        loopcnt,
        _test_event,
        testmodel,
        pitmodel,
        year,
    ]
# ## run
# In[ ]:
# Entry point: a single .ini config file path is required on the command line.
if len(sys.argv) != 2:
    print('usage: RankNet-QuickTest.py <configfile>')
    sys.exit(0)
configfile = sys.argv[1]
base=os.path.basename(configfile)
configname = os.path.splitext(base)[0]
WorkRootDir = 'QuickTestOutput'
#configname = 'weighted-noinlap-nopitage-nocate-c60-drank'
#configfile = f'{configname}.ini'
if not os.path.exists(configfile):
    # NOTE(review): execution continues after this message; config.read on a
    # missing file yields no sections and the getters below will raise.
    print('config file not exists error:', configfile)
if configfile != '':
    # load every experiment knob from the [RankNet-QuickTest] section
    config = configparser.RawConfigParser()
    #config.read(WorkRootDir + '/' + configfile)
    config.read(configfile)
    #set them back
    section = "RankNet-QuickTest"
    _savedata = config.getboolean(section, "_savedata")
    _skip_overwrite = config.getboolean(section, "_skip_overwrite")
    _inlap_status = config.getint(section, "_inlap_status") #0
    _feature_mode = config.getint(section, "_feature_mode") #FEATURE_STATUS
    _featureCnt = config.getint(section, "_featureCnt") #9
    freq = config.get(section, "freq") #"1min"
    _train_len = config.getint(section, "_train_len") #40
    prediction_length = config.getint(section, "prediction_length") #2
    context_ratio = config.getfloat(section, "context_ratio") #0.
    context_length = config.getint(section, "context_length") #40
    dataset= config.get(section, "dataset") #'rank'
    epochs = config.getint(section, "epochs") #1000
    gpuid = config.getint(section, "gpuid") #5
    _use_weighted_model = config.getboolean(section, "_use_weighted_model")
    trainmodel = config.get(section, "trainmodel") #'deepARW-Oracle' if _use_weighted_model else 'deepAR-Oracle'
    _use_cate_feature = config.getboolean(section, "_use_cate_feature")
    distroutput = config.get(section, "distroutput") #'student'
    batch_size = config.getint(section, "batch_size") #32
    loopcnt = config.getint(section, "loopcnt") #2
    _test_event = config.get(section, "_test_event") #'Indy500-2018'
    testmodel = config.get(section, "testmodel") #'oracle'
    pitmodel = config.get(section, "pitmodel") #'oracle'
    year = config.get(section, "year") #'2018'
    # derived aliases
    contextlen = context_length
    use_feat_static = _use_cate_feature
    #config1 = get_config()
else:
    print('Warning, please use config file')
    sys.exit(0)
#
# global settings
#
# NOTE(review): this section runs AFTER the config file has been parsed and
# re-assigns most of the values loaded above with hard-coded defaults —
# presumably a leftover notebook-cell ordering issue; confirm whether the
# config file or these literals are meant to win (see the commented
# config1/config2 snapshots used to compare them).
#_savedata = False
_savedata = True
_skip_overwrite = True
#inlap status =
#  0 , no inlap
#  1 , set previous lap
#  2 , set the next lap
_inlap_status = 0
#
# featuremode in [FEATURE_STATUS, FEATURE_PITAGE]:
#
_feature_mode = FEATURE_LEADERPITCNT
_featureCnt = 9
#
# training parameters
#
freq = "1min"
_train_len = 60
prediction_length = 2
context_ratio = 0.
context_length = 60
contextlen = context_length
dataset='rank'
epochs = 1000
#epochs = 10
gpuid = 5
#'deepAR-Oracle','deepARW-Oracle'
_use_weighted_model = True
trainmodel = 'deepARW-Oracle' if _use_weighted_model else 'deepAR-Oracle'
_use_cate_feature = False
use_feat_static = _use_cate_feature
distroutput = 'student'
batch_size = 32
#
# test parameters
#
loopcnt = 2
_test_event = 'Indy500-2018'
testmodel = 'oracle'
pitmodel = 'oracle'
year = '2018'
#config2 = get_config()
# In[ ]:
# new added parameters
_test_train_len = 40  # train_len forced to 40 in test mode for comparable results
#loopcnt = 2
#featurestr = {FEATURE_STATUS:'nopitage',FEATURE_PITAGE:'pitage',FEATURE_LEADERPITCNT:'leaderpitcnt'}
#cur_featurestr = featurestr[_feature_mode]
print('current configfile:', configfile)
cur_featurestr = decode_feature_mode(_feature_mode)
print('feature_mode:', _feature_mode, cur_featurestr)
print('testmodel:', testmodel)
print('pitmodel:', pitmodel)
# In[ ]:
#
# string map: encode the boolean/int knobs into path components
#
inlapstr = {0:'noinlap',1:'inlap',2:'outlap'}
weightstr = {True:'weighted',False:'noweighted'}
catestr = {True:'cate',False:'nocate'}
#
# input data parameters
#
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events_id={key:idx for idx, key in enumerate(events)}
dbid = f'Indy500_{years[0]}_{years[-1]}_v{_featureCnt}_p{_inlap_status}'
_dataset_id = '%s-%s'%(inlapstr[_inlap_status], cur_featurestr)
#
# internal parameters
#
distr_outputs ={'student':StudentTOutput(),
        'negbin':NegativeBinomialOutput()
        }
distr_output = distr_outputs[distroutput]
#
# experiment id encodes the major knobs so each configuration writes to
# its own output directory
#
experimentid = f'{weightstr[_use_weighted_model]}-{inlapstr[_inlap_status]}-{cur_featurestr}-{catestr[_use_cate_feature]}-c{context_length}'
#
#
#
outputRoot = f"{WorkRootDir}/{experimentid}/"
# standard output file names
LAPTIME_DATASET = f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
STAGE_DATASET = f'stagedata-{dbid}.pickle'
EVALUATION_RESULT_DF = f'evaluation_result_d{dataset}_m{testmodel}.csv'
LONG_FORECASTING_DFS = f'long_forecasting_dfs_d{dataset}_m{testmodel}.pickle'
FORECAST_FIGS_DIR = f'forecast-figs-d{dataset}_m{testmodel}/'
# ### 1. make laptime dataset
# In[ ]:
stagedata = {}
global_carids = {}
os.makedirs(outputRoot, exist_ok=True)
#check the dest files first: reuse cached pickles when allowed
if _skip_overwrite and os.path.exists(outputRoot + LAPTIME_DATASET) and os.path.exists(outputRoot + STAGE_DATASET):
    #
    # load data
    #
    print('Load laptime and stage dataset:',outputRoot + LAPTIME_DATASET, outputRoot + STAGE_DATASET)
    with open(outputRoot + LAPTIME_DATASET, 'rb') as f:
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    with open(outputRoot + STAGE_DATASET, 'rb') as f:
        stagedata = pickle.load(f, encoding='latin1')
else:
    # rebuild from the raw event data and assign globally unique car ids
    cur_carid = 0
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        print('%s: carno=%d, lapnum=%d'%(event, len(carlist), len(laplist)))
        #build the carid map: same car number shares one id across events
        for car in carlist:
            if car not in global_carids:
                global_carids[car] = cur_carid
                cur_carid += 1
    laptime_data = get_laptime_dataset(stagedata,inlap_status = _inlap_status)
    if _savedata:
        import pickle
        #stintdf.to_csv('laptime-%s.csv'%year)
        #savefile = outputRoot + f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
        savefile = outputRoot + LAPTIME_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            #pack [global_carids, laptime_data]
            savedata = [global_carids, laptime_data]
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        #savefile = outputRoot + f'stagedata-{dbid}.pickle'
        savefile = outputRoot + STAGE_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            #pack [global_carids, laptime_data]
            savedata = stagedata
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
# ### 2. make gluonts db
# In[ ]:
outdir = outputRoot + _dataset_id
os.makedirs(outdir, exist_ok=True)
# select which time-series column the models run on
if dataset == 'laptime':
    subdir = 'laptime-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_LAPTIME
elif dataset == 'timediff':
    subdir = 'timediff-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_TIMEDIFF
elif dataset == 'rank':
    subdir = 'rank-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_RANK
else:
    # NOTE(review): falls through with subdir/_run_ts undefined, so the
    # next line would raise NameError for an unsupported dataset.
    print('error, dataset not support: ', dataset)
_task_dir = f'{outdir}/{subdir}/'
#
#dbname, train_ds, test_ds = makedbs()
#
useeid = False
interpolate = False
#ipstr = '-ip' if interpolate else '-noip'
ipstr = '%s-%s'%('ip' if interpolate else 'noip', 'eid' if useeid else 'noeid')
dbname = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}.pickle'
laptimedb = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}-newlaptimedata.pickle'
#check the dest files first: reuse cached pickles when allowed
if _skip_overwrite and os.path.exists(dbname) and os.path.exists(laptimedb):
    print('Load Gluonts Dataset:',dbname)
    with open(dbname, 'rb') as f:
        freq, prediction_length, cardinality, train_ds, test_ds = pickle.load(f, encoding='latin1')
    print('.......loaded data, freq=', freq, 'prediction_length=', prediction_length)
    print('Load New Laptime Dataset:',laptimedb)
    with open(laptimedb, 'rb') as f:
        prepared_laptimedata = pickle.load(f, encoding='latin1')
else:
    # cardinality of the static categorical features (car id, and
    # optionally event id)
    if useeid:
        cardinality = [len(global_carids), len(laptime_data)]
    else:
        cardinality = [len(global_carids)]
    prepared_laptimedata = prepare_laptimedata(prediction_length, freq, test_event = _test_event,
            train_ratio=0, context_ratio = 0.,shift_len = prediction_length)
    train_ds, test_ds,_,_ = make_dataset_byevent(prepared_laptimedata, prediction_length,freq,
            useeid=useeid, run_ts=_run_ts,
            test_event=_test_event, log_transform =False,
            context_ratio=0, train_ratio = 0)
    if _savedata:
        print('Save Gluonts Dataset:',dbname)
        with open(dbname, 'wb') as f:
            savedata = [freq, prediction_length, cardinality, train_ds, test_ds]
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        print('Save preprocessed laptime Dataset:',laptimedb)
        with open(laptimedb, 'wb') as f:
            pickle.dump(prepared_laptimedata, f, pickle.HIGHEST_PROTOCOL)
# ### 3. train the model
# In[ ]:
id='oracle'  # note: shadows the builtin id(); only used in the run id string
run=1
runid=f'{trainmodel}-{dataset}-all-indy-f1min-t{prediction_length}-e{epochs}-r{run}_{id}_t{prediction_length}'
modelfile = _task_dir + runid
if _skip_overwrite and os.path.exists(modelfile):
    # reuse the existing checkpoint directory
    print('Model checkpoint found at:',modelfile)
else:
    #get target dim: multivariate when the target is 2-D, otherwise 1
    entry = next(iter(train_ds))
    target_dim = entry['target'].shape
    target_dim = target_dim[0] if len(target_dim) > 1 else 1
    print('target_dim:%s', target_dim)
    estimator = init_estimator(trainmodel, gpuid,
            epochs, batch_size,target_dim, distr_output = distr_output,use_feat_static = use_feat_static)
    predictor = estimator.train(train_ds)
    if _savedata:
        os.makedirs(modelfile, exist_ok=True)
        print('Start to save the model to %s', modelfile)
        predictor.serialize(Path(modelfile))
        print('End of saving the model.')
#
# ### 4. evaluate the model
# In[ ]:
lapmode = _inlap_status
fmode = _feature_mode
runts = dataset
# key used to index the per-model result dicts below
mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], cur_featurestr)
datasetid = outputRoot + _dataset_id
simulation_outfile=outputRoot + f'shortterm-dfout-{trainmodel}-indy500-{dataset}-{inlapstr[_inlap_status]}-{cur_featurestr}-{testmodel}-l{loopcnt}-alldata.pickle'
if _skip_overwrite and os.path.exists(simulation_outfile):
    # reuse cached simulation results
    print('Load Simulation Results:',simulation_outfile)
    with open(simulation_outfile, 'rb') as f:
        dfs,acc,ret,pret = pickle.load(f, encoding='latin1')
    print('.......loaded data, ret keys=', ret.keys())
    # init the stint module
    #
    # in test mode, set all train_len = 40 to unify the evaluation results
    #
    init_simulation(datasetid, _test_event, 'rank',stint.COL_RANK,'rank',prediction_length,
            pitmodel=pitmodel, inlapmode=lapmode,featuremode =fmode,
            train_len = _test_train_len)
else:
    #run simulation
    acc, ret, pret = {}, {}, {}
    #lapmode = _inlap_status
    #fmode = _feature_mode
    #runts = dataset
    #mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], featurestr[fmode])
    # rank models simulate on the rank column; otherwise run on timediff
    # and convert the prediction back to ranks
    if runts == 'rank':
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                'rank',stint.COL_RANK,'rank',
                prediction_length, stint.MODE_ORACLE,loopcnt,
                pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
                train_len = _test_train_len)
    else:
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                'timediff',stint.COL_TIMEDIFF,'timediff2rank',
                prediction_length, stint.MODE_ORACLE,loopcnt,
                pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
                train_len = _test_train_len)
    # quantile-risk evaluation on the raw samples
    allsamples, alltss = get_allsamples(ret[mid], year=year)
    _, pret[mid]= prisk_direct_bysamples(allsamples, alltss)
    print(pret[mid])
    # aggregate the per-run outputs (mode=1 -> mean)
    dfs={}
    mode=1
    df = get_alldf_mode(ret[mid], year=year,mode=mode)
    name = '%s_%s'%(testmodel, 'mean' if mode==1 else ('mode' if mode==0 else 'median'))
    if year not in dfs:
        dfs[year] = {}
    dfs[year][name] = df
    _trim = 0
    _include_final = True
    _include_stintlen = True
    include_str = '1' if _include_final else '0'
    stint_str = '1' if _include_stintlen else ''
    #simulation_outfile=outputRoot + f'shortterm-dfout-oracle-indy500-{dataset}-{inlapstr[_inlap_status]}-{featurestr[_feature_mode]}-2018-oracle-l{loopcnt}-alldata-weighted.pickle'
    with open(simulation_outfile, 'wb') as f:
        savedata = [dfs,acc,ret,pret]
        pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
#alias
ranknetdf = dfs
# ### 5. final evaluation
# In[ ]:
# --- Section 5: final evaluation (SignAcc / MAE / 50-risk / 90-risk) ---
if _skip_overwrite and os.path.exists(outputRoot + EVALUATION_RESULT_DF):
    print('Load Evaluation Results:',outputRoot + EVALUATION_RESULT_DF)
    oracle_eval_result = pd.read_csv(outputRoot + EVALUATION_RESULT_DF)
else:
    # get pit laps, pit-covered-laps
    # pitdata[year] = [pitlaps, pitcoveredlaps]
    with open('pitcoveredlaps-g1.pickle', 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        pitdata = pickle.load(f, encoding='latin1')
    #
    # Model,SignAcc,MAE,50-Risk,90-Risk
    #
    cols = ['Year','Model','ExpID','laptype','SignAcc','MAE','50-Risk','90-Risk']
    plen = prediction_length
    usemeanstr='mean'
    #load data
    # dfs,acc,ret,pret
    retdata = []
    #oracle
    # whole-race evaluation over every startlap
    dfx = ret[mid]
    allsamples, alltss = get_allsamples(dfx, year=year)
    #_, pret[mid]= prisk_direct_bysamples(ret[mid][0][1], ret[mid][0][2])
    _, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
    dfout = do_rerank(ranknetdf[year][f'{testmodel}_mean'])
    accret = stint.get_evalret_shortterm(dfout)[0]
    #fsamples, ftss = runs2samples_ex(ranknet_ret[f'oracle-RANK-{year}-inlap-nopitage'],[])
    #_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
    retdata.append([year,f'{testmodel}',configname,'all', accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
    # per-lap-type breakdown: evaluate normal laps and pit-covered laps separately
    for laptype in ['normal','pit']:
        # select the set
        pitcoveredlaps = pitdata[year][1]
        # NOTE(review): lap range 1..200 is hard-coded (Indy500); the slim
        # script derives max laps from the event info instead -- confirm.
        normallaps = set([x for x in range(1,201)]) - pitcoveredlaps
        if laptype == 'normal':
            sellaps = normallaps
            clearlaps = pitcoveredlaps
        else:
            sellaps = pitcoveredlaps
            clearlaps = normallaps
        # pitcoveredlaps start idx = 1
        startlaps = [x-plen-1 for x in sellaps]
        #sellapidx = np.array([x-1 for x in sellaps])
        clearidx = np.array([x-1 for x in clearlaps])
        print('sellaps:', len(sellaps), 'clearlaps:',len(clearlaps))
        #oracle
        #outfile=f'shortterm-dfout-ranknet-indy500-rank-inlap-nopitage-20182019-oracle-l10-alldata-weighted.pickle'
        #_all = load_dfout_all(outfile)[0]
        #ranknetdf, acc, ret, pret = _all[0],_all[1],_all[2],_all[3]
        dfout = do_rerank(ranknetdf[year][f'{testmodel}_mean'])
        allsamples, alltss = get_allsamples(dfx, year=year)
        # zero out samples belonging to the laps excluded from this laptype
        allsamples, alltss = clear_samples(allsamples, alltss,clearidx)
        _, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
        dfout = dfout[dfout['startlap'].isin(startlaps)]
        accret = stint.get_evalret_shortterm(dfout)[0]
        print(year, laptype,f'RankNet-{testmodel}',accret[0], accret[1], prisk_vals[1], prisk_vals[2])
        retdata.append([year, f'{testmodel}',configname,laptype, accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
    oracle_eval_result = pd.DataFrame(data=retdata, columns=cols)
    if _savedata:
        oracle_eval_result.to_csv(outputRoot + EVALUATION_RESULT_DF)
# ### 6. Draw forecasting results
# In[ ]:
# --- Section 6a: build per-car long-horizon forecasting data ---
if _skip_overwrite and os.path.exists(outputRoot + LONG_FORECASTING_DFS):
    fname = outputRoot + LONG_FORECASTING_DFS
    print('Load Long Forecasting Data:',fname)
    with open(fname, 'rb') as f:
        alldata = pickle.load(f, encoding='latin1')
        print('.......loaded data, alldata keys=', alldata.keys())
else:
    oracle_ret = ret
    mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], cur_featurestr)
    print('eval mid:', mid, f'{testmodel}_ret keys:', ret.keys())
    ## init predictor
    _predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
    oracle_dfout = do_rerank(dfs[year][f'{testmodel}_mean'])
    carlist = set(list(oracle_dfout.carno.values))
    carlist = [int(x) for x in carlist]
    print('carlist:', carlist,'len:',len(carlist))
    #carlist = [13, 7, 3, 12]
    #carlist = [13]
    retdata = {}
    for carno in carlist:
        print("*"*40)
        print('Run models for carno=', carno)
        # create the test_ds first
        test_cars = [carno]
        #train_ds, test_ds, trainset, testset = stint.make_dataset_byevent(events_id[_test_event],
        #                        prediction_length,freq,
        #                        oracle_mode=stint.MODE_ORACLE,
        #                        run_ts = _run_ts,
        #                        test_event = _test_event,
        #                        test_cars=test_cars,
        #                        half_moving_win = 0,
        #                        train_ratio = 0.01)
        train_ds, test_ds, trainset, testset = make_dataset_byevent(prepared_laptimedata, prediction_length,freq,
                                useeid=useeid, run_ts=_run_ts,
                                test_event=_test_event, log_transform =False,
                                context_ratio=0, train_ratio = 0,
                                test_cars = test_cars)
        if (len(testset) <= 10 + prediction_length):
            # series too short to forecast for this car
            print('ts too short, skip ', len(testset))
            continue
        #by first run samples
        samples = oracle_ret[mid][0][1][test_cars[0]]
        tss = oracle_ret[mid][0][2][test_cars[0]]
        target_oracle1, tss_oracle1 = long_predict_bysamples('1run-samples', samples, tss)
        #by first run output df(_use_mean = true, already reranked)
        df = oracle_ret[mid][0][0]
        dfin_oracle = df[df['carno']==test_cars[0]]
        target_oracle2, tss_oracle2 = long_predict_bydf(f'{testmodel}-1run-dfout', dfin_oracle)
        #by multi-run mean at oracle_dfout
        df = oracle_dfout
        dfin_oracle = df[df['carno']==test_cars[0]]
        target_oracle3, tss_oracle3 = long_predict_bydf(f'{testmodel}-multimean', dfin_oracle)
        #no rerank
        # NOTE(review): '2018' is hard-coded here while every sibling lookup
        # uses `year` -- likely a bug (KeyError for other test years); confirm.
        df = ranknetdf['2018'][f'{testmodel}_mean']
        dfin_oracle = df[df['carno']==test_cars[0]]
        target_oracle4, tss_oracle4 = long_predict_bydf(f'{testmodel}-norerank-multimean', dfin_oracle)
        #by multiple runs
        target_oracle_multirun, tss_oracle_multirun = get_ranknet_multirun(
            oracle_ret[mid],
            test_cars[0],sampleCnt=loopcnt)
        # retdata[carno] = [list of tss variants, list of target variants]
        retdata[carno] = [[tss_oracle1,tss_oracle2,tss_oracle3,tss_oracle4,tss_oracle_multirun],
                          [target_oracle1,target_oracle2,target_oracle3,target_oracle4,target_oracle_multirun]]
    alldata = retdata
    if _savedata:
        with open(outputRoot + LONG_FORECASTING_DFS, 'wb') as f:
            pickle.dump(alldata, f, pickle.HIGHEST_PROTOCOL)
# In[ ]:
# --- Section 6b: draw per-car forecasting figures ---
destdir = outputRoot + FORECAST_FIGS_DIR
if _skip_overwrite and os.path.exists(destdir):
    print('Long Forecasting Figures at:',destdir)
else:
    # NOTE(review): both the stagedata pickle name and the 'Indy500-2018'
    # key are hard-coded here (the slim script uses STAGE_DATASET /
    # _test_event instead) -- confirm this is intentional for this script.
    with open('stagedata-Indy500_2013_2019_v9_p0.pickle', 'rb') as f:
        stagedata = pickle.load(f, encoding='latin1')
    _alldata, rankdata, _acldata, _flagdata = stagedata['Indy500-2018']
    #destdir = outputRoot + 'oracle-forecast-figs/'
    os.makedirs(destdir, exist_ok=True)
    for carno in alldata:
        plotoracle(alldata, carno, destdir)
    #draw summary result
    outputfile = destdir + f'{configname}'
    plotallcars(alldata, outputfile, drawid = 0)
print(outputRoot)
print(oracle_eval_result)
| 105,355 | 32.382763 | 189 | py |
rankpredictor | rankpredictor-master/run/22.PaperFinal/notebook/RankNet-QuickTest-Slim.py | #!/usr/bin/env python
# coding: utf-8
# ## QuickTest Slim
#
# based on : RankNet-QuickTest-Joint
#
# makedb laptime
# makedb gluonts
# train model
# evaluate model
#
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import random
import mxnet as mx
from mxnet import gluon
import pickle
import json
import copy
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from pathlib import Path
import configparser
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWeightEstimator
#import indycar.model.stint_simulator_shortterm_pitmodel as stint
import indycar.model.quicktest_simulator as stint
# import all functions
#from indycar.model.global_variables import _hi
import indycar.model.global_variables as gvar
from indycar.model.quicktest_modules import *
# ## run
# In[2]:
### run
# Script entry: configure logging, parse command-line options, and locate the
# experiment configuration file (exactly one positional argument).
# NOTE(review): `logging`, `OptionParser`, and `logger`'s module are not
# imported explicitly in the visible header -- presumably they come from
# `from indycar.model.quicktest_modules import *`; verify.
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
# logging configure
import logging.config
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
# cmd argument parser
usage = 'RankNet-QuickTest.py <configfile> [options]'
parser = OptionParser(usage)
parser.add_option("--forecast_mode", dest="forecast_mode", default="")
parser.add_option("--trainmodel", default='', dest="trainmodel")
parser.add_option("--testmodel", default='', dest="testmodel")
parser.add_option("--joint_train", action="store_true", default=False, dest="joint_train")
parser.add_option("--loopcnt", default=-1,type='int', dest="loopcnt")
parser.add_option("--gpuid", default=-1,type='int', dest="gpuid")
parser.add_option("--pitmodel_bias", default=-1, type='int', dest="pitmodel_bias")
parser.add_option("--year", default='', dest="year")
parser.add_option("--test_event", default='', dest="test_event")
parser.add_option("--suffix", default='', dest="suffix")
parser.add_option("--dataroot", default='test/', dest="dataroot")
opt, args = parser.parse_args()
print(len(args), opt.joint_train)
#check validation
if len(args) != 1:
    logger.error(globals()['__doc__'] % locals())
    sys.exit(-1)
configfile = args[0]
# configname (config file basename without extension) tags all outputs
base=os.path.basename(configfile)
configname = os.path.splitext(base)[0]
WorkRootDir = 'QuickTestOutput'
#configname = 'weighted-noinlap-nopitage-nocate-c60-drank'
#configfile = f'{configname}.ini'
if not os.path.exists(configfile):
    print('config file not exists error:', configfile)
    sys.exit(-1)
# Read all experiment hyper-parameters from the .ini config file.
# Trailing comments on each line show typical values.
if configfile != '':
    config = configparser.RawConfigParser()
    #config.read(WorkRootDir + '/' + configfile)
    config.read(configfile)
    #set them back
    section = "RankNet-QuickTest"
    _savedata = config.getboolean(section, "_savedata")
    _skip_overwrite = config.getboolean(section, "_skip_overwrite")
    _inlap_status = config.getint(section, "_inlap_status") #0
    _feature_mode = config.getint(section, "_feature_mode") #FEATURE_STATUS
    _featureCnt = config.getint(section, "_featureCnt") #9
    freq = config.get(section, "freq") #"1min"
    _train_len = config.getint(section, "_train_len") #40
    prediction_length = config.getint(section, "prediction_length") #2
    context_ratio = config.getfloat(section, "context_ratio") #0.
    context_length = config.getint(section, "context_length") #40
    dataset= config.get(section, "dataset") #'rank'
    epochs = config.getint(section, "epochs") #1000
    gpuid = config.getint(section, "gpuid") #5
    _use_weighted_model = config.getboolean(section, "_use_weighted_model")
    trainmodel = config.get(section, "trainmodel") #'deepARW-Oracle' if _use_weighted_model else 'deepAR-Oracle'
    _use_cate_feature = config.getboolean(section, "_use_cate_feature")
    distroutput = config.get(section, "distroutput") #'student'
    batch_size = config.getint(section, "batch_size") #32
    loopcnt = config.getint(section, "loopcnt") #2
    _test_event = config.get(section, "_test_event") #'Indy500-2018'
    testmodel = config.get(section, "testmodel") #'oracle'
    pitmodel = config.get(section, "pitmodel") #'oracle'
    year = config.get(section, "year") #'2018'
    # derived aliases used throughout the script
    contextlen = context_length
    use_feat_static = _use_cate_feature
    #config1 = get_config()
else:
    print('Warning, please use config file')
    sys.exit(0)
# In[3]:
# new added parameters
# Defaults for parameters not present in older config files; command-line
# options below override both these and the config-file values.
_draw_figs = False
_test_train_len = 40
_joint_train = False
_pitmodel_bias = 0
#shortterm, stint
#_forecast_mode = 'stint'
_forecast_mode = 'shortterm'
#load arguments overwites
if opt.forecast_mode != '':
    _forecast_mode = opt.forecast_mode
if opt.trainmodel != '':
    trainmodel = opt.trainmodel
if opt.testmodel != '':
    testmodel = opt.testmodel
if opt.joint_train != False:
    _joint_train = True
if opt.gpuid >= 0:
    gpuid = opt.gpuid
if opt.loopcnt > 0:
    loopcnt = opt.loopcnt
if opt.pitmodel_bias >= 0:
    _pitmodel_bias = opt.pitmodel_bias
if opt.year != '':
    year = opt.year
if opt.test_event != '':
    _test_event = opt.test_event
if opt.suffix:
    _debugstr = f'-{opt.suffix}'
else:
    _debugstr = ''
dataroot = opt.dataroot
#discard year
# `year` is kept as an alias of the test event id from here on
year = _test_event
if testmodel == 'pitmodel':
    # encode a non-zero bias into the test-model name, e.g. 'pitmodel2'
    testmodel = 'pitmodel%s'%(_pitmodel_bias if _pitmodel_bias!=0 else '')
#featurestr = {FEATURE_STATUS:'nopitage',FEATURE_PITAGE:'pitage',FEATURE_LEADERPITCNT:'leaderpitcnt'}
#cur_featurestr = featurestr[_feature_mode]
print('current configfile:', configfile)
cur_featurestr = decode_feature_mode(_feature_mode)
print('feature_mode:', _feature_mode, cur_featurestr)
print('testmodel:', testmodel)
print('pitmodel:', pitmodel)
#print('year:', year)
print('test_event:', _test_event)
# In[4]:
#
# string map
#
# Lookup tables used to build human-readable ids for files/directories.
inlapstr = {0:'noinlap',1:'inlap',2:'outlap'}
weightstr = {True:'weighted',False:'noweighted'}
catestr = {True:'cate',False:'nocate'}
#
# input data parameters
#
#events = ['Phoenix','Indy500','Texas','Iowa','Pocono','Gateway']
#events_totalmiles=[256,500,372,268,500,310]
#events_laplen = [1.022,2.5,1.5,0.894,2.5,1.25]
# per-event tuple: (total miles, lap length in miles, total laps) -- per the
# commented-out lists above
events_info = {
    'Phoenix':(256, 1.022, 250),'Indy500':(500,2.5,200),'Texas':(372,1.5,248),
    'Iowa':(268,0.894,300),'Pocono':(500,2.5,200),'Gateway':(310,1.25,248)
}
years = ['2013','2014','2015','2016','2017','2018','2019']
events = [f'Indy500-{x}' for x in years]
events.extend(['Phoenix-2018','Texas-2018','Texas-2019','Pocono-2018','Pocono-2019','Iowa-2018','Iowa-2019',
               'Gateway-2018','Gateway-2019'])
events_id={key:idx for idx, key in enumerate(events)}
#dbid = f'Indy500_{years[0]}_{years[-1]}_v{_featureCnt}_p{_inlap_status}'
dbid = f'IndyCar_d{len(events)}_v{_featureCnt}_p{_inlap_status}'
_dataset_id = '%s-%s'%(inlapstr[_inlap_status], cur_featurestr)
# training uses the 2013-2017 Indy500 events only
_train_events = [events_id[x] for x in [f'Indy500-{x}' for x in ['2013','2014','2015','2016','2017']]]
#
# internal parameters
#
distr_outputs ={'student':StudentTOutput(),
                'negbin':NegativeBinomialOutput()
               }
distr_output = distr_outputs[distroutput]
#
#
#
experimentid = f'{weightstr[_use_weighted_model]}-{inlapstr[_inlap_status]}-{cur_featurestr}-{catestr[_use_cate_feature]}-c{context_length}{_debugstr}'
#
#
#
outputRoot = f"{WorkRootDir}/{experimentid}/"
version = f'IndyCar-d{len(events)}-endlap'
# standard output file names
LAPTIME_DATASET = f'{outputRoot}/laptime_rank_timediff_pit-oracle-{dbid}.pickle'
STAGE_DATASET = f'{outputRoot}/stagedata-{dbid}.pickle'
# year related
SIMULATION_OUTFILE = f'{outputRoot}/{_test_event}/{_forecast_mode}-dfout-{trainmodel}-indy500-{dataset}-{inlapstr[_inlap_status]}-{cur_featurestr}-{testmodel}-l{loopcnt}-alldata.pickle'
EVALUATION_RESULT_DF = f'{outputRoot}/{_test_event}/{_forecast_mode}-evaluation_result_d{dataset}_m{testmodel}.csv'
LONG_FORECASTING_DFS = f'{outputRoot}/{_test_event}/{_forecast_mode}-long_forecasting_dfs_d{dataset}_m{testmodel}.pickle'
FORECAST_FIGS_DIR = f'{outputRoot}/{_test_event}/{_forecast_mode}-forecast-figs-d{dataset}_m{testmodel}/'
# In[5]:
# set global vars
# Publish this run's configuration into the shared global-variable module so
# that the helper modules (quicktest_modules / quicktest_simulator) observe the
# exact same settings as this script.
_gvar_settings = [
    ('_savedata', _savedata),
    ('_skip_overwrite', _skip_overwrite),
    ('_inlap_status', _inlap_status),
    ('_feature_mode', _feature_mode),
    ('_featureCnt', _featureCnt),
    ('freq', freq),
    ('_train_len', _train_len),
    ('prediction_length', prediction_length),
    ('context_ratio', context_ratio),
    ('context_length', context_length),
    ('contextlen', contextlen),
    ('dataset', dataset),
    ('epochs', epochs),
    ('gpuid', gpuid),
    ('_use_weighted_model', _use_weighted_model),
    ('trainmodel', trainmodel),
    ('_use_cate_feature', _use_cate_feature),
    ('use_feat_static', use_feat_static),
    ('distroutput', distroutput),
    ('batch_size', batch_size),
    ('loopcnt', loopcnt),
    ('_test_event', _test_event),
    ('testmodel', testmodel),
    ('pitmodel', pitmodel),
    ('year', year),
    ('_forecast_mode', _forecast_mode),
    ('_test_train_len', _test_train_len),
    ('_joint_train', _joint_train),
    ('_pitmodel_bias', _pitmodel_bias),
    ('events', events),
    ('events_id', events_id),
    ('events_info', events_info),
    ('_train_events', _train_events),
]
for _attr, _value in _gvar_settings:
    setattr(gvar, _attr, _value)
# maxlap is derived via get_event_info AFTER the settings above are published,
# preserving the original assignment order.
gvar.maxlap = get_event_info(_test_event)[2]
gvar.dbid = dbid
gvar.LAPTIME_DATASET = LAPTIME_DATASET
# ### 1. make laptime dataset
# In[6]:
# --- Section 1: make the laptime dataset ---
# Loads (or builds and caches) per-event stage data and the laptime dataset,
# and assigns a global id to every car number seen across all events.
stagedata = {}
global_carids = {}
os.makedirs(outputRoot, exist_ok=True)
os.makedirs(f'{outputRoot}/{_test_event}', exist_ok=True)
#check the dest files first
if _skip_overwrite and os.path.exists(LAPTIME_DATASET) and os.path.exists(STAGE_DATASET):
    #
    # load data
    #
    print('Load laptime and stage dataset:',LAPTIME_DATASET, STAGE_DATASET)
    with open(LAPTIME_DATASET, 'rb') as f:
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    with open(STAGE_DATASET, 'rb') as f:
        stagedata = pickle.load(f, encoding='latin1')
else:
    cur_carid = 0
    for event in events:
        #dataid = f'{event}-{year}'
        #alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        print('%s: carno=%d, lapnum=%d'%(event, len(carlist), len(laplist)))
        #build the carid map
        for car in carlist:
            if car not in global_carids:
                global_carids[car] = cur_carid
                cur_carid += 1
    laptime_data = get_laptime_dataset(stagedata, inlap_status = _inlap_status)
    if _savedata:
        import pickle
        #stintdf.to_csv('laptime-%s.csv'%year)
        #savefile = outputRoot + f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
        savefile = LAPTIME_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            #pack [global_carids, laptime_data]
            savedata = [global_carids, laptime_data]
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        #savefile = outputRoot + f'stagedata-{dbid}.pickle'
        savefile = STAGE_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            #pack [global_carids, laptime_data]
            savedata = stagedata
            # Pickle the 'data' dictionary using the highest protocol available.
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
#update global var
gvar.global_carids = global_carids
# ### 2. make gluonts db
# In[7]:
# --- Section 2: make the gluonts dataset ---
# Choose the time-series column matching `dataset` and build (or load) the
# cached gluonts train/test dataset plus the prepared laptime data.
outdir = outputRoot + _dataset_id
os.makedirs(outdir, exist_ok=True)
if dataset == 'laptime':
    subdir = 'laptime-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_LAPTIME
elif dataset == 'timediff':
    subdir = 'timediff-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_TIMEDIFF
elif dataset == 'rank':
    subdir = 'rank-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_RANK
else:
    # NOTE(review): execution continues after this message with `subdir` and
    # `_run_ts` undefined, so an unsupported dataset raises NameError below.
    print('error, dataset not support: ', dataset)
_task_dir = f'{outdir}/{subdir}/'
#
#dbname, train_ds, test_ds = makedbs()
#
useeid = False
interpolate = False
#ipstr = '-ip' if interpolate else '-noip'
ipstr = '%s-%s'%('ip' if interpolate else 'noip', 'eid' if useeid else 'noeid')
jointstr = '-joint' if _joint_train else ''
dbname = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}{jointstr}.pickle'
laptimedb = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}-newlaptimedata.pickle'
#check the dest files first
if _skip_overwrite and os.path.exists(dbname) and os.path.exists(laptimedb):
    print('Load Gluonts Dataset:',dbname)
    with open(dbname, 'rb') as f:
        freq, prediction_length, cardinality, train_ds, test_ds = pickle.load(f, encoding='latin1')
        print('.......loaded data, freq=', freq, 'prediction_length=', prediction_length)
    print('Load New Laptime Dataset:',laptimedb)
    with open(laptimedb, 'rb') as f:
        prepared_laptimedata = pickle.load(f, encoding='latin1')
else:
    # cardinality of the static categorical features (car id [+ event id])
    if useeid:
        cardinality = [len(global_carids), len(laptime_data)]
    else:
        cardinality = [len(global_carids)]
    prepared_laptimedata = prepare_laptimedata(laptime_data,
                           prediction_length, freq, test_event = _test_event,
                           train_ratio=0, context_ratio = 0.,shift_len = prediction_length)
    train_ds, test_ds,_,_ = make_dataset_byevent(prepared_laptimedata,
                            prediction_length,freq,
                            useeid=useeid, run_ts=_run_ts,
                            test_event=_test_event, log_transform =False,
                            context_ratio=0, train_ratio = 0, joint_train = _joint_train)
    if _savedata:
        print('Save Gluonts Dataset:',dbname)
        with open(dbname, 'wb') as f:
            savedata = [freq, prediction_length, cardinality, train_ds, test_ds]
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        print('Save preprocessed laptime Dataset:',laptimedb)
        with open(laptimedb, 'wb') as f:
            pickle.dump(prepared_laptimedata, f, pickle.HIGHEST_PROTOCOL)
# ### 3. train the model
# In[8]:
# --- Section 3: train the forecasting model ---
# Build a unique run id / checkpoint directory for this configuration.
# (renamed from `id`, which shadowed the builtin; produced runid is unchanged)
tag = 'oracle'
run = 1
runid = f'{trainmodel}-{dataset}-all-indy-f1min-t{prediction_length}-e{epochs}-r{run}_{tag}_t{prediction_length}'
modelfile = _task_dir + runid
if _skip_overwrite and os.path.exists(modelfile):
    # A checkpoint already exists: reuse it instead of retraining.
    print('Model checkpoint found at:', modelfile)
else:
    #get target dim
    # Peek at the first training entry: multivariate targets have a 2-D
    # shape (dim, length); univariate series are 1-D -> target_dim = 1.
    entry = next(iter(train_ds))
    target_dim = entry['target'].shape
    target_dim = target_dim[0] if len(target_dim) > 1 else 1
    # fixed: was print('target_dim:%s', target_dim) -- %-format needs the
    # % operator, the comma printed the literal format string plus the value
    print('target_dim:%s' % target_dim)
    estimator = init_estimator(trainmodel, gpuid,
                               epochs, batch_size, target_dim,
                               distr_output=distr_output, use_feat_static=use_feat_static)
    predictor = estimator.train(train_ds)
    if _savedata:
        os.makedirs(modelfile, exist_ok=True)
        # fixed: was print('Start to save the model to %s', modelfile)
        print('Start to save the model to %s' % modelfile)
        predictor.serialize(Path(modelfile))
        print('End of saving the model.')
#
# ### 4. evaluate the model
# In[9]:
# --- Section 4: evaluate the model via race simulation ---
# `mid` keys the accuracy/result/prisk dicts for this (model, ts, event) combo.
lapmode = _inlap_status
fmode = _feature_mode
runts = dataset
mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], cur_featurestr)
datasetid = outputRoot + _dataset_id
if _skip_overwrite and os.path.exists(SIMULATION_OUTFILE):
    # Cached simulation results exist: load them instead of re-running.
    print('Load Simulation Results:',SIMULATION_OUTFILE)
    with open(SIMULATION_OUTFILE, 'rb') as f:
        dfs,acc,ret,pret = pickle.load(f, encoding='latin1')
        print('.......loaded data, ret keys=', ret.keys())
    # init the stint module
    #
    # in test mode, set all train_len = 40 to unify the evaluation results
    #
    init_simulation(datasetid, _test_event, 'rank',stint.COL_RANK,'rank',prediction_length,
                    pitmodel=pitmodel, inlapmode=lapmode,featuremode =fmode,
                    train_len = _test_train_len, pitmodel_bias= _pitmodel_bias, prepared_laptimedata = prepared_laptimedata)
else:
    #run simulation
    acc, ret, pret = {}, {}, {}
    #lapmode = _inlap_status
    #fmode = _feature_mode
    #runts = dataset
    #mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], featurestr[fmode])
    if runts == 'rank':
        # forecast directly on the rank time series
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                                        'rank',stint.COL_RANK,'rank',
                                        prediction_length, stint.MODE_ORACLE,loopcnt,
                                        pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
                                        train_len = _test_train_len, forecastmode = _forecast_mode, joint_train = _joint_train,
                                        pitmodel_bias= _pitmodel_bias, prepared_laptimedata = prepared_laptimedata,
                                        epochs = epochs)
    else:
        # forecast on lap-time differences, then convert predictions to rank
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                                        'timediff',stint.COL_TIMEDIFF,'timediff2rank',
                                        prediction_length, stint.MODE_ORACLE,loopcnt,
                                        pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
                                        train_len = _test_train_len, forecastmode = _forecast_mode, joint_train = _joint_train,
                                        pitmodel_bias= _pitmodel_bias, prepared_laptimedata = prepared_laptimedata,
                                        epochs = epochs)
    if _forecast_mode == 'shortterm':
        # quantile-risk metrics computed directly over all prediction samples
        allsamples, alltss = get_allsamples(ret[mid], year=year)
        _, pret[mid]= prisk_direct_bysamples(allsamples, alltss)
        print(pret[mid])
    dfs={}
    # mode=1 -> aggregate the sampled runs by their mean (see `name` below)
    mode=1
    df = get_alldf_mode(ret[mid], year=year,mode=mode, forecast_mode = _forecast_mode)
    name = '%s_%s'%(testmodel, 'mean' if mode==1 else ('mode' if mode==0 else 'median'))
    if year not in dfs:
        dfs[year] = {}
    dfs[year][name] = df
    _trim = 0
    _include_final = True
    _include_stintlen = True
    include_str = '1' if _include_final else '0'
    stint_str = '1' if _include_stintlen else ''
    #simulation_outfile=outputRoot + f'shortterm-dfout-oracle-indy500-{dataset}-{inlapstr[_inlap_status]}-{featurestr[_feature_mode]}-2018-oracle-l{loopcnt}-alldata-weighted.pickle'
    # cache [dfs, acc, ret, pret] so the next run can skip the simulation
    with open(SIMULATION_OUTFILE, 'wb') as f:
        savedata = [dfs,acc,ret,pret]
        pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
#alias
ranknetdf = dfs
ranknet_ret = ret
# In[10]:
# ### 5. final evaluation
# In[11]:
# --- Section 5: final evaluation ---
# Two evaluation paths depending on _forecast_mode: 'shortterm' evaluates
# per-lap forecasts split by normal/pit laps; 'stint' compares against the
# classic ML baselines (currank/rf/svm/xgb) split by normal/caution laps.
if _skip_overwrite and os.path.exists(EVALUATION_RESULT_DF):
    print('Load Evaluation Results:',EVALUATION_RESULT_DF)
    oracle_eval_result = pd.read_csv(EVALUATION_RESULT_DF)
else:
    # get pit laps, pit-covered-laps
    # pitdata[event] = [pitlaps, pitcoveredlaps]
    with open('pitcoveredlaps-alldata-g1.pickle', 'rb') as f:
        # The protocol version used is detected automatically, so we do not
        # have to specify it.
        pitdata = pickle.load(f, encoding='latin1')
    with open(STAGE_DATASET, 'rb') as f:
        stagedata = pickle.load(f, encoding='latin1')
    _alldata, rankdata, _acldata, _flagdata = stagedata[_test_event]
    ##-------------------------------------------------------------------------------
    if _forecast_mode == 'shortterm':
        #
        # Model,SignAcc,MAE,50-Risk,90-Risk
        #
        cols = ['Year','Model','ExpID','laptype','Top1Acc','SignAcc', 'MAE','50-Risk','90-Risk']
        plen = prediction_length
        usemeanstr='mean'
        #load data
        # dfs,acc,ret,pret
        retdata = []
        #oracle
        # whole-race evaluation over every startlap
        dfx = ret[mid]
        allsamples, alltss = get_allsamples(dfx, year=year)
        #_, pret[mid]= prisk_direct_bysamples(ret[mid][0][1], ret[mid][0][2])
        _, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
        dfout = do_rerank(ranknetdf[year][f'{testmodel}_mean'])
        accret = stint.get_evalret_shortterm(dfout)[0]
        #fsamples, ftss = runs2samples_ex(ranknet_ret[f'oracle-RANK-{year}-inlap-nopitage'],[])
        #_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
        retdata.append([year,f'{testmodel}',configname,'all', accret[0], accret[4], accret[1], prisk_vals[1], prisk_vals[2]])
        # per-lap-type breakdown: normal laps vs pit-covered laps
        for laptype in ['normal','pit']:
            # select the set
            pitcoveredlaps = pitdata[_test_event][1]
            gvar.maxlap = get_event_info(_test_event)[2]
            normallaps = set([x for x in range(1, gvar.maxlap + 1)]) - pitcoveredlaps
            if laptype == 'normal':
                sellaps = normallaps
                clearlaps = pitcoveredlaps
            else:
                sellaps = pitcoveredlaps
                clearlaps = normallaps
            # pitcoveredlaps start idx = 1
            startlaps = [x-plen-1 for x in sellaps]
            #sellapidx = np.array([x-1 for x in sellaps])
            clearidx = np.array([x-1 for x in clearlaps])
            print('sellaps:', len(sellaps), 'clearlaps:',len(clearlaps))
            #oracle
            #outfile=f'shortterm-dfout-ranknet-indy500-rank-inlap-nopitage-20182019-oracle-l10-alldata-weighted.pickle'
            #_all = load_dfout_all(outfile)[0]
            #ranknetdf, acc, ret, pret = _all[0],_all[1],_all[2],_all[3]
            dfout = do_rerank(ranknetdf[year][f'{testmodel}_mean'])
            allsamples, alltss = get_allsamples(dfx, year=year)
            # zero out samples belonging to laps excluded from this laptype
            allsamples, alltss = clear_samples(allsamples, alltss,clearidx)
            _, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
            dfout = dfout[dfout['startlap'].isin(startlaps)]
            accret = stint.get_evalret_shortterm(dfout)[0]
            print(year, laptype,f'RankNet-{testmodel}',accret[0], accret[4], accret[1], prisk_vals[1], prisk_vals[2])
            retdata.append([year, f'{testmodel}',configname,laptype, accret[0], accret[4], accret[1], prisk_vals[1], prisk_vals[2]])
    ##-------------------------------------------------------------------------------
    elif _forecast_mode == 'stint':
        # load the pre-tuned ML baseline predictions matching the test mode
        if testmodel == 'oracle':
            #datafile=f'stint-dfout-mlmodels-indy500-tr2013_2017-te2018_2019-end1-oracle-t0-tuned.pickle'
            datafile=f'{dataroot}/stint-dfout-mlmodels-{version}-end1-oracle-t0-tuned.pickle'
        else:
            #datafile=f'stint-dfout-mlmodels-indy500-tr2013_2017-te2018_2019-end1-normal-t0-tuned.pickle'
            datafile=f'{dataroot}/stint-dfout-mlmodels-{version}-end1-normal-t0-tuned.pickle'
        #preddf = load_dfout(outfile)
        with open(datafile, 'rb') as f:
            preddf = pickle.load(f, encoding='latin1')[0]
        #preddf_oracle = load_dfout(outfile)
        ranknet_ret = ret
        #discard old year
        #year <- _test_event
        # errlist records rows where RankNet and the baseline disagree in coverage
        errlist = {}
        errcnt, errlist[year] = cmp_df(ranknetdf[year][f'{testmodel}_mean'], preddf[_test_event]['lasso'])
        pitlaps, cautionlaps = get_racestatus_all(rankdata)
        retdata = []
        #
        # Model,SignAcc,MAE,50-Risk,90-Risk
        #
        cols = ['Year','Model','ExpID','laptype','SignAcc','MAE','50-Risk','90-Risk']
        models = {'currank':'CurRank','rf':'RandomForest','svr_lin':'SVM','xgb':'XGBoost'}
        # whole-race evaluation for every baseline classifier
        for clf in ['currank','rf','svr_lin','xgb']:
            print('year:',year,'clf:',clf)
            dfout, accret = eval_sync(preddf[_test_event][clf],errlist[year])
            fsamples, ftss = df2samples_ex(dfout)
            _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
            retdata.append([_test_event,models[clf],configname,'all', accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
        # whole-race evaluation for RankNet itself
        dfout, accret = eval_sync(ranknetdf[year][f'{testmodel}_mean'], errlist[year],force2int=True)
        #fsamples, ftss = df2samples(dfout)
        fsamples, ftss = runs2samples(ranknet_ret[mid],errlist[f'{year}'])
        _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
        retdata.append([_test_event,f'{testmodel}',configname,'all',accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
        # split evaluation
        if True:
            # normal-pit vs caution-pit breakdown
            for laptype in ['normalpit','cautionpit']:
                # select the set
                gvar.maxlap = get_event_info(_test_event)[2]
                normallaps = set([x for x in range(1, gvar.maxlap + 1)]) - set(cautionlaps)
                if laptype == 'normalpit':
                    sellaps = normallaps
                    clearlaps = cautionlaps
                else:
                    sellaps = cautionlaps
                    clearlaps = normallaps
                # pitcoveredlaps start idx = 1
                startlaps = [x-1 for x in sellaps]
                clearidx = np.array([x-1 for x in clearlaps])
                print('sellaps:', len(sellaps), 'clearlaps:',len(clearlaps))
                # evaluation start
                for clf in ['currank','rf','svr_lin','xgb']:
                    dfout, accret = eval_sync(preddf[_test_event][clf],errlist[year])
                    #debug
                    if clf == 'currank':
                        print('currank min startlap:', np.min(dfout.startlap.values))
                        print('currank startlaps:', dfout.startlap.values)
                        print('currank endlaps:', dfout.endlap.values)
                    #dfout = dfout[dfout['endlap'].isin(startlaps)]
                    dfout = dfout[dfout['startlap'].isin(startlaps)]
                    accret = stint.get_evalret(dfout)[0]
                    fsamples, ftss = df2samples_ex(dfout)
                    #fsamples, ftss = clear_samples(fsamples, ftss, clearidx)
                    _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
                    #dfout = dfout[dfout['endlap'].isin(startlaps)]
                    #accret = stint.get_evalret(dfout)[0]
                    retdata.append([_test_event,models[clf],configname,laptype, accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
                dfout, accret = eval_sync(ranknetdf[year][f'{testmodel}_mean'], errlist[year],force2int=True)
                print('ranknet min startlap:', np.min(dfout.startlap.values))
                print('ranknet startlaps:', dfout.startlap.values)
                print('ranknet endlaps:', sorted(set(list((dfout.endlap.values)))))
                print('sel laps::', startlaps)
                print('clear laps::', clearidx)
                print('cautionlaps:', cautionlaps)
                dfoutx = dfout[dfout['startlap'].isin(clearidx)]
                #dfoutx = dfout[dfout['endlap'].isin(clearidx)]
                print('matched cleared endlaps::', sorted(set(list((dfoutx.endlap.values)))))
                dfout = dfout[dfout['startlap'].isin(startlaps)]
                #dfout = dfout[dfout['endlap'].isin(startlaps)]
                print('matched endlaps::', sorted(set(list((dfout.endlap.values)))))
                accret = stint.get_evalret(dfout)[0]
                #fsamples, ftss = df2samples(dfout)
                fsamples, ftss = runs2samples(ranknet_ret[mid],errlist[f'{year}'])
                fsamples, ftss = clear_samples(fsamples, ftss, clearidx)
                _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
                #dfout = dfout[dfout['endlap'].isin(startlaps)]
                #accret = stint.get_evalret(dfout)[0]
                retdata.append([_test_event,f'{testmodel}',configname,laptype,accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
    # end of evaluation
    oracle_eval_result = pd.DataFrame(data=retdata, columns=cols)
    if _savedata:
        oracle_eval_result.to_csv(EVALUATION_RESULT_DF)
# ### 6. Draw forecasting results
# In[12]:
# Build (or reload from cache) the per-car long-horizon forecasting data used
# for the figures below. Only runs in short-term, non-joint-training mode.
if _forecast_mode == 'shortterm' and _joint_train == False:
    if _skip_overwrite and os.path.exists(LONG_FORECASTING_DFS):
        # cached results from a previous run
        fname = LONG_FORECASTING_DFS
        print('Load Long Forecasting Data:',fname)
        with open(fname, 'rb') as f:
            alldata = pickle.load(f, encoding='latin1')
        print('.......loaded data, alldata keys=', alldata.keys())
    else:
        oracle_ret = ret
        mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], cur_featurestr)
        print('eval mid:', mid, f'{testmodel}_ret keys:', ret.keys())
        ## init predictor
        # NaivePredictor simply replays the forecasts fed to it by the helpers
        _predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
        oracle_dfout = do_rerank(dfs[year][f'{testmodel}_mean'])
        carlist = set(list(oracle_dfout.carno.values))
        carlist = [int(x) for x in carlist]
        print('carlist:', carlist,'len:',len(carlist))
        #carlist = [13, 7, 3, 12]
        #carlist = [13]
        retdata = {}
        for carno in carlist:
            print("*"*40)
            print('Run models for carno=', carno)
            # create the test_ds first
            test_cars = [carno]
            #train_ds, test_ds, trainset, testset = stint.make_dataset_byevent(events_id[_test_event],
            #                                 prediction_length,freq,
            #                                 oracle_mode=stint.MODE_ORACLE,
            #                                 run_ts = _run_ts,
            #                                 test_event = _test_event,
            #                                 test_cars=test_cars,
            #                                 half_moving_win = 0,
            #                                 train_ratio = 0.01)
            # single-car dataset for this event
            train_ds, test_ds, trainset, testset = make_dataset_byevent(prepared_laptimedata, prediction_length,freq,
                                useeid=useeid, run_ts=_run_ts,
                                test_event=_test_event, log_transform =False,
                                context_ratio=0, train_ratio = 0,
                                joint_train = _joint_train,
                                test_cars = test_cars)
            # cars that retired early have too few laps to forecast
            if (len(testset) <= 10 + prediction_length):
                print('ts too short, skip ', len(testset))
                continue
            #by first run samples
            # oracle_ret[mid][0] is the first run: [0]=df, [1]=samples, [2]=tss
            samples = oracle_ret[mid][0][1][test_cars[0]]
            tss = oracle_ret[mid][0][2][test_cars[0]]
            target_oracle1, tss_oracle1 = long_predict_bysamples('1run-samples', samples, tss, test_ds, _predictor)
            #by first run output df(_use_mean = true, already reranked)
            df = oracle_ret[mid][0][0]
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle2, tss_oracle2 = long_predict_bydf(f'{testmodel}-1run-dfout', dfin_oracle, test_ds, _predictor)
            #by multi-run mean at oracle_dfout
            df = oracle_dfout
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle3, tss_oracle3 = long_predict_bydf(f'{testmodel}-multimean', dfin_oracle, test_ds, _predictor)
            #no rerank
            df = ranknetdf[year][f'{testmodel}_mean']
            dfin_oracle = df[df['carno']==test_cars[0]]
            target_oracle4, tss_oracle4 = long_predict_bydf(f'{testmodel}-norerank-multimean', dfin_oracle, test_ds, _predictor)
            #by multiple runs
            target_oracle_multirun, tss_oracle_multirun = get_ranknet_multirun(
                oracle_ret[mid],
                test_cars[0], test_ds, _predictor,sampleCnt=loopcnt)
            # per-car bundle: [all tss variants], [all target variants]
            retdata[carno] = [[tss_oracle1,tss_oracle2,tss_oracle3,tss_oracle4,tss_oracle_multirun],
                [target_oracle1,target_oracle2,target_oracle3,target_oracle4,target_oracle_multirun]]
        alldata = retdata
        if _savedata:
            with open(LONG_FORECASTING_DFS, 'wb') as f:
                pickle.dump(alldata, f, pickle.HIGHEST_PROTOCOL)
# In[13]:
# Optionally render the long-forecasting figures, then print the final table.
if _draw_figs:
    if _forecast_mode == 'shortterm' and _joint_train == False:
        destdir = FORECAST_FIGS_DIR
        if _skip_overwrite and os.path.exists(destdir):
            # figures already rendered in a previous run; skip the work
            print('Long Forecasting Figures at:',destdir)
        else:
            # reload the stage dataset to recover rank data for plotting
            with open(STAGE_DATASET, 'rb') as f:
                stagedata = pickle.load(f, encoding='latin1')
            _alldata, rankdata, _acldata, _flagdata = stagedata[_test_event]
            # set global variable consumed by the plotting helpers
            gvar.rankdata = rankdata
            #destdir = outputRoot + 'oracle-forecast-figs/'
            os.makedirs(destdir, exist_ok=True)
            # one figure per car, then a combined summary figure
            for carno in alldata:
                plotoracle(alldata, carno, destdir)
            #draw summary result
            outputfile = destdir + f'{configname}'
            plotallcars(alldata, outputfile, drawid = 0)
# final output
# disable pandas truncation so the whole evaluation table is printed
pd.set_option("display.max_rows", None, "display.max_columns", None)
print(oracle_eval_result)
| 34,588 | 36.393514 | 185 | py |
rankpredictor | rankpredictor-master/run/19.benchmark/tftrain/opt_testve_eager.py | #!/usr/bin/env python
# coding: utf-8
# ### test deepar tensorflow
from numpy.random import normal
import tqdm
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
from optparse import OptionParser
import tensorflow as tf
#from indycar.model.deepartf.dataset.time_series import MockTs
from indycar.model.deepartfve.dataset.time_series import RecordTs
from indycar.model.deepartfve.model_eager.lstm import DeepAR
#from indycar.model.deepartfve.model.lstm import DeepAR
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
def get_sample_prediction_gaussian(sample, fn):
    """Draw one Gaussian sample per time step from the output of *fn*.

    *fn* receives the input reshaped to (1, _seq_len, 1) and returns a pair
    of per-step parameter arrays -- presumably mean and variance (note the
    sqrt below; confirm against the network definition). Relies on the
    module-level ``_seq_len``.
    """
    net_input = np.array(sample).reshape(1, _seq_len, 1)
    params = fn([net_input])
    means = params[0].reshape(_seq_len)
    variances = params[1].reshape(_seq_len)
    # one draw per step from N(mu, sqrt(var))
    drawn = [normal(loc=mu, scale=np.sqrt(var), size=1)[0]
             for mu, var in zip(means, variances)]
    return np.array(drawn)
def get_sample_prediction(sample, model, verbose = False):
    """Draw one sample per time step from *model*'s predicted distribution.

    ``model.predict`` returns one array per distribution parameter; these
    are stacked column-wise into a (_seq_len, n_params) matrix and each row
    is handed to ``model.get_sample`` for the actual draw. Relies on the
    module-level ``_seq_len``.
    """
    net_input = np.array(sample).reshape(1, _seq_len, -1)
    output = model.predict([net_input])
    # stack the per-parameter outputs: one column per distribution parameter
    output2 = np.zeros((_seq_len, len(output)))
    for col, param in enumerate(output):
        output2[:, col] = param.reshape(_seq_len)
    if verbose:
        print('output.shape=', [len(x) for x in output])
        print('output:', output)
        print('output2.shape=', output2.shape)
        print('output2:', output2)
    # one draw per time step from the fitted parameter vector
    return np.array([model.get_sample(theta) for theta in output2])
def predict(model):
    """Monte-Carlo forecast one batch and plot the uncertainty bands.

    Draws 300 sample paths from *model* for a single batch taken from the
    module-level dataset ``ts``, plots the ground truth, the per-step mean
    and the 1-/2-sigma bands.

    Returns:
        (batch, tot_res) -- the raw batch and a DataFrame with one column
        per sample path plus mu/upper/lower/two_upper/two_lower summaries.
    """
    #batch = ts.next_batch(_batch_size, _seq_len)
    batch = ts.next_batch(-1, _seq_len)
    ress = []
    for i in tqdm.tqdm(range(300)):
        ress.append(get_sample_prediction(batch[0], model))
    res_df = pd.DataFrame(ress).T
    tot_res = res_df
    # ground truth, drawn thick
    plt.plot(batch[1].reshape(_seq_len), linewidth=6)
    # BUGFIX: compute the summary statistics from the raw sample columns
    # *before* appending any summary column. Previously each row-wise
    # apply() also saw the summary columns added by earlier statements
    # (e.g. 'upper' included 'mu' in its std, 'lower' included 'upper' in
    # its mean), which biased the uncertainty bands.
    row_mean = res_df.apply(lambda x: np.mean(x), axis=1)
    row_std = res_df.apply(lambda x: np.std(x), axis=1)
    tot_res['mu'] = row_mean
    tot_res['upper'] = row_mean + row_std
    tot_res['lower'] = row_mean - row_std
    tot_res['two_upper'] = row_mean + 2*row_std
    tot_res['two_lower'] = row_mean - 2*row_std
    plt.plot(tot_res.mu, 'bo')
    plt.plot(tot_res.mu, linewidth=2)
    # shaded 1-sigma and 2-sigma bands
    plt.fill_between(x = tot_res.index, y1=tot_res.lower, y2=tot_res.upper, alpha=0.5)
    plt.fill_between(x = tot_res.index, y1=tot_res.two_lower, y2=tot_res.two_upper, alpha=0.5)
    plt.title('Prediction uncertainty')
    return batch, tot_res
class myCallback(tf.keras.callbacks.Callback):
    """Keras callback that writes a flag file at the end of epoch 5.

    External profilers (e.g. VTune) can watch for ./vtune-flag.txt to start
    collection once training has warmed up past the first epochs.
    """

    def on_epoch_end(self, epoch, logs=None):
        # logs=None replaces the previous mutable default `logs={}` -- a
        # classic Python pitfall; Keras passes `logs` explicitly anyway.
        if epoch == 5:
            print('Write vtune-flag.txt')
            with open('./vtune-flag.txt','w') as flagf:
                flagf.write('hi')
# cmd argument parser
# --usecore N    : pin TF intra-op / OpenMP threads to N cores (-1 = default)
# --usegenerator : feed training data through a Python generator
usage = 'testve.py --usecore # --usegenerator'
parser = OptionParser(usage)
parser.add_option("--usegenerator", action="store_true", default=False, dest="usegenerator")
parser.add_option("--usecore", type=int, default=-1, dest="usecore")
parser.add_option("--batchsize", type=int, default=32, dest="batchsize")
parser.add_option("--contextlen", type=int, default=40, dest="contextlen")
parser.add_option("--epochs", type=int, default=10, dest="epochs")
opt, args = parser.parse_args()
#use_cores=2
#tf.config.experimental_run_functions_eagerly(False)
#tf.config.experimental_run_functions_eagerly(True)
#tf.compat.v1.disable_eager_execution()
use_cores = opt.usecore
if use_cores > 0:
    print(f'Set usecore:{use_cores}')
    #tf.config.threading.set_inter_op_parallelism_threads(use_cores)
    #tf.config.threading.set_intra_op_parallelism_threads(16)
    # single inter-op pool; all requested cores go to intra-op parallelism
    tf.config.threading.set_inter_op_parallelism_threads(1)
    tf.config.threading.set_intra_op_parallelism_threads(use_cores)
    tf.config.set_soft_device_placement(True)
    # matching OpenMP/MKL thread settings for the benchmark run
    os.environ["OMP_NUM_THREADS"] = f"{use_cores}"
    os.environ["KMP_BLOCKTIME"] = "0"
    os.environ["KMP_SETTINGS"] = "1"
    os.environ["KMP_AFFINITY"] = "granularity=fine,verbose,compact,1,0"
# time-series dataset loaded from a pre-built pickle
ts = RecordTs('savedata_drank_e1.pickle')
#_context_len = 40
_context_len = opt.contextlen
_prediction_len = 2
_batch_size = opt.batchsize
# total window length seen by the network = context + prediction
_seq_len = _context_len + _prediction_len
_epochs = opt.epochs
# train
callbacks = myCallback()
model3 = DeepAR(ts, epochs=_epochs, distribution='StudentT',
        with_custom_nn_structure=DeepAR.encoder_decoder, use_generator = opt.usegenerator)
model3.fit(context_len=_context_len, prediction_len=_prediction_len,
        input_dim = 33, batch_size = _batch_size, callbacks=[callbacks])
# prediction
#batch, df = predict(model3)
| 4,977 | 33.331034 | 94 | py |
rankpredictor | rankpredictor-master/run/25.sotamodels/notebook/RankNet-QuickTest-Slim.py | #!/usr/bin/env python
# coding: utf-8
# ## QuickTest Slim
#
# based on : RankNet-QuickTest-Joint
#
# makedb laptime
# makedb gluonts
# train model
# evaluate model
#
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import random
import mxnet as mx
from mxnet import gluon
import pickle
import json
import copy
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from pathlib import Path
import configparser
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWeightEstimator
#import indycar.model.stint_simulator_shortterm_pitmodel as stint
import indycar.model.quicktest_simulator_sota as stint
# import all functions
#from indycar.model.global_variables import _hi
import indycar.model.global_variables as gvar
from indycar.model.quicktest_modules_sota import *
from indycar.model.deep_factor import DeepFactorXEstimator
# ## run
# In[2]:
### run
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
# logging configure
import logging.config
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
# cmd argument parser
usage = 'RankNet-QuickTest.py <configfile> [options]'
parser = OptionParser(usage)
parser.add_option("--forecast_mode", dest="forecast_mode", default="")
parser.add_option("--trainmodel", default='', dest="trainmodel")
parser.add_option("--testmodel", default='', dest="testmodel")
parser.add_option("--joint_train", action="store_true", default=False, dest="joint_train")
parser.add_option("--loopcnt", default=-1,type='int', dest="loopcnt")
parser.add_option("--gpuid", default=-1,type='int', dest="gpuid")
parser.add_option("--pitmodel_bias", default=-1, type='int', dest="pitmodel_bias")
parser.add_option("--trainrace", default='Indy500', dest="trainrace")
parser.add_option("--test_event", default='', dest="test_event")
parser.add_option("--suffix", default='', dest="suffix")
parser.add_option("--dataroot", default='data/', dest="dataroot")
parser.add_option("--prediction_length", default=-1,type='int', dest="prediction_length")
parser.add_option("--context_length", default=-1,type='int', dest="context_length")
parser.add_option("--weight_coef", default=-1,type='float', dest="weight_coef")
parser.add_option("--lr", default=1e-3,type='float', dest="learning_rate")
parser.add_option("--patience", default=10,type='int', dest="patience")
parser.add_option("--use_validation", action="store_true", default=False, dest="use_validation")
parser.add_option("--context_ratio", default=-1,type='float', dest="context_ratio")
parser.add_option("--test_context_ratio", default=-1,type='float', dest="test_context_ratio")
parser.add_option("--batch_size", default=-1,type='int', dest="batch_size")
parser.add_option("--use_cat_feat", default=-1, type='int', dest="use_cat_feat")
opt, args = parser.parse_args()
print(len(args), opt.joint_train)
#check validation
if len(args) != 1:
logger.error(globals()['__doc__'] % locals())
sys.exit(-1)
configfile = args[0]
base=os.path.basename(configfile)
configname = os.path.splitext(base)[0]
WorkRootDir = 'QuickTestOutput'
#configname = 'weighted-noinlap-nopitage-nocate-c60-drank'
#configfile = f'{configname}.ini'
if not os.path.exists(configfile):
print('config file not exists error:', configfile)
sys.exit(-1)
if configfile != '':
config = configparser.RawConfigParser()
#config.read(WorkRootDir + '/' + configfile)
config.read(configfile)
#set them back
section = "RankNet-QuickTest"
_savedata = config.getboolean(section, "_savedata")
_skip_overwrite = config.getboolean(section, "_skip_overwrite")
_inlap_status = config.getint(section, "_inlap_status") #0
_feature_mode = config.getint(section, "_feature_mode") #FEATURE_STATUS
_featureCnt = config.getint(section, "_featureCnt") #9
freq = config.get(section, "freq") #"1min"
_train_len = config.getint(section, "_train_len") #40
prediction_length = config.getint(section, "prediction_length") #2
context_ratio = config.getfloat(section, "context_ratio") #0.
context_length = config.getint(section, "context_length") #40
dataset= config.get(section, "dataset") #'rank'
epochs = config.getint(section, "epochs") #1000
gpuid = config.getint(section, "gpuid") #5
_use_weighted_model = config.getboolean(section, "_use_weighted_model")
trainmodel = config.get(section, "trainmodel") #'deepARW-Oracle' if _use_weighted_model else 'deepAR-Oracle'
_use_cate_feature = config.getboolean(section, "_use_cate_feature")
distroutput = config.get(section, "distroutput") #'student'
batch_size = config.getint(section, "batch_size") #32
loopcnt = config.getint(section, "loopcnt") #2
_test_event = config.get(section, "_test_event") #'Indy500-2018'
testmodel = config.get(section, "testmodel") #'oracle'
pitmodel = config.get(section, "pitmodel") #'oracle'
year = config.get(section, "year") #'2018'
contextlen = context_length
#use_feat_static = _use_cate_feature
#config1 = get_config()
train_years = config.get(section, "train_years", fallback='2013,2014,2015,2016,2017')
_train_years = train_years.split(',')
else:
print('Warning, please use config file')
sys.exit(0)
# In[3]:
# new added parameters
_draw_figs = False
_test_train_len = 40
_joint_train = False
_pitmodel_bias = 0
#shortterm, stint
#_forecast_mode = 'stint'
_forecast_mode = 'shortterm'
_weight_coef = 9
#load arguments overwites
if opt.forecast_mode != '':
_forecast_mode = opt.forecast_mode
if opt.trainmodel != '':
trainmodel = opt.trainmodel
if opt.testmodel != '':
testmodel = opt.testmodel
if opt.joint_train != False:
_joint_train = True
if opt.gpuid >= 0 or opt.gpuid < -1:
gpuid = opt.gpuid
if opt.loopcnt > 0:
loopcnt = opt.loopcnt
if opt.prediction_length > 0:
prediction_length = opt.prediction_length
if opt.context_length > 0:
context_length = opt.context_length
if opt.weight_coef > 0:
_weight_coef = opt.weight_coef
if opt.pitmodel_bias >= 0:
_pitmodel_bias = opt.pitmodel_bias
if opt.test_event != '':
_test_event = opt.test_event
if opt.suffix:
_debugstr = f'-{opt.suffix}'
else:
_debugstr = ''
if opt.context_ratio > 0:
context_ratio = opt.context_ratio
if opt.test_context_ratio > 0:
test_context_ratio = opt.test_context_ratio
else:
test_context_ratio = context_ratio
if opt.batch_size > 0:
batch_size = opt.batch_size
if opt.learning_rate > 0:
gvar.learning_rate = opt.learning_rate
if opt.patience > 0:
gvar.patience = opt.patience
if opt.use_cat_feat >=0:
_use_cate_feature = True if opt.use_cat_feat>0 else False
gvar.use_validation = opt.use_validation
dataroot = opt.dataroot
trainrace = opt.trainrace
#discard year
year = _test_event
if testmodel == 'pitmodel':
testmodel = 'pitmodel%s'%(_pitmodel_bias if _pitmodel_bias!=0 else '')
cur_featurestr = decode_feature_mode(_feature_mode)
# In[4]:
#
# string map
#
inlapstr = {0:'noinlap',1:'inlap',2:'outlap'}
weightstr = {True:'weighted',False:'noweighted'}
catestr = {True:'cate',False:'nocate'}
#
# input data parameters
#
# event -> car#, maxlap
events_info = {
'Phoenix':(256, 1.022, 250),'Indy500':(500,2.5,200),'Texas':(372,1.5,248),
'Iowa':(268,0.894,300),'Pocono':(500,2.5,200),'Gateway':(310,1.25,248)
}
_race_info = {}
# the races have 7 years data
races = ['Indy500', 'Texas','Iowa','Pocono']
years = ['2013','2014','2015','2016','2017','2018','2019']
events = []
for race in races:
events.extend([f'{race}-{x}' for x in years])
events.extend(['Phoenix-2018','Gateway-2018','Gateway-2019'])
events_id={key:idx for idx, key in enumerate(events)}
# dataset shared
dataSuffix1 = "simpledb" if gvar.use_simpledb else "fulldb"
dataSuffix2 = "driverid" if gvar.use_driverid else "carid"
dataOutputRoot = f"data_{dataSuffix1}_{dataSuffix2}/"
os.makedirs(dataOutputRoot, exist_ok=True)
covergap = 1
dbid = f'IndyCar_d{len(events)}_v{_featureCnt}_p{_inlap_status}'
LAPTIME_DATASET = f'{dataOutputRoot}/laptime_rank_timediff_pit-oracle-{dbid}.pickle'
STAGE_DATASET = f'{dataOutputRoot}/stagedata-{dbid}.pickle'
PITCOVERED_DATASET = f'{dataOutputRoot}/pitcoveredlaps-{dbid}-g{covergap}.pickle'
#dbid = f'Indy500_{years[0]}_{years[-1]}_v{_featureCnt}_p{_inlap_status}'
dbid = f'IndyCar_d{len(events)}_v{_featureCnt}_p{_inlap_status}'
_dataset_id = '%s-%s'%(inlapstr[_inlap_status], cur_featurestr)
#trainrace = 'Indy500'
#_train_events = [events_id[x] for x in [f'{trainrace}-{x}' for x in ['2013','2014','2015','2016','2017']]]
#patch
if trainrace.find(',') > 0:
_train_events = []
for race in trainrace.split(','):
if trainrace == 'Pocono':
_train_years = ['2013','2015','2016','2017']
else:
_train_years = ['2013','2014','2015','2016','2017']
_train_events.extend([events_id[x] for x in [f'{race}-{x}' for x in _train_years]])
if pitmodel != 'oracle':
logger.error('WARNING: pitmodel should be oracle when training on multiple races')
sys.exit(-1)
else:
if trainrace == 'Pocono':
_train_years = ['2013','2015','2016','2017']
_train_events = [events_id[x] for x in [f'{trainrace}-{x}' for x in _train_years]]
#replace TRAINRACE in pitmodel
if pitmodel.find('TRAINRACE') > 0:
pitmodel = pitmodel.replace('TRAINRACE', trainrace)
#
# internal parameters
#
distr_outputs ={'student':StudentTOutput(),
'negbin':NegativeBinomialOutput()
}
distr_output = distr_outputs[distroutput]
#
#
#
experimentid = f'{weightstr[_use_weighted_model]}-{inlapstr[_inlap_status]}-{cur_featurestr}-{catestr[_use_cate_feature]}-c{context_length}{_debugstr}'
#
#
#
outputRoot = f"{WorkRootDir}/{experimentid}/"
#version = f'IndyCar-d{len(events)}-endlap'
version = f'IndyCar-d{trainrace}-endlap'
# standard output file names
SIMULATION_OUTFILE = f'{outputRoot}/{_test_event}/{_forecast_mode}-dfout-{trainmodel}-indy500-{dataset}-{inlapstr[_inlap_status]}-{cur_featurestr}-{testmodel}-l{loopcnt}-alldata.pickle'
EVALUATION_RESULT_DF = f'{outputRoot}/{_test_event}/{_forecast_mode}-evaluation_result_d{dataset}_m{testmodel}.csv'
LONG_FORECASTING_DFS = f'{outputRoot}/{_test_event}/{_forecast_mode}-long_forecasting_dfs_d{dataset}_m{testmodel}.pickle'
FORECAST_FIGS_DIR = f'{outputRoot}/{_test_event}/{_forecast_mode}-forecast-figs-d{dataset}_m{testmodel}/'
# In[5]:
# set global vars
gvar._savedata = _savedata
gvar._skip_overwrite = _skip_overwrite
gvar._inlap_status = _inlap_status
gvar._feature_mode = _feature_mode
gvar._featureCnt = _featureCnt
gvar.freq = freq
gvar._train_len = _train_len
gvar.prediction_length = prediction_length
gvar.context_ratio = context_ratio
gvar.test_context_ratio = test_context_ratio
gvar.context_length = context_length
gvar.contextlen = contextlen
gvar.dataset = dataset
gvar.epochs = epochs
gvar.gpuid = gpuid
gvar._use_weighted_model = _use_weighted_model
gvar.trainmodel = trainmodel
gvar._use_cate_feature = _use_cate_feature
#gvar.use_feat_static = use_feat_static
gvar.distroutput = distroutput
gvar.batch_size = batch_size
gvar.loopcnt = loopcnt
gvar._test_event = _test_event
gvar.testmodel = testmodel
gvar.pitmodel = pitmodel
gvar.year = year
gvar._forecast_mode = _forecast_mode
gvar._test_train_len = _test_train_len
gvar._joint_train = _joint_train
gvar._pitmodel_bias = _pitmodel_bias
gvar._train_events = _train_events
gvar._weight_coef = _weight_coef
gvar.dbid = dbid
gvar.LAPTIME_DATASET = LAPTIME_DATASET
# ### 1. make laptime dataset
# In[6]:
stagedata = {}
global_carids = {}
os.makedirs(outputRoot, exist_ok=True)
os.makedirs(f'{outputRoot}/{_test_event}', exist_ok=True)
#check the dest files first
gvar._race_info = _race_info
gvar.events = events
gvar.events_id = events_id
if _skip_overwrite and os.path.exists(LAPTIME_DATASET) and os.path.exists(STAGE_DATASET):
#
# load data
#
print('Load laptime and stage dataset:',LAPTIME_DATASET, STAGE_DATASET)
with open(LAPTIME_DATASET, 'rb') as f:
global_carids, laptime_data = pickle.load(f, encoding='latin1')
with open(STAGE_DATASET, 'rb') as f:
#stagedata = pickle.load(f, encoding='latin1')
stagedata, _race_info, _events, _events_id = pickle.load(f, encoding='latin1')
with open(PITCOVERED_DATASET, 'rb') as f:
pitdata = pickle.load(f, encoding='latin1')
#check it
if not _events == events:
print('Error, events mismatch at:', STAGE_DATASET)
sys.exit(-1)
else:
cur_carid = 0
for event in events:
#dataid = f'{event}-{year}'
#alldata, rankdata, acldata, flagdata
stagedata[event] = load_data(event)
alldata, rankdata, acldata, flagdata = stagedata[event]
carlist = set(acldata['car_number'])
laplist = set(acldata['completed_laps'])
print('%s: carno=%d, lapnum=%d'%(event, len(carlist), len(laplist)))
_race_info[event] = (len(carlist), len(laplist)-1, max(laplist))
#build the carid map
for car in carlist:
if car not in global_carids:
global_carids[car] = cur_carid
cur_carid += 1
laptime_data = get_laptime_dataset(stagedata, inlap_status = _inlap_status)
### check the inlap
pitdata = {}
for event in events:
alldata, rankdata, acldata, flagdata = stagedata[event]
totallaps = np.max(rankdata.completed_laps.to_numpy())
#pitlaps = rankdata[rankdata['lap_status']=='P'][['completed_laps']].to_numpy()
pitlaps = rankdata[rankdata['lap_status']=='P'].completed_laps.to_numpy()
pitlaps = set(sorted(pitlaps))
pitcoveredlaps = []
for lap in pitlaps:
gap = range(lap - covergap, lap + covergap+1)
#pitcoveredlaps.extend([lap -2,lap-1,lap,lap+1,lap+2])
pitcoveredlaps.extend(gap)
pitcoveredlaps = set(sorted(pitcoveredlaps))
print(event, 'total:', totallaps, 'pitlaps:', len(pitlaps), 'pitcoveredlaps:', len(pitcoveredlaps))
#save
pitdata[event] = [pitlaps, pitcoveredlaps]
if _savedata:
import pickle
#stintdf.to_csv('laptime-%s.csv'%year)
#savefile = outputRoot + f'laptime_rank_timediff_pit-oracle-{dbid}.pickle'
savefile = LAPTIME_DATASET
print(savefile)
with open(savefile, 'wb') as f:
#pack [global_carids, laptime_data]
savedata = [global_carids, laptime_data]
# Pickle the 'data' dictionary using the highest protocol available.
pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
#savefile = outputRoot + f'stagedata-{dbid}.pickle'
savefile = STAGE_DATASET
print(savefile)
with open(savefile, 'wb') as f:
#pack [global_carids, laptime_data]
savedata = [stagedata, _race_info, events, events_id]
# Pickle the 'data' dictionary using the highest protocol available.
pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
with open(PITCOVERED_DATASET, 'wb') as f:
#pack [global_carids, laptime_data]
savedata = pitdata
# Pickle the 'data' dictionary using the highest protocol available.
pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
#update global var
gvar.global_carids = global_carids
gvar._race_info = _race_info
gvar.events = events
gvar.events_id = events_id
gvar.maxlap = get_event_info(_test_event)[2]
gvar.events_info = events_info
gvar.trainrace = trainrace
# ### 2. make gluonts db
#featurestr = {FEATURE_STATUS:'nopitage',FEATURE_PITAGE:'pitage',FEATURE_LEADERPITCNT:'leaderpitcnt'}
#cur_featurestr = featurestr[_feature_mode]
print('current configfile:', configfile)
print('trainrace:', trainrace)
print('train_years:', _train_years)
print('trainevents:', _train_events)
print('feature_mode:', _feature_mode, cur_featurestr)
print('trainmodel:', trainmodel)
print('testmodel:', testmodel)
print('pitmodel:', pitmodel)
print('test_event:', _test_event)
print('prediction_length:', prediction_length)
print('context_length:', context_length)
print('weight_coef:', _weight_coef)
print('context_ratio:', context_ratio)
print('gpuid:', gpuid)
sys.stdout.flush()
# In[7]:
outdir = outputRoot + _dataset_id
os.makedirs(outdir, exist_ok=True)
if dataset == 'laptime':
subdir = 'laptime-indy500'
os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
_run_ts = COL_LAPTIME
elif dataset == 'timediff':
subdir = 'timediff-indy500'
os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
_run_ts = COL_TIMEDIFF
elif dataset == 'rank':
subdir = 'rank-indy500'
os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
_run_ts = COL_RANK
else:
print('error, dataset not support: ', dataset)
_task_dir = f'{outdir}/{subdir}/'
#
#dbname, train_ds, test_ds = makedbs()
#
useeid = False
interpolate = False
#ipstr = '-ip' if interpolate else '-noip'
ipstr = '%s-%s'%('ip' if interpolate else 'noip', 'eid' if useeid else 'noeid')
jointstr = '-joint' if _joint_train else ''
dbname = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}{jointstr}.pickle'
laptimedb = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}-newlaptimedata.pickle'
#check the dest files first
if _skip_overwrite and os.path.exists(dbname) and os.path.exists(laptimedb):
print('Load Gluonts Dataset:',dbname)
with open(dbname, 'rb') as f:
freq, prediction_length, cardinality, train_ds, test_ds = pickle.load(f, encoding='latin1')
print('.......loaded data, freq=', freq, ',prediction_length=', prediction_length,
',cardinality=',cardinality,',train_ds len=', len(train_ds))
print('Load New Laptime Dataset:',laptimedb)
with open(laptimedb, 'rb') as f:
prepared_laptimedata = pickle.load(f, encoding='latin1')
else:
if useeid:
cardinality = [len(global_carids), len(laptime_data)]
else:
cardinality = [len(global_carids)]
prepared_laptimedata = prepare_laptimedata(laptime_data,
prediction_length, freq, test_event = _test_event,
train_ratio=0, context_ratio = context_ratio,shift_len = prediction_length)
train_ds, test_ds,_,_ = make_dataset_byevent(prepared_laptimedata,
prediction_length,freq,
useeid=useeid, run_ts=_run_ts,
test_event=_test_event, log_transform =False,
context_ratio=context_ratio, train_ratio = 0, joint_train = _joint_train)
if _savedata:
print('Save Gluonts Dataset:',dbname)
with open(dbname, 'wb') as f:
savedata = [freq, prediction_length, cardinality, train_ds, test_ds]
pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
print('Save preprocessed laptime Dataset:',laptimedb)
with open(laptimedb, 'wb') as f:
pickle.dump(prepared_laptimedata, f, pickle.HIGHEST_PROTOCOL)
# ### 3. train the model
# In[8]:
# model id / checkpoint path for this training configuration
# NOTE(review): `id` shadows the builtin -- harmless here but worth renaming
id='oracle'
run=1
runid=f'{trainmodel}-{dataset}-all-indy-f1min-t{prediction_length}-e{epochs}-r{run}_{id}_t{prediction_length}'
modelfile = _task_dir + runid
if trainmodel == 'arima':
    # arima needs no pre-trained checkpoint
    print('Skip train arima model')
elif _skip_overwrite and os.path.exists(modelfile):
    # reuse the existing checkpoint instead of retraining
    print('Model checkpoint found at:',modelfile)
else:
    #get target dim
    # peek at one training entry; a target with rank > 1 is multivariate
    entry = next(iter(train_ds))
    target_dim = entry['target'].shape
    target_dim = target_dim[0] if len(target_dim) > 1 else 1
    # NOTE(review): the '%s' placeholder is printed literally (print, not logging)
    print('target_dim:%s', target_dim)
    estimator = init_estimator(trainmodel, gpuid,
                epochs, batch_size,target_dim,
                distr_output = distr_output,use_feat_static = gvar._use_cate_feature,
                cardinality = [len(train_ds)] if gvar.static_cat_type==2 else cardinality)
    #tsCnt = len(train_ds))
    if gvar.use_validation:
        # test_ds presumably serves as a validation set -- confirm against init_estimator
        predictor = estimator.train(train_ds, test_ds)
    else:
        predictor = estimator.train(train_ds)
    if _savedata:
        os.makedirs(modelfile, exist_ok=True)
        # NOTE(review): '%s' placeholder printed literally here as well
        print('Start to save the model to %s', modelfile)
        predictor.serialize(Path(modelfile))
        print('End of saving the model.')
#
# ### 4. evaluate the model
# In[9]:
lapmode = _inlap_status
fmode = _feature_mode
runts = dataset
mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], cur_featurestr)
datasetid = outputRoot + _dataset_id
if _skip_overwrite and os.path.exists(SIMULATION_OUTFILE):
print('Load Simulation Results:',SIMULATION_OUTFILE)
with open(SIMULATION_OUTFILE, 'rb') as f:
dfs,acc,ret,pret = pickle.load(f, encoding='latin1')
print('.......loaded data, ret keys=', ret.keys())
# init the stint module
#
# in test mode, set all train_len = 40 to unify the evaluation results
#
init_simulation(datasetid, _test_event, 'rank',stint.COL_RANK,'rank',prediction_length,
pitmodel=pitmodel, inlapmode=lapmode,featuremode =fmode,
train_len = _test_train_len, pitmodel_bias= _pitmodel_bias, prepared_laptimedata = prepared_laptimedata)
else:
#run simulation
acc, ret, pret = {}, {}, {}
#lapmode = _inlap_status
#fmode = _feature_mode
#runts = dataset
#mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], featurestr[fmode])
if runts == 'rank':
acc[mid], ret[mid] = simulation(datasetid, _test_event,
'rank',stint.COL_RANK,'rank',
prediction_length, stint.MODE_ORACLE,loopcnt,
pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
train_len = _test_train_len, forecastmode = _forecast_mode, joint_train = _joint_train,
pitmodel_bias= _pitmodel_bias, prepared_laptimedata = prepared_laptimedata,
epochs = epochs)
else:
acc[mid], ret[mid] = simulation(datasetid, _test_event,
'timediff',stint.COL_TIMEDIFF,'timediff2rank',
prediction_length, stint.MODE_ORACLE,loopcnt,
pitmodel=pitmodel, model=testmodel, inlapmode=lapmode,featuremode =fmode,
train_len = _test_train_len, forecastmode = _forecast_mode, joint_train = _joint_train,
pitmodel_bias= _pitmodel_bias, prepared_laptimedata = prepared_laptimedata,
epochs = epochs)
if _forecast_mode == 'shortterm':
allsamples, alltss = get_allsamples(ret[mid], year=year)
_, pret[mid]= prisk_direct_bysamples(allsamples, alltss)
print(pret[mid])
dfs={}
mode=1
df = get_alldf_mode(ret[mid], year=year,mode=mode, forecast_mode = _forecast_mode)
name = '%s_%s'%(testmodel, 'mean' if mode==1 else ('mode' if mode==0 else 'median'))
if year not in dfs:
dfs[year] = {}
dfs[year][name] = df
_trim = 0
_include_final = True
_include_stintlen = True
include_str = '1' if _include_final else '0'
stint_str = '1' if _include_stintlen else ''
#simulation_outfile=outputRoot + f'shortterm-dfout-oracle-indy500-{dataset}-{inlapstr[_inlap_status]}-{featurestr[_feature_mode]}-2018-oracle-l{loopcnt}-alldata-weighted.pickle'
with open(SIMULATION_OUTFILE, 'wb') as f:
savedata = [dfs,acc,ret,pret]
pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
#alias
ranknetdf = dfs
ranknet_ret = ret
# In[10]:
# ### 5. final evaluation
# In[11]:
# --------------------------------------------------------------------------
# Final evaluation: reload a cached metric table if present, otherwise
# compute accuracy / quantile-risk metrics from the simulation output.
# --------------------------------------------------------------------------
if _skip_overwrite and os.path.exists(EVALUATION_RESULT_DF):
    print('Load Evaluation Results:', EVALUATION_RESULT_DF)
    oracle_eval_result = pd.read_csv(EVALUATION_RESULT_DF)
else:
    # Per-event race records: (alldata, rankdata, acldata, flagdata).
    _alldata, rankdata, _acldata, _flagdata = stagedata[_test_event]
    ##-------------------------------------------------------------------------------
    if _forecast_mode == 'shortterm':
        # Metric schema: Model, SignAcc, MAE, 50-Risk, 90-Risk (plus ids).
        cols = ['Year', 'Model', 'ExpID', 'laptype', 'Top1Acc', 'SignAcc', 'MAE', '50-Risk', '90-Risk']
        plen = prediction_length
        usemeanstr = 'mean'
        retdata = []

        # --- whole-race evaluation over all laps ---
        dfx = ret[mid]
        allsamples, alltss = get_allsamples(dfx, year=year)
        _, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
        dfout = do_rerank(ranknetdf[year][f'{testmodel}_mean'])

        # Restrict evaluation to the tail of the race when a test context
        # ratio is configured.
        startlap = 10
        if gvar.test_context_ratio > 0:
            maxlap = np.max(dfout['startlap'].values)
            minlap = np.min(dfout['startlap'].values)
            startlap = int(maxlap * gvar.test_context_ratio)
        # NOTE(review): minlap/maxlap are only bound when
        # gvar.test_context_ratio > 0 — the print below assumes that path.
        dfout = dfout[dfout['startlap'] > startlap]
        print('evalate on dfout, min/max=', minlap, maxlap, ' startlap = ', startlap, ' size=', len(dfout))
        accret = stint.get_evalret_shortterm(dfout)[0]
        retdata.append([year, f'{testmodel}', configname, 'all', accret[0], accret[4], accret[1], prisk_vals[1], prisk_vals[2]])

        # --- split evaluation: clean laps vs. pit-covered laps ---
        for laptype in ['normal', 'pit']:
            pitcoveredlaps = pitdata[_test_event][1]
            gvar.maxlap = get_event_info(_test_event)[2]
            normallaps = set([x for x in range(1, gvar.maxlap + 1)]) - pitcoveredlaps
            if laptype == 'normal':
                sellaps = normallaps
                clearlaps = pitcoveredlaps
            else:
                sellaps = pitcoveredlaps
                clearlaps = normallaps
            # pit-covered laps use 1-based lap indices
            startlaps = [x - plen - 1 for x in sellaps]
            clearidx = np.array([x - 1 for x in clearlaps])
            print('sellaps:', len(sellaps), 'clearlaps:', len(clearlaps))
            dfout = do_rerank(ranknetdf[year][f'{testmodel}_mean'])
            allsamples, alltss = get_allsamples(dfx, year=year)
            allsamples, alltss = clear_samples(allsamples, alltss, clearidx)
            _, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
            dfout = dfout[dfout['startlap'].isin(startlaps)]
            accret = stint.get_evalret_shortterm(dfout)[0]
            print(year, laptype, f'RankNet-{testmodel}', accret[0], accret[4], accret[1], prisk_vals[1], prisk_vals[2])
            retdata.append([year, f'{testmodel}', configname, laptype, accret[0], accret[4], accret[1], prisk_vals[1], prisk_vals[2]])
    ##-------------------------------------------------------------------------------
    elif _forecast_mode == 'stint':
        # Compare against the classical ML baselines tuned offline.
        if testmodel == 'oracle':
            datafile = f'{dataroot}/stint-dfout-mlmodels-{version}-end1-oracle-t0-tuned.pickle'
        else:
            datafile = f'{dataroot}/stint-dfout-mlmodels-{version}-end1-normal-t0-tuned.pickle'
        with open(datafile, 'rb') as f:
            preddf = pickle.load(f, encoding='latin1')[0]
        ranknet_ret = ret
        # year <- _test_event (legacy naming kept for compatibility)
        errlist = {}
        errcnt, errlist[year] = cmp_df(ranknetdf[year][f'{testmodel}_mean'], preddf[_test_event]['lasso'])
        pitlaps, cautionlaps = get_racestatus_all(rankdata)
        retdata = []
        # Metric schema: Model, SignAcc, MAE, 50-Risk, 90-Risk (plus ids).
        cols = ['Year', 'Model', 'ExpID', 'laptype', 'SignAcc', 'MAE', '50-Risk', '90-Risk']
        models = {'currank': 'CurRank', 'rf': 'RandomForest', 'svr_lin': 'SVM', 'xgb': 'XGBoost'}

        # --- baselines over all laps ---
        for clf in ['currank', 'rf', 'svr_lin', 'xgb']:
            print('year:', year, 'clf:', clf)
            dfout, accret = eval_sync(preddf[_test_event][clf], errlist[year])
            fsamples, ftss = df2samples_ex(dfout)
            _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
            retdata.append([_test_event, models[clf], configname, 'all', accret[0], accret[1], prisk_vals[1], prisk_vals[2]])

        # --- RankNet over all laps ---
        dfout, accret = eval_sync(ranknetdf[year][f'{testmodel}_mean'], errlist[year], force2int=True)
        fsamples, ftss = runs2samples(ranknet_ret[mid], errlist[f'{year}'])
        _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
        retdata.append([_test_event, f'{testmodel}', configname, 'all', accret[0], accret[1], prisk_vals[1], prisk_vals[2]])

        # --- split evaluation: pits under green vs. under caution ---
        if True:
            for laptype in ['normalpit', 'cautionpit']:
                gvar.maxlap = get_event_info(_test_event)[2]
                normallaps = set([x for x in range(1, gvar.maxlap + 1)]) - set(cautionlaps)
                if laptype == 'normalpit':
                    sellaps = normallaps
                    clearlaps = cautionlaps
                else:
                    sellaps = cautionlaps
                    clearlaps = normallaps
                # pit-covered laps use 1-based lap indices
                startlaps = [x - 1 for x in sellaps]
                clearidx = np.array([x - 1 for x in clearlaps])
                print('sellaps:', len(sellaps), 'clearlaps:', len(clearlaps))
                # evaluation start
                for clf in ['currank', 'rf', 'svr_lin', 'xgb']:
                    dfout, accret = eval_sync(preddf[_test_event][clf], errlist[year])
                    # debug
                    if clf == 'currank':
                        print('currank min startlap:', np.min(dfout.startlap.values))
                        print('currank startlaps:', dfout.startlap.values)
                        print('currank endlaps:', dfout.endlap.values)
                    dfout = dfout[dfout['startlap'].isin(startlaps)]
                    accret = stint.get_evalret(dfout)[0]
                    fsamples, ftss = df2samples_ex(dfout)
                    _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
                    retdata.append([_test_event, models[clf], configname, laptype, accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
                dfout, accret = eval_sync(ranknetdf[year][f'{testmodel}_mean'], errlist[year], force2int=True)
                print('ranknet min startlap:', np.min(dfout.startlap.values))
                print('ranknet startlaps:', dfout.startlap.values)
                print('ranknet endlaps:', sorted(set(list((dfout.endlap.values)))))
                print('sel laps::', startlaps)
                print('clear laps::', clearidx)
                print('cautionlaps:', cautionlaps)
                dfoutx = dfout[dfout['startlap'].isin(clearidx)]
                print('matched cleared endlaps::', sorted(set(list((dfoutx.endlap.values)))))
                dfout = dfout[dfout['startlap'].isin(startlaps)]
                print('matched endlaps::', sorted(set(list((dfout.endlap.values)))))
                accret = stint.get_evalret(dfout)[0]
                fsamples, ftss = runs2samples(ranknet_ret[mid], errlist[f'{year}'])
                fsamples, ftss = clear_samples(fsamples, ftss, clearidx)
                _, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
                retdata.append([_test_event, f'{testmodel}', configname, laptype, accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
    # end of evaluation — persist the metric table
    oracle_eval_result = pd.DataFrame(data=retdata, columns=cols)
    if _savedata:
        oracle_eval_result.to_csv(EVALUATION_RESULT_DF)
# ### 6. Draw forecasting results
# In[12]:
# --------------------------------------------------------------------------
# Build (or reload) per-car long-horizon forecasting traces for plotting.
# Only meaningful for independent short-term models.
# --------------------------------------------------------------------------
if _forecast_mode == 'shortterm' and _joint_train == False:
    if _skip_overwrite and os.path.exists(LONG_FORECASTING_DFS):
        fname = LONG_FORECASTING_DFS
        print('Load Long Forecasting Data:', fname)
        with open(fname, 'rb') as f:
            alldata = pickle.load(f, encoding='latin1')
        print('.......loaded data, alldata keys=', alldata.keys())
    else:
        oracle_ret = ret
        mid = f'{testmodel}-%s-%s-%s-%s' % (runts, year, inlapstr[lapmode], cur_featurestr)
        print('eval mid:', mid, f'{testmodel}_ret keys:', ret.keys())
        # naive predictor used to stitch step-wise predictions together
        _predictor = NaivePredictor(freq=freq, prediction_length=prediction_length)
        oracle_dfout = do_rerank(dfs[year][f'{testmodel}_mean'])
        carlist = set(list(oracle_dfout.carno.values))
        carlist = [int(x) for x in carlist]
        print('carlist:', carlist, 'len:', len(carlist))
        retdata = {}
        for carno in carlist:
            print("*" * 40)
            print('Run models for carno=', carno)
            # build a single-car test dataset
            test_cars = [carno]
            train_ds, test_ds, trainset, testset = make_dataset_byevent(prepared_laptimedata, prediction_length, freq,
                                                                        useeid=useeid, run_ts=_run_ts,
                                                                        test_event=_test_event, log_transform=False,
                                                                        context_ratio=context_ratio, train_ratio=0,
                                                                        joint_train=_joint_train,
                                                                        test_cars=test_cars)
            if (len(testset) <= 10 + prediction_length):
                print('ts too short, skip ', len(testset))
                continue
            # 1) first-run raw samples
            samples = oracle_ret[mid][0][1][test_cars[0]]
            tss = oracle_ret[mid][0][2][test_cars[0]]
            target_oracle1, tss_oracle1 = long_predict_bysamples('1run-samples', samples, tss, test_ds, _predictor)
            # 2) first-run output dataframe (mean, already reranked)
            df = oracle_ret[mid][0][0]
            dfin_oracle = df[df['carno'] == test_cars[0]]
            target_oracle2, tss_oracle2 = long_predict_bydf(f'{testmodel}-1run-dfout', dfin_oracle, test_ds, _predictor)
            # 3) multi-run mean (reranked)
            df = oracle_dfout
            dfin_oracle = df[df['carno'] == test_cars[0]]
            target_oracle3, tss_oracle3 = long_predict_bydf(f'{testmodel}-multimean', dfin_oracle, test_ds, _predictor)
            # 4) multi-run mean without rerank
            df = ranknetdf[year][f'{testmodel}_mean']
            dfin_oracle = df[df['carno'] == test_cars[0]]
            target_oracle4, tss_oracle4 = long_predict_bydf(f'{testmodel}-norerank-multimean', dfin_oracle, test_ds, _predictor)
            # 5) all runs individually
            target_oracle_multirun, tss_oracle_multirun = get_ranknet_multirun(
                oracle_ret[mid],
                test_cars[0], test_ds, _predictor, sampleCnt=loopcnt)
            retdata[carno] = [[tss_oracle1, tss_oracle2, tss_oracle3, tss_oracle4, tss_oracle_multirun],
                              [target_oracle1, target_oracle2, target_oracle3, target_oracle4, target_oracle_multirun]]
        alldata = retdata
        if _savedata:
            with open(LONG_FORECASTING_DFS, 'wb') as f:
                pickle.dump(alldata, f, pickle.HIGHEST_PROTOCOL)
# In[13]:
# --------------------------------------------------------------------------
# Optionally render the long-forecast figures, then print the final table.
# --------------------------------------------------------------------------
if _draw_figs:
    if _forecast_mode == 'shortterm' and _joint_train == False:
        destdir = FORECAST_FIGS_DIR
        if _skip_overwrite and os.path.exists(destdir):
            print('Long Forecasting Figures at:', destdir)
        else:
            # plotting helpers read the rank data through the globals module
            gvar.rankdata = rankdata
            os.makedirs(destdir, exist_ok=True)
            for carno in alldata:
                plotoracle(alldata, carno, destdir)
            # summary figure across all cars
            outputfile = destdir + f'{configname}'
            plotallcars(alldata, outputfile, drawid=0)

# final output
pd.set_option("display.max_rows", None, "display.max_columns", None)
print(oracle_eval_result)
| 40,631 | 36.345588 | 185 | py |
rankpredictor | rankpredictor-master/run/24.inctrain/RankNet-QuickTest-Slim.py | #!/usr/bin/env python
# coding: utf-8
# ## QuickTest Slim
#
# based on : RankNet-QuickTest-Joint
#
# makedb laptime
# makedb gluonts
# train model
# evaluate model
#
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os,sys
import random
import mxnet as mx
from mxnet import gluon
import pickle
import json
import copy
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from pathlib import Path
import configparser
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from gluonts.dataset.util import to_pandas
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from indycar.model.NaivePredictor import NaivePredictor
from indycar.model.deeparw import DeepARWeightEstimator
#import indycar.model.stint_simulator_shortterm_pitmodel as stint
import indycar.model.quicktest_simulator as stint
# import all functions
#from indycar.model.global_variables import _hi
import indycar.model.global_variables as gvar
from indycar.model.quicktest_modules_inctrain import *
# ## run
# In[2]:
### run
# Logger named after the invoked script; root level INFO so library
# messages show up as well.
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)

import logging.config

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(logging.INFO)
logger.info("running %s", ' '.join(sys.argv))
# cmd argument parser
# Command-line interface: one positional config file plus optional
# overrides for the values read from it.
usage = 'RankNet-QuickTest.py <configfile> [options]'
parser = OptionParser(usage)
parser.add_option("--forecast_mode", dest="forecast_mode", default="")
parser.add_option("--trainmodel", default='', dest="trainmodel")
parser.add_option("--testmodel", default='', dest="testmodel")
parser.add_option("--joint_train", action="store_true", default=False, dest="joint_train")
parser.add_option("--loopcnt", default=-1, type='int', dest="loopcnt")
parser.add_option("--gpuid", default=-1, type='int', dest="gpuid")
parser.add_option("--pitmodel_bias", default=-1, type='int', dest="pitmodel_bias")
parser.add_option("--trainrace", default='Indy500', dest="trainrace")
parser.add_option("--test_event", default='', dest="test_event")
parser.add_option("--suffix", default='', dest="suffix")
parser.add_option("--dataroot", default='data/', dest="dataroot")
parser.add_option("--prediction_length", default=-1, type='int', dest="prediction_length")
parser.add_option("--context_length", default=-1, type='int', dest="context_length")
parser.add_option("--weight_coef", default=-1, type='float', dest="weight_coef")
parser.add_option("--lr", default=1e-3, type='float', dest="learning_rate")
parser.add_option("--patience", default=10, type='int', dest="patience")
parser.add_option("--use_validation", action="store_true", default=False, dest="use_validation")
parser.add_option("--context_ratio", default=-1, type='float', dest="context_ratio")
parser.add_option("--test_context_ratio", default=-1, type='float', dest="test_context_ratio")
parser.add_option("--batch_size", default=-1, type='int', dest="batch_size")

opt, args = parser.parse_args()
print(len(args), opt.joint_train)

# exactly one positional argument (the config file) is required
if len(args) != 1:
    logger.error(globals()['__doc__'] % locals())
    sys.exit(-1)

configfile = args[0]
base = os.path.basename(configfile)
configname = os.path.splitext(base)[0]
WorkRootDir = 'QuickTestOutput'
if not os.path.exists(configfile):
    print('config file not exists error:', configfile)
    sys.exit(-1)
# Read every experiment parameter from the ini file; the CLI overrides
# applied later take precedence over these values.
if configfile != '':
    config = configparser.RawConfigParser()
    config.read(configfile)
    section = "RankNet-QuickTest"
    _savedata = config.getboolean(section, "_savedata")
    _skip_overwrite = config.getboolean(section, "_skip_overwrite")
    _inlap_status = config.getint(section, "_inlap_status")          # 0
    _feature_mode = config.getint(section, "_feature_mode")          # FEATURE_STATUS
    _featureCnt = config.getint(section, "_featureCnt")              # 9
    freq = config.get(section, "freq")                               # "1min"
    _train_len = config.getint(section, "_train_len")                # 40
    prediction_length = config.getint(section, "prediction_length")  # 2
    context_ratio = config.getfloat(section, "context_ratio")        # 0.
    context_length = config.getint(section, "context_length")        # 40
    dataset = config.get(section, "dataset")                         # 'rank'
    epochs = config.getint(section, "epochs")                        # 1000
    gpuid = config.getint(section, "gpuid")                          # 5
    _use_weighted_model = config.getboolean(section, "_use_weighted_model")
    trainmodel = config.get(section, "trainmodel")
    _use_cate_feature = config.getboolean(section, "_use_cate_feature")
    distroutput = config.get(section, "distroutput")                 # 'student'
    batch_size = config.getint(section, "batch_size")                # 32
    loopcnt = config.getint(section, "loopcnt")                      # 2
    _test_event = config.get(section, "_test_event")                 # 'Indy500-2018'
    testmodel = config.get(section, "testmodel")                     # 'oracle'
    pitmodel = config.get(section, "pitmodel")                       # 'oracle'
    year = config.get(section, "year")                               # '2018'
    contextlen = context_length
    use_feat_static = _use_cate_feature
    train_years = config.get(section, "train_years", fallback='2013,2014,2015,2016,2017')
    _train_years = train_years.split(',')
else:
    print('Warning, please use config file')
    sys.exit(0)
# In[3]:
# Defaults for parameters that are not in older config files.
_draw_figs = False
_test_train_len = 40
_joint_train = False
_pitmodel_bias = 0
# forecast granularity: 'shortterm' or 'stint'
_forecast_mode = 'shortterm'
_weight_coef = 9

# Command-line arguments override the config file; sentinel defaults
# ('' / -1 / False) mean "keep the configured value".
if opt.forecast_mode != '':
    _forecast_mode = opt.forecast_mode
if opt.trainmodel != '':
    trainmodel = opt.trainmodel
if opt.testmodel != '':
    testmodel = opt.testmodel
if opt.joint_train != False:
    _joint_train = True
if opt.gpuid >= 0 or opt.gpuid < -1:
    # -1 is the "unset" sentinel; any other value (incl. CPU codes) wins
    gpuid = opt.gpuid
if opt.loopcnt > 0:
    loopcnt = opt.loopcnt
if opt.prediction_length > 0:
    prediction_length = opt.prediction_length
if opt.context_length > 0:
    context_length = opt.context_length
if opt.weight_coef > 0:
    _weight_coef = opt.weight_coef
if opt.pitmodel_bias >= 0:
    _pitmodel_bias = opt.pitmodel_bias
if opt.test_event != '':
    _test_event = opt.test_event
if opt.suffix:
    _debugstr = f'-{opt.suffix}'
else:
    _debugstr = ''
if opt.context_ratio > 0:
    context_ratio = opt.context_ratio
if opt.test_context_ratio > 0:
    test_context_ratio = opt.test_context_ratio
else:
    test_context_ratio = context_ratio
if opt.batch_size > 0:
    batch_size = opt.batch_size
if opt.learning_rate > 0:
    gvar.learning_rate = opt.learning_rate
if opt.patience > 0:
    gvar.patience = opt.patience
gvar.use_validation = opt.use_validation
dataroot = opt.dataroot
trainrace = opt.trainrace

# the test event id doubles as the legacy 'year' key
year = _test_event
if testmodel == 'pitmodel':
    testmodel = 'pitmodel%s' % (_pitmodel_bias if _pitmodel_bias != 0 else '')
cur_featurestr = decode_feature_mode(_feature_mode)
# In[4]:
#
# string map
#
inlapstr = {0:'noinlap',1:'inlap',2:'outlap'}
weightstr = {True:'weighted',False:'noweighted'}
catestr = {True:'cate',False:'nocate'}
#
# input data parameters
#
# event -> car#, maxlap
events_info = {
'Phoenix':(256, 1.022, 250),'Indy500':(500,2.5,200),'Texas':(372,1.5,248),
'Iowa':(268,0.894,300),'Pocono':(500,2.5,200),'Gateway':(310,1.25,248)
}
_race_info = {}
# the races have 7 years data
races = ['Indy500', 'Texas','Iowa','Pocono']
years = ['2013','2014','2015','2016','2017','2018','2019']
events = []
for race in races:
events.extend([f'{race}-{x}' for x in years])
events.extend(['Phoenix-2018','Gateway-2018','Gateway-2019'])
events_id={key:idx for idx, key in enumerate(events)}
# dataset shared
dataOutputRoot = "data/"
covergap = 1
dbid = f'IndyCar_d{len(events)}_v{_featureCnt}_p{_inlap_status}'
LAPTIME_DATASET = f'{dataOutputRoot}/laptime_rank_timediff_pit-oracle-{dbid}.pickle'
STAGE_DATASET = f'{dataOutputRoot}/stagedata-{dbid}.pickle'
PITCOVERED_DATASET = f'{dataOutputRoot}/pitcoveredlaps-{dbid}-g{covergap}.pickle'
dbid = f'IndyCar_d{len(events)}_v{_featureCnt}_p{_inlap_status}'
_dataset_id = '%s-%s' % (inlapstr[_inlap_status], cur_featurestr)

# Resolve the training event ids. trainrace is either a single race name
# or a comma-separated list; Pocono has no 2014 race, so its year list
# always excludes 2014.
if trainrace.find(',') > 0:
    # multiple training races
    _train_events = []
    for race in trainrace.split(','):
        # BUGFIX: the per-race year list must depend on the race being
        # added, not on the (comma-containing) trainrace string — the old
        # check `trainrace == 'Pocono'` could never be true here, so
        # Pocono-2014 was wrongly included in multi-race training.
        if race == 'Pocono':
            _train_years = ['2013', '2015', '2016', '2017']
        else:
            _train_years = ['2013', '2014', '2015', '2016', '2017']
        _train_events.extend([events_id[x] for x in [f'{race}-{x}' for x in _train_years]])
    if pitmodel != 'oracle':
        logger.error('WARNING: pitmodel should be oracle when training on multiple races')
        sys.exit(-1)
else:
    # single training race
    if trainrace == 'Pocono':
        _train_years = ['2013', '2015', '2016', '2017']
    _train_events = [events_id[x] for x in [f'{trainrace}-{x}' for x in _train_years]]

# Substitute the TRAINRACE placeholder in the pit-model name.
# (Uses `in` so a placeholder at position 0 is matched as well.)
if 'TRAINRACE' in pitmodel:
    pitmodel = pitmodel.replace('TRAINRACE', trainrace)
#
# Internal parameters.
#
distr_outputs = {'student': StudentTOutput(),
                 'negbin': NegativeBinomialOutput()}
distr_output = distr_outputs[distroutput]

# Experiment id encodes every switch that changes the produced model.
experimentid = f'{weightstr[_use_weighted_model]}-{inlapstr[_inlap_status]}-{cur_featurestr}-{catestr[_use_cate_feature]}-c{context_length}{_debugstr}'

outputRoot = f"{WorkRootDir}/{experimentid}/"
version = f'IndyCar-d{trainrace}-endlap'

# Standard output file names.
SIMULATION_OUTFILE = f'{outputRoot}/{_test_event}/{_forecast_mode}-dfout-{trainmodel}-indy500-{dataset}-{inlapstr[_inlap_status]}-{cur_featurestr}-{testmodel}-l{loopcnt}-alldata.pickle'
EVALUATION_RESULT_DF = f'{outputRoot}/{_test_event}/{_forecast_mode}-evaluation_result_d{dataset}_m{testmodel}.csv'
LONG_FORECASTING_DFS = f'{outputRoot}/{_test_event}/{_forecast_mode}-long_forecasting_dfs_d{dataset}_m{testmodel}.pickle'
FORECAST_FIGS_DIR = f'{outputRoot}/{_test_event}/{_forecast_mode}-forecast-figs-d{dataset}_m{testmodel}/'

# Publish the resolved configuration through the globals module so the
# simulator/helper modules see the same values.
gvar._savedata = _savedata
gvar._skip_overwrite = _skip_overwrite
gvar._inlap_status = _inlap_status
gvar._feature_mode = _feature_mode
gvar._featureCnt = _featureCnt
gvar.freq = freq
gvar._train_len = _train_len
gvar.prediction_length = prediction_length
gvar.context_ratio = context_ratio
gvar.test_context_ratio = test_context_ratio
gvar.context_length = context_length
gvar.contextlen = contextlen
gvar.dataset = dataset
gvar.epochs = epochs
gvar.gpuid = gpuid
gvar._use_weighted_model = _use_weighted_model
gvar.trainmodel = trainmodel
gvar._use_cate_feature = _use_cate_feature
gvar.use_feat_static = use_feat_static
gvar.distroutput = distroutput
gvar.batch_size = batch_size
gvar.loopcnt = loopcnt
gvar._test_event = _test_event
gvar.testmodel = testmodel
gvar.pitmodel = pitmodel
gvar.year = year
gvar._forecast_mode = _forecast_mode
gvar._test_train_len = _test_train_len
gvar._joint_train = _joint_train
gvar._pitmodel_bias = _pitmodel_bias
gvar._train_events = _train_events
gvar._weight_coef = _weight_coef
gvar.dbid = dbid
gvar.LAPTIME_DATASET = LAPTIME_DATASET

# ### 1. make laptime dataset
stagedata = {}
global_carids = {}
os.makedirs(outputRoot, exist_ok=True)
os.makedirs(f'{outputRoot}/{_test_event}', exist_ok=True)

# check the dest files first
gvar._race_info = _race_info
gvar.events = events
gvar.events_id = events_id
# --------------------------------------------------------------------------
# Laptime / stage / pit-coverage datasets: reload cached pickles when
# allowed, otherwise rebuild them from the raw per-event records.
# --------------------------------------------------------------------------
if _skip_overwrite and os.path.exists(LAPTIME_DATASET) and os.path.exists(STAGE_DATASET):
    print('Load laptime and stage dataset:', LAPTIME_DATASET, STAGE_DATASET)
    with open(LAPTIME_DATASET, 'rb') as f:
        global_carids, laptime_data = pickle.load(f, encoding='latin1')
    with open(STAGE_DATASET, 'rb') as f:
        stagedata, _race_info, _events, _events_id = pickle.load(f, encoding='latin1')
    with open(PITCOVERED_DATASET, 'rb') as f:
        pitdata = pickle.load(f, encoding='latin1')
    # the cached event list must match the one built above
    if not _events == events:
        print('Error, events mismatch at:', STAGE_DATASET)
        sys.exit(-1)
else:
    # Build stage data per event and assign a global id to every car.
    cur_carid = 0
    for event in events:
        # alldata, rankdata, acldata, flagdata
        stagedata[event] = load_data(event)
        alldata, rankdata, acldata, flagdata = stagedata[event]
        carlist = set(acldata['car_number'])
        laplist = set(acldata['completed_laps'])
        print('%s: carno=%d, lapnum=%d' % (event, len(carlist), len(laplist)))
        _race_info[event] = (len(carlist), len(laplist) - 1, max(laplist))
        # extend the global car-id map
        for car in carlist:
            if car not in global_carids:
                global_carids[car] = cur_carid
                cur_carid += 1
    laptime_data = get_laptime_dataset(stagedata, inlap_status=_inlap_status)

    ### pit-covered laps: every pit lap +/- covergap laps
    pitdata = {}
    for event in events:
        alldata, rankdata, acldata, flagdata = stagedata[event]
        totallaps = np.max(rankdata.completed_laps.to_numpy())
        pitlaps = rankdata[rankdata['lap_status'] == 'P'].completed_laps.to_numpy()
        pitlaps = set(sorted(pitlaps))
        pitcoveredlaps = []
        for lap in pitlaps:
            gap = range(lap - covergap, lap + covergap + 1)
            pitcoveredlaps.extend(gap)
        pitcoveredlaps = set(sorted(pitcoveredlaps))
        print(event, 'total:', totallaps, 'pitlaps:', len(pitlaps), 'pitcoveredlaps:', len(pitcoveredlaps))
        pitdata[event] = [pitlaps, pitcoveredlaps]

    if _savedata:
        import pickle
        savefile = LAPTIME_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            # pack [global_carids, laptime_data]
            savedata = [global_carids, laptime_data]
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        savefile = STAGE_DATASET
        print(savefile)
        with open(savefile, 'wb') as f:
            savedata = [stagedata, _race_info, events, events_id]
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        with open(PITCOVERED_DATASET, 'wb') as f:
            savedata = pitdata
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)

# Publish the (possibly reloaded) tables through the globals module.
gvar.global_carids = global_carids
gvar._race_info = _race_info
gvar.events = events
gvar.events_id = events_id
gvar.maxlap = get_event_info(_test_event)[2]
gvar.events_info = events_info
gvar.trainrace = trainrace
# ### 2. make gluonts db
# Echo the effective configuration before the (long) training/simulation
# phases start, then flush so it appears in redirected logs immediately.
print('current configfile:', configfile)
print('trainrace:', trainrace)
print('train_years:', _train_years)
print('trainevents:', _train_events)
print('feature_mode:', _feature_mode, cur_featurestr)
print('trainmodel:', trainmodel)
print('testmodel:', testmodel)
print('pitmodel:', pitmodel)
print('test_event:', _test_event)
print('prediction_length:', prediction_length)
print('context_length:', context_length)
print('weight_coef:', _weight_coef)
print('context_ratio:', context_ratio)
print('gpuid:', gpuid)
sys.stdout.flush()
# In[7]:
# --------------------------------------------------------------------------
# GluonTS dataset: pick the target time series by `dataset`, then reload
# or rebuild the train/test ListDatasets and the prepared laptime data.
# --------------------------------------------------------------------------
outdir = outputRoot + _dataset_id
os.makedirs(outdir, exist_ok=True)
if dataset == 'laptime':
    subdir = 'laptime-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_LAPTIME
elif dataset == 'timediff':
    subdir = 'timediff-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_TIMEDIFF
elif dataset == 'rank':
    subdir = 'rank-indy500'
    os.makedirs(f'{outdir}/{subdir}', exist_ok=True)
    _run_ts = COL_RANK
else:
    print('error, dataset not support: ', dataset)
_task_dir = f'{outdir}/{subdir}/'

useeid = False
interpolate = False
ipstr = '%s-%s' % ('ip' if interpolate else 'noip', 'eid' if useeid else 'noeid')
jointstr = '-joint' if _joint_train else ''
dbname = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}{jointstr}.pickle'
laptimedb = _task_dir + f'gluontsdb-{dataset}-oracle-{ipstr}-all-all-f{freq}-t{prediction_length}-r{_test_event}-indy-{year}-newlaptimedata.pickle'

# check the dest files first
if _skip_overwrite and os.path.exists(dbname) and os.path.exists(laptimedb):
    print('Load Gluonts Dataset:', dbname)
    with open(dbname, 'rb') as f:
        freq, prediction_length, cardinality, train_ds, test_ds = pickle.load(f, encoding='latin1')
    print('.......loaded data, freq=', freq, 'prediction_length=', prediction_length)
    print('Load New Laptime Dataset:', laptimedb)
    with open(laptimedb, 'rb') as f:
        prepared_laptimedata = pickle.load(f, encoding='latin1')
else:
    # categorical cardinalities: car id (+ event id when enabled)
    if useeid:
        cardinality = [len(global_carids), len(laptime_data)]
    else:
        cardinality = [len(global_carids)]
    prepared_laptimedata = prepare_laptimedata(laptime_data,
                                               prediction_length, freq, test_event=_test_event,
                                               train_ratio=0, context_ratio=context_ratio, shift_len=prediction_length)
    train_ds, test_ds, _, _ = make_dataset_byevent(prepared_laptimedata,
                                                   prediction_length, freq,
                                                   useeid=useeid, run_ts=_run_ts,
                                                   test_event=_test_event, log_transform=False,
                                                   context_ratio=context_ratio, train_ratio=0, joint_train=_joint_train)
    if _savedata:
        print('Save Gluonts Dataset:', dbname)
        with open(dbname, 'wb') as f:
            savedata = [freq, prediction_length, cardinality, train_ds, test_ds]
            pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
        print('Save preprocessed laptime Dataset:', laptimedb)
        with open(laptimedb, 'wb') as f:
            pickle.dump(prepared_laptimedata, f, pickle.HIGHEST_PROTOCOL)
# ### 3. train the model
# In[8]:
# --------------------------------------------------------------------------
# Train the forecasting model unless a checkpoint already exists
# (arima needs no training at all).
# --------------------------------------------------------------------------
tag = 'oracle'   # renamed from `id` to avoid shadowing the builtin
run = 1
runid = f'{trainmodel}-{dataset}-all-indy-f1min-t{prediction_length}-e{epochs}-r{run}_{tag}_t{prediction_length}'
modelfile = _task_dir + runid
if trainmodel == 'arima':
    print('Skip train arima model')
elif _skip_overwrite and os.path.exists(modelfile):
    print('Model checkpoint found at:', modelfile)
else:
    # infer the target dimension from the first training entry
    entry = next(iter(train_ds))
    target_dim = entry['target'].shape
    target_dim = target_dim[0] if len(target_dim) > 1 else 1
    print('target_dim:%s', target_dim)
    estimator = init_estimator(trainmodel, gpuid,
                               epochs, batch_size, target_dim,
                               distr_output=distr_output, use_feat_static=use_feat_static)
    if gvar.use_validation:
        predictor = estimator.train(train_ds, test_ds)
    else:
        predictor = estimator.train(train_ds)
    if _savedata:
        os.makedirs(modelfile, exist_ok=True)
        print('Start to save the model to %s', modelfile)
        predictor.serialize(Path(modelfile))
        print('End of saving the model.')
#
# ### 4. evaluate the model
# In[9]:
# Identifiers used by the simulation/evaluation phase.
lapmode = _inlap_status
fmode = _feature_mode
runts = dataset
# composite key for the result dictionaries below
mid = f'{testmodel}-%s-%s-%s-%s' % (runts, year, inlapstr[lapmode], cur_featurestr)
datasetid = outputRoot + _dataset_id
# --------------------------------------------------------------------------
# Simulation: reload cached results when present (still initializing the
# stint module), otherwise run the simulator `loopcnt` times and save.
# --------------------------------------------------------------------------
if _skip_overwrite and os.path.exists(SIMULATION_OUTFILE):
    print('Load Simulation Results:', SIMULATION_OUTFILE)
    with open(SIMULATION_OUTFILE, 'rb') as f:
        dfs, acc, ret, pret = pickle.load(f, encoding='latin1')
    print('.......loaded data, ret keys=', ret.keys())
    # init the stint module; in test mode train_len is forced to 40 so the
    # evaluation results stay comparable across configs
    init_simulation(datasetid, _test_event, 'rank', stint.COL_RANK, 'rank', prediction_length,
                    pitmodel=pitmodel, inlapmode=lapmode, featuremode=fmode,
                    train_len=_test_train_len, pitmodel_bias=_pitmodel_bias, prepared_laptimedata=prepared_laptimedata)
else:
    # run simulation
    acc, ret, pret = {}, {}, {}
    if runts == 'rank':
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                                        'rank', stint.COL_RANK, 'rank',
                                        prediction_length, stint.MODE_ORACLE, loopcnt,
                                        pitmodel=pitmodel, model=testmodel, inlapmode=lapmode, featuremode=fmode,
                                        train_len=_test_train_len, forecastmode=_forecast_mode, joint_train=_joint_train,
                                        pitmodel_bias=_pitmodel_bias, prepared_laptimedata=prepared_laptimedata,
                                        epochs=epochs)
    else:
        acc[mid], ret[mid] = simulation(datasetid, _test_event,
                                        'timediff', stint.COL_TIMEDIFF, 'timediff2rank',
                                        prediction_length, stint.MODE_ORACLE, loopcnt,
                                        pitmodel=pitmodel, model=testmodel, inlapmode=lapmode, featuremode=fmode,
                                        train_len=_test_train_len, forecastmode=_forecast_mode, joint_train=_joint_train,
                                        pitmodel_bias=_pitmodel_bias, prepared_laptimedata=prepared_laptimedata,
                                        epochs=epochs)
    # quantile risk is only defined for the short-term mode
    if _forecast_mode == 'shortterm':
        allsamples, alltss = get_allsamples(ret[mid], year=year)
        _, pret[mid] = prisk_direct_bysamples(allsamples, alltss)
        print(pret[mid])
    # aggregate the runs into a single dataframe (mode=1 -> mean)
    dfs = {}
    mode = 1
    df = get_alldf_mode(ret[mid], year=year, mode=mode, forecast_mode=_forecast_mode)
    name = '%s_%s' % (testmodel, 'mean' if mode == 1 else ('mode' if mode == 0 else 'median'))
    if year not in dfs:
        dfs[year] = {}
    dfs[year][name] = df
    _trim = 0
    _include_final = True
    _include_stintlen = True
    include_str = '1' if _include_final else '0'
    stint_str = '1' if _include_stintlen else ''
    with open(SIMULATION_OUTFILE, 'wb') as f:
        savedata = [dfs, acc, ret, pret]
        pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
#alias
ranknetdf = dfs
ranknet_ret = ret
# In[10]:
# ### 5. final evaluation
# In[11]:
if _skip_overwrite and os.path.exists(EVALUATION_RESULT_DF):
print('Load Evaluation Results:',EVALUATION_RESULT_DF)
oracle_eval_result = pd.read_csv(EVALUATION_RESULT_DF)
else:
# get pit laps, pit-covered-laps
# pitdata[event] = [pitlaps, pitcoveredlaps]
#with open(PITCOVERED_DATASET, 'rb') as f:
# pitdata = pickle.load(f, encoding='latin1')
#with open(STAGE_DATASET, 'rb') as f:
# stagedata, _race_info, _events, _events_id = pickle.load(f, encoding='latin1')
# _alldata, rankdata, _acldata, _flagdata = stagedata[_test_event]
_alldata, rankdata, _acldata, _flagdata = stagedata[_test_event]
##-------------------------------------------------------------------------------
if _forecast_mode == 'shortterm':
#
# Model,SignAcc,MAE,50-Risk,90-Risk
#
cols = ['Year','Model','ExpID','laptype','Top1Acc','SignAcc', 'MAE','50-Risk','90-Risk']
plen = prediction_length
usemeanstr='mean'
#load data
# dfs,acc,ret,pret
retdata = []
#oracle
dfx = ret[mid]
allsamples, alltss = get_allsamples(dfx, year=year)
#_, pret[mid]= prisk_direct_bysamples(ret[mid][0][1], ret[mid][0][2])
_, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
dfout = do_rerank(ranknetdf[year][f'{testmodel}_mean'])
#simulation runs for all test ts
#context_ratio -> only evaluate on part of the testset
startlap = 10
if gvar.test_context_ratio > 0:
maxlap = np.max(dfout['startlap'].values)
minlap = np.min(dfout['startlap'].values)
startlap = int(maxlap * gvar.test_context_ratio)
dfout = dfout[dfout['startlap']>startlap]
print('evalate on dfout, min/max=',minlap, maxlap, ' startlap = ', startlap, ' size=', len(dfout))
accret = stint.get_evalret_shortterm(dfout)[0]
#fsamples, ftss = runs2samples_ex(ranknet_ret[f'oracle-RANK-{year}-inlap-nopitage'],[])
#_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
retdata.append([year,f'{testmodel}',configname,'all', accret[0], accret[4], accret[1], prisk_vals[1], prisk_vals[2]])
for laptype in ['normal','pit']:
# select the set
pitcoveredlaps = pitdata[_test_event][1]
gvar.maxlap = get_event_info(_test_event)[2]
normallaps = set([x for x in range(1, gvar.maxlap + 1)]) - pitcoveredlaps
if laptype == 'normal':
sellaps = normallaps
clearlaps = pitcoveredlaps
else:
sellaps = pitcoveredlaps
clearlaps = normallaps
# pitcoveredlaps start idx = 1
startlaps = [x-plen-1 for x in sellaps]
#sellapidx = np.array([x-1 for x in sellaps])
clearidx = np.array([x-1 for x in clearlaps])
print('sellaps:', len(sellaps), 'clearlaps:',len(clearlaps))
#oracle
#outfile=f'shortterm-dfout-ranknet-indy500-rank-inlap-nopitage-20182019-oracle-l10-alldata-weighted.pickle'
#_all = load_dfout_all(outfile)[0]
#ranknetdf, acc, ret, pret = _all[0],_all[1],_all[2],_all[3]
dfout = do_rerank(ranknetdf[year][f'{testmodel}_mean'])
allsamples, alltss = get_allsamples(dfx, year=year)
allsamples, alltss = clear_samples(allsamples, alltss,clearidx)
_, prisk_vals = prisk_direct_bysamples(allsamples, alltss)
dfout = dfout[dfout['startlap'].isin(startlaps)]
accret = stint.get_evalret_shortterm(dfout)[0]
print(year, laptype,f'RankNet-{testmodel}',accret[0], accret[4], accret[1], prisk_vals[1], prisk_vals[2])
retdata.append([year, f'{testmodel}',configname,laptype, accret[0], accret[4], accret[1], prisk_vals[1], prisk_vals[2]])
##-------------------------------------------------------------------------------
elif _forecast_mode == 'stint':
if testmodel == 'oracle':
#datafile=f'stint-dfout-mlmodels-indy500-tr2013_2017-te2018_2019-end1-oracle-t0-tuned.pickle'
datafile=f'{dataroot}/stint-dfout-mlmodels-{version}-end1-oracle-t0-tuned.pickle'
else:
#datafile=f'stint-dfout-mlmodels-indy500-tr2013_2017-te2018_2019-end1-normal-t0-tuned.pickle'
datafile=f'{dataroot}/stint-dfout-mlmodels-{version}-end1-normal-t0-tuned.pickle'
#preddf = load_dfout(outfile)
with open(datafile, 'rb') as f:
preddf = pickle.load(f, encoding='latin1')[0]
#preddf_oracle = load_dfout(outfile)
ranknet_ret = ret
#discard old year
#year <- _test_event
errlist = {}
errcnt, errlist[year] = cmp_df(ranknetdf[year][f'{testmodel}_mean'], preddf[_test_event]['lasso'])
pitlaps, cautionlaps = get_racestatus_all(rankdata)
retdata = []
#
# Model,SignAcc,MAE,50-Risk,90-Risk
#
cols = ['Year','Model','ExpID','laptype','SignAcc','MAE','50-Risk','90-Risk']
models = {'currank':'CurRank','rf':'RandomForest','svr_lin':'SVM','xgb':'XGBoost'}
for clf in ['currank','rf','svr_lin','xgb']:
print('year:',year,'clf:',clf)
dfout, accret = eval_sync(preddf[_test_event][clf],errlist[year])
fsamples, ftss = df2samples_ex(dfout)
_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
retdata.append([_test_event,models[clf],configname,'all', accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
dfout, accret = eval_sync(ranknetdf[year][f'{testmodel}_mean'], errlist[year],force2int=True)
#fsamples, ftss = df2samples(dfout)
fsamples, ftss = runs2samples(ranknet_ret[mid],errlist[f'{year}'])
_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
retdata.append([_test_event,f'{testmodel}',configname,'all',accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
# split evaluation
if True:
for laptype in ['normalpit','cautionpit']:
# select the set
gvar.maxlap = get_event_info(_test_event)[2]
normallaps = set([x for x in range(1, gvar.maxlap + 1)]) - set(cautionlaps)
if laptype == 'normalpit':
sellaps = normallaps
clearlaps = cautionlaps
else:
sellaps = cautionlaps
clearlaps = normallaps
# pitcoveredlaps start idx = 1
startlaps = [x-1 for x in sellaps]
clearidx = np.array([x-1 for x in clearlaps])
print('sellaps:', len(sellaps), 'clearlaps:',len(clearlaps))
# evaluation start
for clf in ['currank','rf','svr_lin','xgb']:
dfout, accret = eval_sync(preddf[_test_event][clf],errlist[year])
#debug
if clf == 'currank':
print('currank min startlap:', np.min(dfout.startlap.values))
print('currank startlaps:', dfout.startlap.values)
print('currank endlaps:', dfout.endlap.values)
#dfout = dfout[dfout['endlap'].isin(startlaps)]
dfout = dfout[dfout['startlap'].isin(startlaps)]
accret = stint.get_evalret(dfout)[0]
fsamples, ftss = df2samples_ex(dfout)
#fsamples, ftss = clear_samples(fsamples, ftss, clearidx)
_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
#dfout = dfout[dfout['endlap'].isin(startlaps)]
#accret = stint.get_evalret(dfout)[0]
retdata.append([_test_event,models[clf],configname,laptype, accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
dfout, accret = eval_sync(ranknetdf[year][f'{testmodel}_mean'], errlist[year],force2int=True)
print('ranknet min startlap:', np.min(dfout.startlap.values))
print('ranknet startlaps:', dfout.startlap.values)
print('ranknet endlaps:', sorted(set(list((dfout.endlap.values)))))
print('sel laps::', startlaps)
print('clear laps::', clearidx)
print('cautionlaps:', cautionlaps)
dfoutx = dfout[dfout['startlap'].isin(clearidx)]
#dfoutx = dfout[dfout['endlap'].isin(clearidx)]
print('matched cleared endlaps::', sorted(set(list((dfoutx.endlap.values)))))
dfout = dfout[dfout['startlap'].isin(startlaps)]
#dfout = dfout[dfout['endlap'].isin(startlaps)]
print('matched endlaps::', sorted(set(list((dfout.endlap.values)))))
accret = stint.get_evalret(dfout)[0]
#fsamples, ftss = df2samples(dfout)
fsamples, ftss = runs2samples(ranknet_ret[mid],errlist[f'{year}'])
fsamples, ftss = clear_samples(fsamples, ftss, clearidx)
_, prisk_vals = prisk_direct_bysamples(fsamples, ftss)
#dfout = dfout[dfout['endlap'].isin(startlaps)]
#accret = stint.get_evalret(dfout)[0]
retdata.append([_test_event,f'{testmodel}',configname,laptype,accret[0], accret[1], prisk_vals[1], prisk_vals[2]])
# end of evaluation
oracle_eval_result = pd.DataFrame(data=retdata, columns=cols)
if _savedata:
oracle_eval_result.to_csv(EVALUATION_RESULT_DF)
# ### 6. Draw forecasting results
# In[12]:
if _forecast_mode == 'shortterm' and _joint_train == False:
if _skip_overwrite and os.path.exists(LONG_FORECASTING_DFS):
fname = LONG_FORECASTING_DFS
print('Load Long Forecasting Data:',fname)
with open(fname, 'rb') as f:
alldata = pickle.load(f, encoding='latin1')
print('.......loaded data, alldata keys=', alldata.keys())
else:
oracle_ret = ret
mid = f'{testmodel}-%s-%s-%s-%s'%(runts, year, inlapstr[lapmode], cur_featurestr)
print('eval mid:', mid, f'{testmodel}_ret keys:', ret.keys())
## init predictor
_predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
oracle_dfout = do_rerank(dfs[year][f'{testmodel}_mean'])
carlist = set(list(oracle_dfout.carno.values))
carlist = [int(x) for x in carlist]
print('carlist:', carlist,'len:',len(carlist))
#carlist = [13, 7, 3, 12]
#carlist = [13]
retdata = {}
for carno in carlist:
print("*"*40)
print('Run models for carno=', carno)
# create the test_ds first
test_cars = [carno]
#train_ds, test_ds, trainset, testset = stint.make_dataset_byevent(events_id[_test_event],
# prediction_length,freq,
# oracle_mode=stint.MODE_ORACLE,
# run_ts = _run_ts,
# test_event = _test_event,
# test_cars=test_cars,
# half_moving_win = 0,
# train_ratio = 0.01)
train_ds, test_ds, trainset, testset = make_dataset_byevent(prepared_laptimedata, prediction_length,freq,
useeid=useeid, run_ts=_run_ts,
test_event=_test_event, log_transform =False,
context_ratio=context_ratio, train_ratio = 0,
joint_train = _joint_train,
test_cars = test_cars)
if (len(testset) <= 10 + prediction_length):
print('ts too short, skip ', len(testset))
continue
#by first run samples
samples = oracle_ret[mid][0][1][test_cars[0]]
tss = oracle_ret[mid][0][2][test_cars[0]]
target_oracle1, tss_oracle1 = long_predict_bysamples('1run-samples', samples, tss, test_ds, _predictor)
#by first run output df(_use_mean = true, already reranked)
df = oracle_ret[mid][0][0]
dfin_oracle = df[df['carno']==test_cars[0]]
target_oracle2, tss_oracle2 = long_predict_bydf(f'{testmodel}-1run-dfout', dfin_oracle, test_ds, _predictor)
#by multi-run mean at oracle_dfout
df = oracle_dfout
dfin_oracle = df[df['carno']==test_cars[0]]
target_oracle3, tss_oracle3 = long_predict_bydf(f'{testmodel}-multimean', dfin_oracle, test_ds, _predictor)
#no rerank
df = ranknetdf[year][f'{testmodel}_mean']
dfin_oracle = df[df['carno']==test_cars[0]]
target_oracle4, tss_oracle4 = long_predict_bydf(f'{testmodel}-norerank-multimean', dfin_oracle, test_ds, _predictor)
#by multiple runs
target_oracle_multirun, tss_oracle_multirun = get_ranknet_multirun(
oracle_ret[mid],
test_cars[0], test_ds, _predictor,sampleCnt=loopcnt)
retdata[carno] = [[tss_oracle1,tss_oracle2,tss_oracle3,tss_oracle4,tss_oracle_multirun],
[target_oracle1,target_oracle2,target_oracle3,target_oracle4,target_oracle_multirun]]
alldata = retdata
if _savedata:
with open(LONG_FORECASTING_DFS, 'wb') as f:
pickle.dump(alldata, f, pickle.HIGHEST_PROTOCOL)
# In[13]:
if _draw_figs:
if _forecast_mode == 'shortterm' and _joint_train == False:
destdir = FORECAST_FIGS_DIR
if _skip_overwrite and os.path.exists(destdir):
print('Long Forecasting Figures at:',destdir)
else:
#with open(STAGE_DATASET, 'rb') as f:
# stagedata = pickle.load(f, encoding='latin1')
# _alldata, rankdata, _acldata, _flagdata = stagedata[_test_event]
#set gobal variable
gvar.rankdata = rankdata
#destdir = outputRoot + 'oracle-forecast-figs/'
os.makedirs(destdir, exist_ok=True)
for carno in alldata:
plotoracle(alldata, carno, destdir)
#draw summary result
outputfile = destdir + f'{configname}'
plotallcars(alldata, outputfile, drawid = 0)
# final output
pd.set_option("display.max_rows", None, "display.max_columns", None)
print(oracle_eval_result)
| 39,999 | 36.209302 | 185 | py |
rankpredictor | rankpredictor-master/run/13.FullTest/notebook/laptime2rank-evaluate-fulltest-Copy1.py | #!/usr/bin/env python
# coding: utf-8
# ## Laptime2Rank-evaluate-fulltest-disturbance
#
# based on: Laptime2Rank-evaluate-fulltest
#
# rank prediction by laptime forecasting models
#
# support:
# + train/test split by ratio or event
# + incremental training evaluation(adjust ratio)
# + go beyond curtrack and zerotrack by modeling the track status
# + halfwin mode(0:no, 1:halfwin, 2:continous)
# + split by stage, support all events (todo)
#
# + disturbance analysis by adding disturbance to oracle trackstatus and lapstatus
#
# In[1]:
#get_ipython().run_line_magic('matplotlib', 'inline')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import mxnet as mx
from mxnet import gluon
import pickle
import json
import random
from scipy import stats
from sklearn.metrics import mean_squared_error
from gluonts.dataset.common import ListDataset
from gluonts.dataset.util import to_pandas
from pathlib import Path
from gluonts.model.deepar import DeepAREstimator
from gluonts.model.deep_factor import DeepFactorEstimator
from gluonts.model.deepstate import DeepStateEstimator
from gluonts.trainer import Trainer
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.evaluation import Evaluator, MultivariateEvaluator
from gluonts.distribution.multivariate_gaussian import MultivariateGaussianOutput
from gluonts.model.predictor import Predictor
from gluonts.model.prophet import ProphetPredictor
from gluonts.model.r_forecast import RForecastPredictor
from indycar.model.NaivePredictor import NaivePredictor
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# In[2]:
import os  # NOTE(review): duplicate import, os is already imported at the top of the file
# seed the PRNG from system entropy -> runs are intentionally non-deterministic
random.seed()
# notebook leftover: returns the cwd for display, result unused in script form
os.getcwd()
#GPUID = 1
# In[3]:
def load_data(event, year):
    """Load one race's final timing CSV and derive lap tables.

    Args:
        event: event name used to build the CSV filename.
        year: season year (string) used to build the CSV filename.

    Returns:
        (alldata, rankdata, acldata):
        alldata  - the raw CSV contents;
        rankdata - one record per (car_number, completed_laps), the earliest
                   by elapsed_time (original row order as tie-breaker);
        acldata  - per-lap table with rank/time diffs (see make_cl_data).

    Note: the original version also computed a lap table restricted to cars
    that completed the final lap (``cldata``); that result was never returned
    or used anywhere, so the dead computation was removed.
    """
    inputfile = '../data/final/C_' + event + '-' + year + '-final.csv'
    dataset = pd.read_csv(inputfile)
    # make a copy
    alldata = dataset.copy()
    # stable sort by elapsed_time ('MyIdx' index keeps the original order as
    # tie-breaker), then keep the earliest record per (car, lap)
    rankdata = alldata.rename_axis('MyIdx').sort_values(by=['elapsed_time', 'MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    acldata = make_cl_data(alldata)
    return alldata, rankdata, acldata
# make indy car completed_laps dataset
# car_number, completed_laps, rank, elapsed_time, rank_diff, elapsed_time_diff
def make_cl_data(dataset):
    """Build the per-(car, lap) record table from a raw timing DataFrame.

    For every (car_number, completed_laps) pair the earliest record by
    elapsed_time is kept, then per-car first differences of rank and
    elapsed_time are computed; the first lap of each car gets diff 0.

    Args:
        dataset: raw timing DataFrame; must contain the columns referenced
            below (including the ones dropped here).

    Returns:
        DataFrame with columns [car_number, completed_laps, rank,
        elapsed_time, rank_diff, time_diff, current_status, track_status,
        lap_status], sorted by car then lap.
    """
    # pick up data with valid rank: stable sort by elapsed_time ('MyIdx'
    # index name acts as tie-breaker), keep the first record per (car, lap)
    rankdata = dataset.rename_axis('MyIdx').sort_values(by=['elapsed_time','MyIdx'], ascending=True)
    rankdata = rankdata.drop_duplicates(subset=['car_number', 'completed_laps'], keep='first')
    # resort by car_number, lap
    uni_ds = rankdata.sort_values(by=['car_number', 'completed_laps', 'elapsed_time'], ascending=True)
    uni_ds = uni_ds.drop(["unique_id", "best_lap",
                          "laps_behind_leade","laps_behind_prec","overall_rank","pit_stop_count",
                          "last_pitted_lap","start_position","laps_led"], axis=1)
    # rows where the car number changes are the first lap of a car; their
    # diff is reset to 0.  Use .loc instead of the original chained
    # indexing (uni_ds['rank_diff'][mask] = 0), which raises
    # SettingWithCopyWarning and may silently fail to write back.
    first_of_car = uni_ds.car_number != uni_ds.car_number.shift(1)
    uni_ds['rank_diff'] = uni_ds['rank'].diff()
    uni_ds.loc[first_of_car, 'rank_diff'] = 0
    uni_ds['time_diff'] = uni_ds['elapsed_time'].diff()
    uni_ds.loc[first_of_car, 'time_diff'] = 0
    df = uni_ds[['car_number','completed_laps','rank','elapsed_time',
                 'rank_diff','time_diff',"current_status", "track_status", "lap_status"]]
    return df
# In[4]:
def nan_helper(y):
    """Helper to handle indices and logical indices of NaNs.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, logical indices of NaNs
        - index, a function, with signature indices= index(logical_indices),
          to convert logical indices of NaNs to 'equivalent' indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x= nan_helper(y)
        >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
    """
    def to_index(logical_mask):
        # positions where the boolean mask is True
        return logical_mask.nonzero()[0]
    return np.isnan(y), to_index
def test_flag(a, bitflag):
return (a & bitflag) == bitflag
#
# remove NaN at the tail
# there should be no nans in the middle of the ts
#
# Row indices into each per-car record matrix rec[feature, lap]
# (the `rec` arrays consumed by make_dataset / make_dataset_byevent):
COL_LAPTIME=0
COL_RANK=1
COL_TRACKSTATUS=2
COL_LAPSTATUS=3
COL_TIMEDIFF=4
COL_CAUTION_LAPS_INSTINT=5
COL_LAPS_INSTINT= 6
# Oracle-mode bit flags: combine with | and query with test_flag().
MODE_ORACLE = 0
MODE_NOLAP = 1
MODE_NOTRACK = 2
MODE_TESTZERO = 4
MODE_TESTCURTRACK = 8
MODE_PREDTRACK = 16
MODE_PREDPIT = 32
# disturbance-analysis flags: perturb the oracle track/lap status covariates
MODE_DISTURB_CLEARTRACK = 64
MODE_DISTURB_ADJUSTTRACK = 128
MODE_DISTURB_ADJUSTPIT = 256
#MODE_STR={MODE_ORACLE:'oracle', MODE_NOLAP:'nolap',MODE_NOTRACK:'notrack',MODE_TEST:'test'}
def make_dataset(runs, prediction_length, freq,
                 useeid = False,
                 run_ts=COL_LAPTIME,
                 train_ratio = 0.8,
                 use_global_dict = True,
                 oracle_mode = MODE_ORACLE,
                 test_cars = [],
                 half_moving_win = True
                 ):
    """
    split the ts to train and test part by the ratio

    Builds GluonTS ListDatasets from the module-global `laptime_data`
    (one entry per event: [eventid, carids, rec-array]); also reads the
    globals `events` and `global_carids` (NOTE(review): assumed to be
    loaded elsewhere -- not visible in this file chunk).

    Args:
        runs: event index to use; negative means all events.
        prediction_length: forecast horizon in laps.
        freq: pandas frequency string for the synthetic timestamps.
        useeid: also include the event id as a static categorical feature.
        run_ts: feature row used as the forecast target (e.g. COL_LAPTIME).
        train_ratio: fraction of the longest ts used as training length.
        use_global_dict: map car numbers through global_carids.
        oracle_mode: MODE_* bit flags; false (MODE_ORACLE) keeps the true
            covariates, other flags mask/overwrite the track and lap
            status covariates in the testset.
        test_cars: restrict the test set to these car numbers
            (NOTE(review): mutable default list; never mutated here).
        half_moving_win: roll the test window by prediction_length/2
            instead of a full prediction_length.

    Returns:
        (train_ds, test_ds, train_set, test_set).
    """
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []
    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)
        print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            #eval on carids
            if test_cars and (carno not in test_cars):
                continue
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP):
                rec[COL_LAPSTATUS, :] = 0
            # split and add to dataset record: first train_len laps -> train
            _train.append({'target': rec[run_ts,:train_len].astype(np.float32),
                          'start': start,
                          'feat_static_cat': static_cat,
                          'feat_dynamic_real': [rec[COL_TRACKSTATUS,:train_len],
                                                rec[COL_LAPSTATUS,:train_len]]
                          }
                          )
            # multiple test ts(rolling window as half of the prediction_length)
            test_rec_cnt = 0
            step = -int(prediction_length/2) if half_moving_win else -prediction_length
            for endpos in range(max_len, train_len+prediction_length, step):
                #check if enough for this ts
                if endpos > totallen:
                    continue
                track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                # test mode: overwrite covariates inside the forecast window
                if test_flag(oracle_mode, MODE_TESTCURTRACK):
                    # since nan does not work, use cur-val instead
                    track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                    #track_rec[-prediction_length:] = random.randint(0,1)
                    #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                    lap_rec[-prediction_length:] = 0
                elif test_flag(oracle_mode, MODE_TESTZERO):
                    #set prediction part as nan
                    #track_rec[-prediction_length:] = np.nan
                    #lap_rec[-prediction_length:] = np.nan
                    track_rec[-prediction_length:] = 0
                    lap_rec[-prediction_length:] = 0
                _test.append({'target': rec[run_ts,:endpos].astype(np.float32),
                              'start': start,
                              'feat_static_cat': static_cat,
                              'feat_dynamic_real': [track_rec,lap_rec]
                              #'feat_dynamic_real': [rec[COL_TRACKSTATUS,:endpos],
                              #                      rec[COL_LAPSTATUS,:endpos]]
                              }
                              )
                test_rec_cnt += 1
            #add one ts
            print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
        train_set.extend(_train)
        test_set.extend(_test)
    print(f'prediction_length:{prediction_length},train len:{len(train_set)}, test len:{len(test_set)}')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)
    return train_ds, test_ds, train_set, test_set
# endpos -> vector of prediction_length
_track_pred = {}
_track_true = {}
def init_track_model():
    """Reset the per-endpos track-status prediction caches."""
    global _track_pred, _track_true
    _track_pred, _track_true = {}, {}
def get_track_model(track_rec, endpos, prediction_length, context_len=10):
    """
    Return the predicted track status for the forecast window.

    Draws a total caution length from an empirical model (Indy500 2018)
    and, when a caution is in progress right before the window, marks its
    predicted remaining laps with 1.  Results are cached per endpos in the
    module globals _track_pred / _track_true (reset via init_track_model).
    """
    global _track_pred, _track_true
    # this is the perfect track model for Indy500 2018
    caution_lengths = [6, 4, 4, 5, 6, 6, 4]
    if endpos in _track_pred:
        return _track_pred[endpos]
    # count consecutive yellow-flag laps immediately preceding the window
    yflaplen = 0
    for back in range(1, context_len):
        if track_rec[-prediction_length - back] != 1:
            break
        yflaplen += 1
    prediction = np.zeros(prediction_length, dtype=int)
    drawn = random.choice(caution_lengths)
    if yflaplen > 0 and drawn > yflaplen:
        # caution in progress: fill its remaining laps into the future
        prediction[:(drawn - yflaplen)] = 1
    _track_pred[endpos] = prediction
    _track_true[endpos] = track_rec[-prediction_length:].copy()
    return prediction
# endpos -> vector of prediction_length
_track_adjust = {}
def init_adjust_track_model():
    """Reset the per-endpos track-status adjustment cache."""
    global _track_adjust
    _track_adjust = dict()
def adjust_track_model(track_rec, endpos, prediction_length, tailpos):
    """
    Randomly move the end of a caution-lap run by -1/0/+1 laps.

    input:
        tailpos ; <0 end pos of 1
    Returns the adjusted track status for the forecast window, cached per
    endpos in the module-global _track_adjust.
    """
    global _track_adjust
    offsets = [-1, 0, 1]
    if endpos in _track_adjust:
        return _track_adjust[endpos]
    shift = random.choice(offsets)
    adjusted = track_rec[-prediction_length:].copy()
    if shift == -1:
        # shorten the caution run by one lap
        adjusted[tailpos] = 0
    elif shift == 1:
        # move the last caution lap one position later,
        # when that position is still inside the window
        adjusted[tailpos] = 0
        if (tailpos + 1) <= -1:
            adjusted[tailpos + 1] = 1
    _track_adjust[endpos] = adjusted
    return adjusted
def adjust_pit_model(lap_rec, endpos, prediction_length):
    """Randomly jitter each predicted pit lap by -1/0/+1 positions.

    Args:
        lap_rec: per-lap pit-status sequence (1 = pit lap); only the last
            `prediction_length` entries are considered.
        endpos: unused; kept for signature compatibility with callers.
        prediction_length: length of the forecast window.

    Returns:
        A copy of the last `prediction_length` entries with every pit lap
        independently shifted by a random offset in {-1, 0, +1}; shifts
        falling outside the window leave the pit where it was.

    Fixes vs the original: an unused `lap_adjust = random.choice(...)`
    draw was removed, and pits are collected before the loop so that a pit
    shifted forward is not re-adjusted when the scan reaches its new slot.
    """
    adjust_model = [-1, 0, 1]
    lapadjust = lap_rec[-prediction_length:].copy()
    # snapshot of the original pit positions (avoids re-processing a pit
    # that the loop itself just moved forward)
    pit_positions = [pos for pos in range(prediction_length) if lapadjust[pos] == 1]
    for pos in pit_positions:
        pos_adjust = random.choice(adjust_model)
        if pos_adjust == -1:
            if pos - 1 >= 0:
                lapadjust[pos] = 0
                lapadjust[pos - 1] = 1
        elif pos_adjust == 1:
            if pos + 1 < prediction_length:
                lapadjust[pos] = 0
                lapadjust[pos + 1] = 1
    return lapadjust
# pit model is separate for each car
def get_pit_model(cuation_laps_instint, laps_instint, prediction_length):
    """Predict the pit-stop lap inside the forecast window.

    Draws a stint length from an empirical pit model (Indy500 2018) and
    marks the corresponding lap if it falls inside the window.

    Args:
        cuation_laps_instint: caution laps seen so far in the current
            stint; > 10 selects the caution-heavy (shorter-stint) model.
        laps_instint: laps already run in the current stint.
        prediction_length: length of the forecast window.

    Returns:
        np.ndarray of shape (prediction_length,), all zeros except at
        most one 1 marking the predicted pit lap.

    Note: an unused `pit_model_top8` table (top-8 cars variant) was dead
    code in the original and has been removed.
    """
    # this is the perfect empirical pit model for Indy500 2018
    # (index 0: caution-heavy stints, index 1: green-flag stints)
    pit_model_all = [[33, 32, 35, 32, 35, 34, 35, 34, 37, 32, 37, 30, 33, 36, 35, 33, 36, 30, 31, 33, 36, 37, 35, 34, 34, 33, 37, 35, 39, 32, 36, 35, 34, 32, 36, 32, 31, 36, 33, 33, 35, 37, 40, 32, 32, 34, 35, 36, 33, 37, 35, 37, 34, 35, 39, 32, 31, 37, 32, 35, 36, 39, 35, 36, 34, 35, 33, 33, 34, 32, 33, 34],
                     [45, 44, 46, 44, 43, 46, 45, 43, 41, 48, 46, 43, 47, 45, 49, 44, 48, 42, 44, 46, 45, 45, 43, 44, 44, 43, 46]]
    pit_model = pit_model_all
    if cuation_laps_instint > 10:
        # use low model
        pred_pit_laps = random.choice(pit_model[0])
    else:
        pred_pit_laps = random.choice(pit_model[1])
    # laps remain, fill into the future
    pitpred = np.zeros(prediction_length, dtype=int)
    if (pred_pit_laps > laps_instint) and (pred_pit_laps <= laps_instint + prediction_length):
        pitpred[pred_pit_laps - laps_instint - 1] = 1
    return pitpred
def make_dataset_byevent(runs, prediction_length, freq,
                         useeid = False,
                         run_ts=COL_LAPTIME,
                         test_event = 'Indy500',
                         test_cars = [],
                         use_global_dict = True,
                         oracle_mode = MODE_ORACLE,
                         half_moving_win = 0,
                         train_ratio=0.8
                         ):
    """
    split the ts to train and test part by event

    The event named `test_event` supplies the rolling test slices; all
    other events go entirely into the training set.  Reads the module
    globals `laptime_data`, `events`, `global_carids` (NOTE(review):
    loaded elsewhere -- not visible in this file chunk).

    input:
        oracle_mode: MODE_* bit flags; false (MODE_ORACLE) keeps the true
            covariates, other flags mask, overwrite or randomly disturb
            the track and lap status covariates in the testset
        half_moving_win ; extend to 0:-1 ,1:-1/2plen, 2:-plen
            (step of the rolling test window)
        test_cars: NOTE(review): accepted but never used in this version
            (unlike make_dataset above) -- confirm intended.
        test_cars is also a mutable default list (never mutated here).

    Returns (train_ds, test_ds, train_set, test_set).
    """
    # reset the per-endpos caches used by the track/pit disturbance models
    init_track_model()
    init_adjust_track_model()
    start = pd.Timestamp("01-01-2019", freq=freq) # can be different for each time series
    train_set = []
    test_set = []
    #select run
    if runs>=0:
        _laptime_data = [laptime_data[runs].copy()]
    else:
        _laptime_data = laptime_data.copy()
    #_data: eventid, carids, datalist[carnumbers, features, lapnumber]->[laptime, rank, track, lap]]
    for _data in _laptime_data:
        _train = []
        _test = []
        if events[_data[0]] == test_event:
            test_mode = True
        else:
            test_mode = False
        #statistics on the ts length
        ts_len = [ _entry.shape[1] for _entry in _data[2]]
        max_len = int(np.max(ts_len))
        train_len = int(np.max(ts_len) * train_ratio)
        print(f'====event:{events[_data[0]]}, train_len={train_len}, max_len={np.max(ts_len)}, min_len={np.min(ts_len)}')
        # process for each ts
        for rowid in range(_data[2].shape[0]):
            # rec[features, lapnumber] -> [laptime, rank, track_status, lap_status,timediff]]
            rec = _data[2][rowid].copy()
            #remove nan(only tails)
            nans, x= nan_helper(rec[run_ts,:])
            nan_count = np.sum(nans)
            rec = rec[:, ~np.isnan(rec[run_ts,:])]
            # remove short ts
            totallen = rec.shape[1]
            if ( totallen < train_len + prediction_length):
                print(f'a short ts: carid={_data[1][rowid]},len={totallen}')
                continue
            if use_global_dict:
                carno = _data[1][rowid]
                carid = global_carids[_data[1][rowid]]
            else:
                #simulation dataset, todo, fix the carids as decoder
                carno = rowid
                carid = rowid
            if useeid:
                static_cat = [carid, _data[0]]
            else:
                static_cat = [carid]
            # selection of features
            if test_flag(oracle_mode, MODE_NOTRACK):
                rec[COL_TRACKSTATUS, :] = 0
            if test_flag(oracle_mode, MODE_NOLAP):
                rec[COL_LAPSTATUS, :] = 0
            test_rec_cnt = 0
            if not test_mode:
                # all go to train set
                _train.append({'target': rec[run_ts,:].astype(np.float32),
                              'start': start,
                              'feat_static_cat': static_cat,
                              'feat_dynamic_real': [rec[COL_TRACKSTATUS,:],
                                                    rec[COL_LAPSTATUS,:]]
                              }
                              )
            else:
                # reset train_len
                #context_len = prediction_length*2
                #if context_len < 10:
                #    context_len = 10
                context_len = train_len
                # multiple test ts(rolling window as half of the prediction_length)
                #step = -int(prediction_length/2) if half_moving_win else -prediction_length
                if half_moving_win == 1:
                    step = -int(prediction_length/2)
                elif half_moving_win == 2:
                    step = -prediction_length
                else:
                    step = -1
                #bug fix, fixed the split point for all cars/ts
                for endpos in range(max_len, context_len+prediction_length,
                                    step):
                    #check if enough for this ts
                    if endpos > totallen:
                        continue
                    track_rec = rec[COL_TRACKSTATUS, :endpos].copy()
                    lap_rec = rec[COL_LAPSTATUS, :endpos].copy()
                    # stint state at the last lap before the forecast window
                    caution_laps_instint = int(rec[COL_CAUTION_LAPS_INSTINT, endpos -prediction_length - 1])
                    laps_instint = int(rec[COL_LAPS_INSTINT, endpos -prediction_length - 1])
                    # test mode: overwrite covariates inside the forecast window
                    if test_flag(oracle_mode, MODE_TESTCURTRACK):
                        # since nan does not work, use cur-val instead
                        track_rec[-prediction_length:] = track_rec[-prediction_length - 1]
                        #track_rec[-prediction_length:] = random.randint(0,1)
                        #lap_rec[-prediction_length:] = lap_rec[-prediction_length - 1]
                        lap_rec[-prediction_length:] = 0
                    elif test_flag(oracle_mode, MODE_PREDTRACK):
                        # replace oracle track status with the sampled track model
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = 0
                    elif test_flag(oracle_mode, MODE_PREDPIT):
                        # sampled track model plus sampled pit model
                        predrec = get_track_model(track_rec, endpos, prediction_length)
                        track_rec[-prediction_length:] = predrec
                        lap_rec[-prediction_length:] = get_pit_model(caution_laps_instint,
                                                                     laps_instint,prediction_length)
                    elif test_flag(oracle_mode, MODE_TESTZERO):
                        #set prediction part as nan
                        #track_rec[-prediction_length:] = np.nan
                        #lap_rec[-prediction_length:] = np.nan
                        track_rec[-prediction_length:] = 0
                        lap_rec[-prediction_length:] = 0
                    # disturbance analysis (can stack on top of the modes above)
                    if test_flag(oracle_mode, MODE_DISTURB_CLEARTRACK):
                        # clear the oracle track status
                        # future 1s in trackstatus
                        # pattern like 0 1 xx
                        # NOTE(review): range stops at -2, so the final lap of
                        # the window is never cleared -- confirm intended
                        for _pos in range(-prediction_length + 1, -1):
                            if track_rec[_pos - 1] == 0:
                                track_rec[_pos] = 0
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTTRACK):
                        # adjust the end position of track, or caution lap length
                        # find the end of caution laps
                        _tail = 0
                        for _pos in range(-1,-prediction_length + 1,-1):
                            if track_rec[_pos] == 1:
                                #find the tail
                                _tail = _pos
                                break
                        if _tail != 0:
                            #found
                            adjustrec = adjust_track_model(track_rec, endpos, prediction_length, _tail)
                            track_rec[-prediction_length:] = adjustrec
                    if test_flag(oracle_mode, MODE_DISTURB_ADJUSTPIT):
                        # adjust the position of pit
                        if np.sum(lap_rec[-prediction_length:]) > 0:
                            adjustrec = adjust_pit_model(lap_rec, endpos, prediction_length)
                            lap_rec[-prediction_length:] = adjustrec
                    _test.append({'target': rec[run_ts,:endpos].astype(np.float32),
                                  'start': start,
                                  'feat_static_cat': static_cat,
                                  'feat_dynamic_real': [track_rec,lap_rec]
                                  #'feat_dynamic_real': [rec[COL_TRACKSTATUS,:endpos],
                                  #                      rec[COL_LAPSTATUS,:endpos]]
                                  }
                                  )
                    test_rec_cnt += 1
            #add one ts
            print(f'carno:{carno}, totallen:{totallen}, nancount:{nan_count}, test_reccnt:{test_rec_cnt}')
        train_set.extend(_train)
        test_set.extend(_test)
    print(f'train len:{len(train_set)}, test len:{len(test_set)}')
    train_ds = ListDataset(train_set, freq=freq)
    test_ds = ListDataset(test_set, freq=freq)
    return train_ds, test_ds, train_set, test_set
def save_dataset(datafile,freq, prediction_length, cardinality, train_ds, test_ds):
    """Pickle [freq, prediction_length, cardinality, train_ds, test_ds] to datafile."""
    bundle = [freq, prediction_length, cardinality, train_ds, test_ds]
    # highest protocol for compactness/speed
    with open(datafile, 'wb') as fout:
        pickle.dump(bundle, fout, pickle.HIGHEST_PROTOCOL)
# ### test for Indy500
# In[5]:
def predict(test_ds,predictor):
    """
    Run GluonTS evaluation predictions over `test_ds` with `predictor`,
    drawing 100 sample paths per series.

    return: (tss, forecasts) as materialized lists.
    """
    forecast_it, ts_it = make_evaluation_predictions(
        dataset=test_ds,        # test dataset
        predictor=predictor,    # predictor
        num_samples=100,        # number of sample paths we want for evaluation
    )
    # materialize forecasts first, then the ground-truth series
    # (same consumption order as the iterators are returned in)
    forecasts = [f for f in forecast_it]
    tss = [t for t in ts_it]
    print(f'tss len={len(tss)}, forecasts len={len(forecasts)}')
    return tss, forecasts
def run_prediction_ex(test_ds, prediction_length, model_name,trainid):
    """
    Load (or build) the predictor for `model_name` and forecast `test_ds`.

    input:
        test_ds           ; GluonTS test dataset
        prediction_length ; forecast horizon (also encoded in the model dir name)
        model_name        ; curtrack/zerotrack/oracle/deepAR/naive/arima
        trainid           ; id of the training run, selects the model root dir
    return:
        [tss, forecasts]  ; or [] when model_name is not supported

    Improvement: the four copy-pasted deserialize-and-predict branches are
    collapsed into a single name -> directory-pattern table.
    """
    # serialized deepAR-family models: model_name -> directory name pattern
    deepar_models = {
        'curtrack': 'deepAR-Oracle-laptime-curtrack-indy-f1min-t{plen}-e1000-r1_curtrack_t{plen}',
        'zerotrack': 'deepAR-Oracle-laptime-nolap-zerotrack-indy-f1min-t{plen}-e1000-r1_zerotrack_t{plen}',
        'oracle': 'deepAR-Oracle-laptime-all-indy-f1min-t{plen}-e1000-r1_oracle_t{plen}',
        'deepAR': 'deepAR-laptime-all-indy-f1min-t{plen}-e1000-r1_deepar_t{plen}',
    }
    with mx.Context(mx.gpu(7)):
        pred_ret = []
        rootdir = f'../models/remote/laptime-{trainid}/'
        if model_name in deepar_models:
            # deserialize a pre-trained model from disk
            model = deepar_models[model_name].format(plen=prediction_length)
            modeldir = rootdir + model
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = Predictor.deserialize(Path(modeldir))
            print(f'loading model...done!, ctx:{predictor.ctx}')
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        elif model_name == 'naive':
            # persistence baseline; no trained model needed
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = NaivePredictor(freq= freq, prediction_length = prediction_length)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        elif model_name == 'arima':
            # R-backed ARIMA baseline
            print(f'predicting model={model_name}, plen={prediction_length}')
            predictor = RForecastPredictor(method_name='arima',freq= freq,
                    prediction_length = prediction_length,trunc_length=60)
            tss, forecasts = predict(test_ds,predictor)
            pred_ret = [tss, forecasts]
        else:
            print(f'error: model {model_name} not support yet!')
        return pred_ret
# In[6]:
#calc rank
def eval_rank_bytimediff(test_ds,tss,forecasts,prediction_length):
    """
    Evaluate rank prediction for timediff models.
    works for one event only.

    input:
        test_ds ; test set for a single event
        tss, forecasts ; forecast result (forecasts[idx].samples holds sample paths)
        prediction_length ; expected forecast horizon
    return:
        rank_ret ; list of [lap, time_diff, true_rank, pred_rank]
        forecasts_et ; {completed_laps: {carno: [true_timediff, pred_timediff]}}

    Bug fix: the mismatch error message was a plain string missing its
    f-prefix, so the braces were printed literally.
    """
    carlist = []
    # completed_laps -> carno -> [true tail, predicted tail] of the timediff series
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the static cat feature back to the real car number (global map)
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []
        # median over sample paths as the point forecast
        forecast_laptime_mean = np.median(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        timediff_array = tss[idx].values.copy()
        # window id: number of laps completed when this forecast starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [timediff_array[-prediction_len:].copy(),
                forecast_laptime_mean.copy()]
    # calc rank per forecast window
    rank_ret = []
    for lap in forecasts_et.keys():
        # cars present in this window
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}
        # axis 0: 0=true, 1=predicted; then [car, horizon step]
        time_diff = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            time_diff[0, carid, :] = forecasts_et[lap][carno][0].reshape((prediction_len))
            time_diff[1, carid, :] = forecasts_et[lap][carno][1].reshape((prediction_len))
        # rank = position of each car when sorted by time difference (double argsort)
        idx = np.argsort(time_diff[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(time_diff[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, time_diff, true_rank, pred_rank])
    return rank_ret,forecasts_et
#calc rank
def eval_rank(test_ds,tss,forecasts,prediction_length, start_offset):
    """
    evaluate rank by laptime forecasting
    input:
        test_ds ; must be test set for a single event, because test_ds itself does not
                  contain features to identify the eventid
        start_offset[]; elapsed time for lap0, for one specific event
        tss,forecasts ; forecast result
        prediction_length ;
    return:
        rank_ret      ; [lap, elapsed_time, true_rank, pred_rank]
        forecasts_et  ; {[completed_laps][carno]} ->(elapsed_time, elapsed_time_pred)

    Bug fix: the mismatch error message was missing its f-string prefix.
    Also removed a redundant second .copy() of the laptime series
    (np.cumsum does not modify its input, so one copy suffices).
    """
    carlist = []
    # completed_laps -> carno -> [true elapsed tail, predicted elapsed tail]
    forecasts_et = dict()
    ds_iter = iter(test_ds)
    for idx in range(len(test_ds)):
        test_rec = next(ds_iter)
        # map the static cat feature back to the real car number (global map)
        carno = decode_carids[test_rec['feat_static_cat'][0]]
        if carno not in carlist:
            carlist.append(carno)
        # per-car elapsed time at lap 0 for this event
        offset = start_offset[(start_offset['car_number']==carno)].elapsed_time.values[0]
        prediction_len = forecasts[idx].samples.shape[1]
        if prediction_length != prediction_len:
            print(f'error: prediction_len does not match, {prediction_length}:{prediction_len}')
            return []
        # mean over sample paths as the point forecast
        forecast_laptime_mean = np.mean(forecasts[idx].samples, axis=0).reshape((prediction_len,1))
        laptime_array = tss[idx].values.copy()
        # true elapsed time: cumulated laptime plus the lap-0 offset
        elapsed_time = np.cumsum(laptime_array) + offset
        # predicted elapsed time: replace the tail with the forecast and re-cumulate
        laptime_array[-prediction_len:] = forecast_laptime_mean
        elapsed_time_hat = np.cumsum(laptime_array) + offset
        # window id: number of laps completed when this forecast starts
        completed_laps = len(tss[idx]) - prediction_len + 1
        if completed_laps not in forecasts_et:
            forecasts_et[completed_laps] = {}
        forecasts_et[completed_laps][carno] = [elapsed_time[-prediction_len:].copy(),
                elapsed_time_hat[-prediction_len:].copy()]
    # calc rank per forecast window
    rank_ret = []
    for lap in forecasts_et.keys():
        # cars present in this window
        carlist = list(forecasts_et[lap].keys())
        caridmap={key:idx for idx, key in enumerate(carlist)}
        # axis 0: 0=true, 1=predicted; then [car, horizon step]
        elapsed_time = np.zeros((2, len(carlist), prediction_len))
        for carno in carlist:
            carid = caridmap[carno]
            elapsed_time[0, carid, :] = forecasts_et[lap][carno][0]
            elapsed_time[1, carid, :] = forecasts_et[lap][carno][1]
        # rank = position of each car when sorted by elapsed time (double argsort)
        idx = np.argsort(elapsed_time[0], axis=0)
        true_rank = np.argsort(idx, axis=0)
        idx = np.argsort(elapsed_time[1], axis=0)
        pred_rank = np.argsort(idx, axis=0)
        rank_ret.append([lap, elapsed_time, true_rank, pred_rank])
    return rank_ret,forecasts_et
def get_acc(rank_ret,prediction_length):
    """
    Aggregate rank-prediction metrics over all forecast windows.
    input:
        rank_ret: [lap, elapsed_time, true_rank, pred_rank], use [2][3] columns
    return:
        ((metrics...)
        (record count...))
        the result can be used to calculate micro/macro metrics
    """
    top1acc = 0
    top1acc_farmost = 0
    top5acc = 0
    top5acc_farmost = 0
    tau = 0
    rmse = 0.
    for entry in rank_ret:
        true_rank, pred_rank = entry[2], entry[3]
        # top-1: both agree on the leader (rank 0) at each horizon step;
        # farmost variants look only at the last step of the horizon
        top1acc += np.sum((true_rank == 0) & (pred_rank == 0))
        top1acc_farmost += np.sum((true_rank[:, -1] == 0) & (pred_rank[:, -1] == 0))
        # top-5 membership agreement
        top5acc += np.sum((true_rank < 5) & (pred_rank < 5))
        top5acc_farmost += np.sum((true_rank[:, -1] < 5) & (pred_rank[:, -1] < 5))
        # rank-correlation and squared error of the full rank matrices
        kt, _ = stats.kendalltau(true_rank, pred_rank)
        tau += kt
        rmse += mean_squared_error(pred_rank, true_rank)
    # normalize to per-window / per-step rates
    recnt = len(rank_ret)
    top1acc = top1acc *1.0/ (recnt*prediction_length)
    top1acc_farmost = top1acc_farmost *1.0/ recnt
    top5acc = top5acc *1.0/ (5*recnt*prediction_length)
    top5acc_farmost = top5acc_farmost *1.0/ (5*recnt)
    tau = tau/recnt
    rmse = rmse/recnt
    print(f'total:{len(rank_ret)}, prediction_length:{prediction_length}')
    print('top1acc=', top1acc,
          'top1acc_farmost=', top1acc_farmost,
          'top5acc=', top5acc,
          'top5acc_farmost=', top5acc_farmost,
          )
    print('tau = ', tau,
          'rmse = ', rmse)
    return ((top1acc,top1acc_farmost,top5acc,top5acc_farmost,tau,rmse),
            (recnt*prediction_length,recnt,5*recnt*prediction_length,5*recnt,recnt,recnt))
# In[7]:
def _eval_models_on_ds(models, test_ds, prediction_length, half_moving_win, trainid,
                       test_event, pred_ret, ds_ret, retdf):
    """Run each model on test_ds, evaluate rank metrics, append result rows in place."""
    for model in models:
        print('model:', model)
        tss, forecasts = run_prediction_ex(test_ds, prediction_length, model,trainid=trainid)
        pred_ret[model] = [tss, forecasts]
        ds_ret[model] = test_ds
        rank_ret,_ = eval_rank(test_ds,tss,forecasts,prediction_length,global_start_offset[test_event])
        metrics = get_acc(rank_ret,prediction_length)
        ret = [model, prediction_length, half_moving_win,trainid]
        ret.extend(metrics[0])
        retdf.append(ret)

def run_exp(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
        test_event='Indy500', test_cars = []):
    """
    Run the baseline model comparison on one event.
    dependency: test_event, test on one event only

    return:
        pred_ret ; model -> [tss, forecasts]
        ds_ret   ; model -> test dataset used
        retdf    ; result rows [model, plen, halfmode, trainid, metrics...]

    Improvement: the three copy-pasted dataset-build + evaluate sections are
    folded into one loop over (models, oracle_mode) pairs via a shared helper.
    """
    retdf = []
    pred_ret = {}
    ds_ret = {}
    # each entry: (models evaluated on this dataset, oracle mode used to build it)
    for models, mode in [(['oracle','deepAR','naive'], MODE_ORACLE),
                         (['curtrack'], MODE_TESTCURTRACK),
                         (['zerotrack'], MODE_TESTZERO)]:
        train_ds, test_ds,_,_ = make_dataset_byevent(events_id[test_event], prediction_length,freq,
                oracle_mode=mode,
                run_ts = COL_LAPTIME,
                test_cars=test_cars,
                half_moving_win= half_moving_win,
                train_ratio=train_ratio)
        _eval_models_on_ds(models, test_ds, prediction_length, half_moving_win, trainid,
                           test_event, pred_ret, ds_ret, retdf)
    return pred_ret, ds_ret, retdf
def run_exp_predtrack(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
        test_event='Indy500', test_cars = []):
    """
    Evaluate rank forecasting with predicted track-status features.
    dependency: test_event, test on one event only
    """
    retdf = []
    pred_ret = {}
    ds_ret = {}
    # build the test dataset with MODE_PREDTRACK oracle features
    _train_ds, test_ds, _, _ = make_dataset_byevent(
            events_id[test_event], prediction_length, freq,
            oracle_mode=MODE_PREDTRACK,
            run_ts=COL_LAPTIME,
            test_cars=test_cars,
            half_moving_win=half_moving_win,
            train_ratio=train_ratio)
    for model in ['oracle','curtrack','zerotrack']:
        print('model:', model)
        tss, forecasts = run_prediction_ex(test_ds, prediction_length, model, trainid=trainid)
        pred_ret[model] = [tss, forecasts]
        ds_ret[model] = test_ds
        rank_ret, _ = eval_rank(test_ds, tss, forecasts, prediction_length,
                global_start_offset[test_event])
        metrics = get_acc(rank_ret, prediction_length)
        retdf.append([model, prediction_length, half_moving_win, trainid] + list(metrics[0]))
    return pred_ret, ds_ret, retdf
def run_exp_predpit(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
        test_event='Indy500', test_cars = []):
    """
    Evaluate rank forecasting with predicted pit-status features.
    dependency: test_event, test on one event only
    """
    retdf = []
    pred_ret = {}
    ds_ret = {}
    # build the test dataset with MODE_PREDPIT oracle features
    _train_ds, test_ds, _, _ = make_dataset_byevent(
            events_id[test_event], prediction_length, freq,
            oracle_mode=MODE_PREDPIT,
            run_ts=COL_LAPTIME,
            test_cars=test_cars,
            half_moving_win=half_moving_win,
            train_ratio=train_ratio)
    for model in ['oracle','curtrack','zerotrack']:
        print('model:', model)
        tss, forecasts = run_prediction_ex(test_ds, prediction_length, model, trainid=trainid)
        pred_ret[model] = [tss, forecasts]
        ds_ret[model] = test_ds
        rank_ret, _ = eval_rank(test_ds, tss, forecasts, prediction_length,
                global_start_offset[test_event])
        metrics = get_acc(rank_ret, prediction_length)
        retdf.append([model, prediction_length, half_moving_win, trainid] + list(metrics[0]))
    return pred_ret, ds_ret, retdf
# In[8]:
#MODE_DISTURB_CLEARTRACK = 64
#MODE_DISTURB_ADJUSTTRACK = 128
#MODE_DISTURB_ADJUSTPIT = 256
def run_exp_cleartrack(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
        test_event='Indy500', test_cars = []):
    """
    Disturbance analysis: oracle model with cleared future track status.
    dependency: test_event, test on one event only
    """
    retdf = []
    pred_ret = {}
    ds_ret = {}
    # build the test dataset with the CLEARTRACK disturbance applied
    _train_ds, test_ds, _, _ = make_dataset_byevent(
            events_id[test_event], prediction_length, freq,
            oracle_mode=MODE_DISTURB_CLEARTRACK,
            run_ts=COL_LAPTIME,
            test_cars=test_cars,
            half_moving_win=half_moving_win,
            train_ratio=train_ratio)
    for model in ['oracle']:
        print('model:', model)
        tss, forecasts = run_prediction_ex(test_ds, prediction_length, model, trainid=trainid)
        pred_ret[model] = [tss, forecasts]
        ds_ret[model] = test_ds
        rank_ret, _ = eval_rank(test_ds, tss, forecasts, prediction_length,
                global_start_offset[test_event])
        metrics = get_acc(rank_ret, prediction_length)
        retdf.append([model, prediction_length, half_moving_win, trainid] + list(metrics[0]))
    return pred_ret, ds_ret, retdf
def run_exp_adjusttrack(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
        test_event='Indy500', test_cars = []):
    """
    Disturbance analysis: oracle model with cleared + adjusted track status.
    dependency: test_event, test on one event only
    """
    retdf = []
    pred_ret = {}
    ds_ret = {}
    # combine the CLEARTRACK and ADJUSTTRACK disturbance flags
    _train_ds, test_ds, _, _ = make_dataset_byevent(
            events_id[test_event], prediction_length, freq,
            oracle_mode=MODE_DISTURB_CLEARTRACK + MODE_DISTURB_ADJUSTTRACK,
            run_ts=COL_LAPTIME,
            test_cars=test_cars,
            half_moving_win=half_moving_win,
            train_ratio=train_ratio)
    for model in ['oracle']:
        print('model:', model)
        tss, forecasts = run_prediction_ex(test_ds, prediction_length, model, trainid=trainid)
        pred_ret[model] = [tss, forecasts]
        ds_ret[model] = test_ds
        rank_ret, _ = eval_rank(test_ds, tss, forecasts, prediction_length,
                global_start_offset[test_event])
        metrics = get_acc(rank_ret, prediction_length)
        retdf.append([model, prediction_length, half_moving_win, trainid] + list(metrics[0]))
    return pred_ret, ds_ret, retdf
def run_exp_adjustpit(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
        test_event='Indy500', test_cars = []):
    """
    Disturbance analysis: oracle model with adjusted pit positions.
    dependency: test_event, test on one event only
    """
    retdf = []
    pred_ret = {}
    ds_ret = {}
    # build the test dataset with the ADJUSTPIT disturbance applied
    _train_ds, test_ds, _, _ = make_dataset_byevent(
            events_id[test_event], prediction_length, freq,
            oracle_mode=MODE_DISTURB_ADJUSTPIT,
            run_ts=COL_LAPTIME,
            test_cars=test_cars,
            half_moving_win=half_moving_win,
            train_ratio=train_ratio)
    for model in ['oracle']:
        print('model:', model)
        tss, forecasts = run_prediction_ex(test_ds, prediction_length, model, trainid=trainid)
        pred_ret[model] = [tss, forecasts]
        ds_ret[model] = test_ds
        rank_ret, _ = eval_rank(test_ds, tss, forecasts, prediction_length,
                global_start_offset[test_event])
        metrics = get_acc(rank_ret, prediction_length)
        retdf.append([model, prediction_length, half_moving_win, trainid] + list(metrics[0]))
    return pred_ret, ds_ret, retdf
def run_exp_adjustall(prediction_length, half_moving_win, train_ratio=0.8, trainid="r0.8",
        test_event='Indy500', test_cars = []):
    """
    Disturbance analysis: oracle model with all three disturbances combined.
    dependency: test_event, test on one event only
    """
    retdf = []
    pred_ret = {}
    ds_ret = {}
    # combine CLEARTRACK + ADJUSTPIT + ADJUSTTRACK disturbance flags
    _train_ds, test_ds, _, _ = make_dataset_byevent(
            events_id[test_event], prediction_length, freq,
            oracle_mode=MODE_DISTURB_CLEARTRACK +MODE_DISTURB_ADJUSTPIT +MODE_DISTURB_ADJUSTTRACK,
            run_ts=COL_LAPTIME,
            test_cars=test_cars,
            half_moving_win=half_moving_win,
            train_ratio=train_ratio)
    for model in ['oracle']:
        print('model:', model)
        tss, forecasts = run_prediction_ex(test_ds, prediction_length, model, trainid=trainid)
        pred_ret[model] = [tss, forecasts]
        ds_ret[model] = test_ds
        rank_ret, _ = eval_rank(test_ds, tss, forecasts, prediction_length,
                global_start_offset[test_event])
        metrics = get_acc(rank_ret, prediction_length)
        retdf.append([model, prediction_length, half_moving_win, trainid] + list(metrics[0]))
    return pred_ret, ds_ret, retdf
# ### init
# In[ ]:
# In[9]:
#
# parameters
#
#year = '2017'
year = '2018'
#event = 'Toronto'
#https://www.racing-reference.info/season-stats/2018/O/#
# per-event race length in miles and per-lap track length (same order as `events`)
events_totalmiles=[256,500,372,268,500,310]
events_laplen = [1.022,2.5,1.5,0.894,2.5,1.25]
events = ['Phoenix','Indy500','Texas','Iowa','Pocono','Gateway']
#events = ['Gateway']
#events = ['Indy500']
#events = ['Phoenix']
# event name -> index into the per-event lists above
events_id={key:idx for idx, key in enumerate(events)}
#works for only one event
# In[10]:
# per-event raw data and lap-0 elapsed-time offsets, filled by the loop below
stagedata = {}
global_start_offset = {}
global_carids = {}
traindata = None
cur_carid = 0
for event in events:
    #alldata, rankdata, acldata, flagdata
    stagedata[event] = load_data(event, year)
    alldata, rankdata, acldata = stagedata[event]
    #carlist = set(acldata['car_number'])
    #laplist = set(acldata['completed_laps'])
    #print('%s: carno=%d, lapnum=%d'%(event, len(carlist), len(laplist)))
    #offset
    # elapsed time at lap 0 per car; used by eval_rank to convert laptimes to elapsed time
    global_start_offset[event] = rankdata[rankdata['completed_laps']==0][['car_number','elapsed_time']]
    #build the carid map
    #for car in carlist:
    #    if car not in global_carids:
    #        global_carids[car] = cur_carid
    #        cur_carid += 1
# In[11]:
# start from here
import pickle
# load the preprocessed per-season dataset; note this REPLACES the
# global_carids dict initialized above with the pickled car-id mapping
with open('laptime_rank_timediff_pit-oracle-%s.pickle'%year, 'rb') as f:
    # The protocol version used is detected automatically, so we do not
    # have to specify it.
    global_carids, laptime_data = pickle.load(f, encoding='latin1')
# In[12]:
freq = "1min"
#decode global_carids
# reverse map: global car id -> real car number
decode_carids={carid:carno for carno, carid in global_carids.items()}
#useeid = False
#interpolate = False
#ipstr = '-ip' if interpolate else '-noip'
#ipstr = '%s-%s'%('ip' if interpolate else 'noip', 'eid' if useeid else 'noeid')
#if useeid:
#    cardinality = [len(global_carids), len(laptime_data)]
#else:
#    cardinality = [len(global_carids)]
# In[13]:
def run_test(runs, plens, half, trainids, train_ratio, testfunc):
    """
    Run `testfunc` over the full grid (half x plens x trainids), repeated
    `runs` times, then average the rank metrics across runs.

    input:
        plens=[2,5,10]
        half=[False]
        #trainids = ["indy500-r0.2","indy500-r0.4","indy500"]
        trainids = ["r0.5"]
        #half=[True,False]
        #plens=[2]
        runs = 5
        train_ratio=0.5
        exp_id='mean-splitbystage-predpit'
        testfunc ; run_exp_predpit, run_exp_predtrack, run_exp ...
    return:
        dfret ; average result of multiple runs
        dataframe['model' , 'prediction_length', 'halfmode','trainid',
        'top1acc','top1acc_farmost','top5acc','top5acc_farmost','tau','rmse',
        'top1acc_std','top1acc_farmost_std','top5acc_std','top5acc_farmost_std','tau_std','rmse_std']

    NOTE(review): the output csv name reads the module-level global `exp_id`,
    which callers must set before invoking this function — confirm.
    """
    if plens == [] or half == [] or trainids == []:
        print("error with empty settings")
        return
    # one result dataframe per run
    allret = []
    for runid in range(runs):
        exp_data = []
        exp_result = []
        for halfmode in half:
            for plen in plens:
                for trainid in trainids:
                    print('='*20)
                    pred_ret, test_ds, metric_ret = testfunc(plen, halfmode,
                                train_ratio=train_ratio,
                                trainid=trainid)
                    #save
                    exp_data.append((pred_ret, test_ds))
                    exp_result.extend(metric_ret)
        #save result
        result = pd.DataFrame(exp_result, columns = ['model' , 'prediction_length', 'halfmode',
                    'trainid',
                    'top1acc','top1acc_farmost','top5acc',
                    'top5acc_farmost','tau','rmse'])
        #result['runid'] = [runid for x in range(len(result))]
        allret.append(result)
    #final
    # stack the six numeric metric columns into (runs, rows, 6) for averaging
    rowcnt = len(allret[0])
    metrics = np.empty((runs, rowcnt, 6))
    for runid, ret in enumerate(allret):
        metrics[runid, :,:] = ret[['top1acc','top1acc_farmost','top5acc',
                    'top5acc_farmost','tau','rmse']].values
    #average
    averagemat = np.mean(metrics[:,:,:], axis=0)
    stdmat = np.std(metrics[:,:,:], axis=0)
    # reuse the first run's frame as a template; overwrite metric columns (4:) with means
    result = allret[0][['model' , 'prediction_length', 'halfmode',
                    'trainid',
                    'top1acc','top1acc_farmost','top5acc',
                    'top5acc_farmost','tau','rmse']].values
    result[:,4:] = averagemat
    dfret = pd.DataFrame(result, columns = ['model' , 'prediction_length', 'halfmode',
                    'trainid',
                    'top1acc','top1acc_farmost','top5acc',
                    'top5acc_farmost','tau','rmse'])
    dfstd = pd.DataFrame(stdmat, columns = ['top1acc_std','top1acc_farmost_std','top5acc_std',
                    'top5acc_farmost_std','tau_std','rmse_std'])
    dfret = pd.concat([dfret, dfstd], axis=1)
    dfret.to_csv(f'laptime2rank-evaluate-indy500-{exp_id}-result.csv', float_format='%.3f')
    return dfret
# ### loop test
# In[14]:
# exp_id -> averaged result dataframe, filled by the run_test calls below
test_result = {}
# In[ ]:
#half=[True, False]
#plens=[2,5,10,20,30]
plens=[2,5,10]
half=[0]
trainids = ["indy500-r0.2","indy500-r0.4","indy500"]
#trainids = ["r0.5","r0.6"]
runs = 5
train_ratio=0.4
#trainids = ["indy500"]
#runs = 1
#plens=[2]
#testfunc = run_exp_cleartrack
#exp_id=f'mean-splitbyevent-adjustcleartrack-r{runs}-t{train_ratio}'
#test_result[exp_id] = run_test(runs, plens, half, trainids, train_ratio, testfunc)
#
#testfunc = run_exp_adjusttrack
#exp_id=f'mean-splitbyevent-adjusttrack-r{runs}-t{train_ratio}'
#test_result[exp_id] = run_test(runs, plens, half, trainids, train_ratio, testfunc)
# NOTE: run_test reads the module-level `exp_id` when naming its output csv,
# so exp_id must be assigned before each call
testfunc = run_exp_adjustpit
exp_id=f'mean-splitbyevent-adjustpit-r{runs}-t{train_ratio}'
test_result[exp_id] = run_test(runs, plens, half, trainids, train_ratio, testfunc)
testfunc = run_exp_adjustall
exp_id=f'mean-splitbyevent-adjustall-r{runs}-t{train_ratio}'
test_result[exp_id] = run_test(runs, plens, half, trainids, train_ratio, testfunc)
#testfunc = run_exp
#exp_id=f'mean-splitbyevent-baseline-r{runs}-t{train_ratio}'
#test_result[exp_id] = run_test(runs, plens, half, trainids, train_ratio, testfunc)
# In[ ]:
# settings for a second experiment batch (no run_test call follows in this cell)
#half=[True, False]
#plens=[2,5,10,20,30]
plens=[2,5,10]
half=[0]
#trainids = ["indy500-r0.2","indy500-r0.4","indy500"]
trainids = ["r0.5","r0.6","r0.7"]
runs = 5
train_ratio=0.7
# ### test
# In[ ]:
# In[ ]:
| 55,178 | 35.088293 | 310 | py |
rankpredictor | rankpredictor-master/run/5.DeployModel/predictor/stage_model_prediction.py | #!/usr/bin/env python
# coding: utf-8
# ### stage model prediction interface
#
# A stage, or a stint, is the section of laps between two consecutive pitstops for a car.
# The models predict the change of the ranks for the next stage when a car enters into the pit lane(or from the beginning).
#
# There are two prediction models:
#
# 1. sign model to predict the sign of rank change (-1 rank improve, 0 no change, 1 rank goes worse)
# 2. value model to predict the value of rank change (integer number)
#
#
# In[1]:
import random
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model import LassoCV
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model.ridge import RidgeCV
from sklearn.svm import LinearSVC
from sklearn.svm.classes import SVR
# In[2]:
# bulid regression model
classifiers = ['currank','avgrank','dice','lr','lrl1','lsvc','lsvcl2','rf','lrbias','xgb']
def get_classifier(classifier = 'lr'):
    """Instantiate a classifier by its short name; unknown names yield None."""
    class_weight = None
    # name -> zero-arg factory; construction stays lazy so only the
    # requested estimator is ever built
    factories = {
        'lsvc': lambda: LinearSVC(penalty='l1',dual=False, tol=1e-3, class_weight=class_weight ),
        'lsvcl2': lambda: LinearSVC(penalty='l2', tol=1e-4, class_weight=class_weight),
        'rf': lambda: RandomForestClassifier(n_estimators=100, n_jobs=-1,criterion='entropy', class_weight = class_weight),
        'lr': lambda: LogisticRegression(class_weight = class_weight, n_jobs=-1, fit_intercept = False, verbose = 0),
        'lrbias': lambda: LogisticRegression(class_weight = class_weight, n_jobs=-1, fit_intercept = True, verbose = 1),
        'lrl1': lambda: LogisticRegression(class_weight = class_weight, penalty='l1',n_jobs=-1),
        'xgb': lambda: xgb.XGBClassifier(booster = 'gbtree', nthread = -1, subsample = 1,
                n_estimators = 600, colsample_bytree = 1, max_depth = 6, min_child_weight = 1),
        'dice': lambda: RandomDice('1234'),
        'currank': lambda: CurRank(),
        'avgrank': lambda: AverageRank(),
    }
    maker = factories.get(classifier)
    return maker() if maker is not None else None
# bulid regression model
regressors = ['currank','avgrank','dice','lasso','ridge','rf','svr','xgb']
def get_regressor(regressor = 'lr'):
    """Instantiate a regressor by its short name; unknown names yield None."""
    # name -> zero-arg factory; construction stays lazy
    factories = {
        'lasso': lambda: LassoCV(cv=5, random_state=0),
        'ridge': lambda: RidgeCV(alphas=np.logspace(-6, 6, 13)),
        'rf': lambda: RandomForestRegressor(n_estimators=100),
        'svr': lambda: SVR(kernel='rbf'),
        'xgb': lambda: xgb.XGBRegressor(objective="reg:linear", random_state=42, max_depth=3),
        'dice': lambda: RandomDice('1234'),
        'currank': lambda: CurRank(),
        'avgrank': lambda: AverageRank(),
    }
    maker = factories.get(regressor)
    return maker() if maker is not None else None
class CurRank():
    """
    Baseline predictor: always forecast "no rank change" (all zeros).
    """
    def __init__(self):
        pass
    def fit(self, x, y):
        # nothing to learn for this baseline
        pass
    def predict(self, test_x):
        # one zero per test sample
        return np.array([0] * test_x.shape[0])
class AverageRank():
    """
    Baseline predictor: read feature column 13 (change_in_rank_all) of each
    sample and predict its sign (+1 rank worse, -1 rank better, 0 unchanged).
    """
    def __init__(self):
        pass
    def fit(self, x, y):
        # stateless baseline; nothing to fit
        pass
    def predict(self, test_x):
        signs = []
        for row in test_x:
            chg = row[13]  # change_in_rank_all feature
            signs.append(1 if chg > 0 else (-1 if chg < 0 else 0))
        return np.array(signs)
class RandomDice():
    """
    Baseline that samples predictions from the empirical label
    distribution observed during fit().
    """
    def __init__(self, seed='1234'):
        self.dist = []   # cumulative probability bound per label
        self.val = []    # labels, aligned with self.dist
        random.seed(seed)
    def fit(self, x, y):
        total = y.shape[0]
        cum = 0.
        for label in set(y):
            self.val.append(label)
            cum += np.sum(y == label) * 1.0 / total
            self.dist.append(cum)
    def predict(self, test_x):
        out = []
        for _ in test_x:
            roll = random.random()
            # fall back to the last label if rounding leaves roll above all bounds
            chosen = -1
            for pos, bound in enumerate(self.dist):
                if roll <= bound:
                    chosen = pos
                    break
            out.append(self.val[chosen])
        return np.array(out)
def evaluate(test_y, pred_y):
    """Print per-class precision/recall/F1 and overall accuracy; return accuracy."""
    prec = metrics.precision_score(test_y, pred_y, average=None)
    rec = metrics.recall_score(test_y, pred_y, average=None)
    f1score = metrics.f1_score(test_y, pred_y, average=None)
    acc = metrics.accuracy_score(test_y, pred_y)
    print('precision=%s, recall=%s, f1=%s, accuracy=%.2f'%(prec,rec, f1score, acc))
    return acc
def classifier_model(name='lr'):
    """
    Train the named classifier on the module-level train split
    (train_x/train_y) and score it on the test split (test_x/test_y).
    """
    ### test learning models
    print('[*] predict with %s model'%name)
    model = get_classifier(name)
    model.fit(train_x, train_y)
    predicted = model.predict(test_x)
    return evaluate(test_y, predicted)
# In[3]:
#load data
# stage-level feature csv produced upstream; NaNs are treated as zeros
suffix='-withneighbor-newfeatures-timediff'
stagedata = pd.read_csv('stage-2018%s.csv'%suffix)
stagedata.fillna(0, inplace=True)
stagedata.info()
# In[4]:
stagedata.head(5)
# ### load the pre-trained models
# In[5]:
import pickle
eventsname = ['Phoenix','Indy500','Texas','Iowa','Pocono','Gateway']
events = set(stagedata['eventid'])
#for eventid in events:
eventid = 1 # Indy500
# file names of the pickled (clf, test_x, test_y) bundles for this event
signmodel = 'signmodel-' + eventsname[eventid] + '-lsvc' + '.pkl2'
valuemodel = 'valuemodel-' + eventsname[eventid] + '-lasso' + '.pkl2'
# In[6]:
# sentinel returned when no test sample exists for the requested car/stage
EMPTY = 100
def predict(carno, stageid):
    """
    Look up the test sample for <car, stage> and return the model's prediction.

    stageid is the id of pitstop, start from 0. Reads the module-level
    feature matrix `test_x` and fitted model `clf`; returns EMPTY when no
    row matches <eventid, car_num, stageid>.
    """
    for row in test_x:
        # feature layout: row[1] = car number, row[2] = stage id
        if row[1] == carno and row[2] == stageid:
            sample = row.reshape((1, -1))
            return int(clf.predict(sample)[0])
    return EMPTY
# ### test the sign of rank change prediction
# In[7]:
#load model and predict
# bug fix: this section exercises the SIGN model but previously opened
# `valuemodel` (copy-paste from the value-model cell below); `signmodel`
# was built above and never used otherwise
with open(signmodel, 'rb') as fin:
    clf, test_x, test_y = pickle.load(fin)
yhat = clf.predict(test_x)
#check carno 12
carno=12
idx = (test_x[:,1]==carno)
_yhat = yhat[idx]
ret_y = []
for stageid in range(10):
    Y = predict(carno, stageid)
    if Y == EMPTY:
        break
    ret_y.append(Y)
print('trueth:', test_y[idx])
print('prediction:', _yhat)
print('prediction:', ret_y)
# ### test the value of rank change prediction
# In[8]:
#load model and predict
# the value model regresses the signed rank change; cast to int for reporting
with open(valuemodel, 'rb') as fin:
    clf, test_x, test_y = pickle.load(fin)
yhat = clf.predict(test_x).astype(int)
#check carno 12
carno=12
idx = (test_x[:,1]==carno)
_yhat = yhat[idx]
ret_y = []
# query stage by stage until predict() reports no more samples for this car
for stageid in range(10):
    Y = predict(carno, stageid)
    if Y == EMPTY:
        break
    ret_y.append(Y)
#predict(12, 3)
print('trueth:', test_y[idx])
print('prediction:', _yhat)
print('prediction:', ret_y)
# In[ ]:
from flask import Flask, request
api = Flask(__name__)
# minimal HTTP wrapper around predict(): GET /predict?car=<n>&stage=<m>
@api.route('/predict', methods=['GET'])
def get_companies():
    # query-string values arrive as strings; predict() expects ints
    car_number = request.args.get("car")
    stage = request.args.get("stage")
    print("Received request", car_number, stage)
    prediction = predict(int(car_number), int(stage))
    return str(prediction)
if __name__ == '__main__':
    api.run(port=5001)
| 8,013 | 25.448845 | 134 | py |
rankpredictor | rankpredictor-master/run/5.DeployModel/predictor-py2/stage_model_prediction.py | #!/usr/bin/env python
# coding: utf-8
# ### stage model prediction interface
#
# A stage, or a stint, is the section of laps between two consecutive pitstops for a car.
# The models predict the change of the ranks for the next stage when a car enters into the pit lane(or from the beginning).
#
# There are two prediction models:
#
# 1. sign model to predict the sign of rank change (-1 rank improve, 0 no change, 1 rank goes worse)
# 2. value model to predict the value of rank change (integer number)
#
#
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC, SVC
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model import LassoCV
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
from sklearn import metrics
import xgboost as xgb
import os
# In[2]:
# bulid regression model
classifiers = ['currank','avgrank','dice','lr','lrl1','lsvc','lsvcl2','rf','lrbias','xgb']
def get_classifier(classifier = 'lr'):
    """Instantiate a classifier by its short name; unknown names yield None."""
    class_weight = None
    # name -> zero-arg factory; keeps construction lazy
    factories = {
        'lsvc': lambda: LinearSVC(penalty='l1',dual=False, tol=1e-3, class_weight=class_weight ),
        'lsvcl2': lambda: LinearSVC(penalty='l2', tol=1e-4, class_weight=class_weight),
        'rf': lambda: RandomForestClassifier(n_estimators=100, n_jobs=-1,criterion='entropy', class_weight = class_weight),
        'lr': lambda: LogisticRegression(class_weight = class_weight, n_jobs=-1, fit_intercept = False, verbose = 0),
        'lrbias': lambda: LogisticRegression(class_weight = class_weight, n_jobs=-1, fit_intercept = True, verbose = 1),
        'lrl1': lambda: LogisticRegression(class_weight = class_weight, penalty='l1',n_jobs=-1),
        'xgb': lambda: xgb.XGBClassifier(booster = 'gbtree', nthread = -1, subsample = 1,
                n_estimators = 600, colsample_bytree = 1, max_depth = 6, min_child_weight = 1),
        'dice': lambda: RandomDice('1234'),
        'currank': lambda: CurRank(),
        'avgrank': lambda: AverageRank(),
    }
    maker = factories.get(classifier)
    return maker() if maker is not None else None
# build regression models
regressors = ['currank','avgrank','dice','lasso','ridge','rf','svr','xgb']
def get_regressor(regressor = 'lr'):
    """Instantiate a regressor by its short name.

    Returns a freshly constructed estimator, or None when the name is
    not recognized.
    """
    # Lazy factories: only the requested estimator is constructed.
    factories = {
        'lasso': lambda: LassoCV(cv=5, random_state=0),
        'ridge': lambda: RidgeCV(alphas=np.logspace(-6, 6, 13)),
        'rf': lambda: RandomForestRegressor(n_estimators=100),
        'svr': lambda: SVR(kernel='rbf'),
        'xgb': lambda: xgb.XGBRegressor(objective="reg:linear", random_state=42, max_depth=3),
        'dice': lambda: RandomDice('1234'),
        'currank': lambda: CurRank(),
        'avgrank': lambda: AverageRank(),
    }
    factory = factories.get(regressor)
    return factory() if factory is not None else None
class CurRank():
    """Baseline model: always predicts 0, i.e. "rank stays the same"."""
    def __init__(self):
        pass
    def fit(self, x, y):
        # Stateless baseline -- nothing to learn.
        pass
    def predict(self, test_x):
        # One zero per test row.
        return np.zeros(test_x.shape[0], dtype=int)
class AverageRank():
    """Baseline model: predicts the sign of feature column 13
    (change_in_rank_all) -- 1 worse, -1 better, 0 unchanged."""
    def __init__(self):
        pass
    def fit(self, x, y):
        # Stateless baseline -- nothing to learn.
        pass
    def predict(self, test_x):
        # Column 13 holds change_in_rank_all; reduce each value to its sign.
        raw = [row[13] for row in test_x]
        signs = [1 if v > 0 else (-1 if v < 0 else 0) for v in raw]
        return np.array(signs)
class RandomDice():
    """Random baseline: samples labels with the class frequencies
    observed in fit(). Seeds the global `random` stream in __init__."""
    def __init__(self, seed='1234'):
        self.dist = []
        self.val = []
        random.seed(seed)
    def fit(self, x, y):
        # Build the cumulative distribution of label frequencies.
        total = y.shape[0]
        running = 0.
        for label in set(y):
            self.val.append(label)
            running += np.sum(y == label) * 1.0 / total
            self.dist.append(running)
    def predict(self, test_x):
        out = []
        for _ in test_x:
            roll = random.random()
            # First cumulative bucket covering the roll; fall back to the
            # last label (index -1) if rounding left the final bucket < 1.0.
            chosen = next(
                (i for i, bound in enumerate(self.dist) if roll <= bound), -1
            )
            out.append(self.val[chosen])
        return np.array(out)
def evaluate(test_y, pred_y):
    """Print per-class precision/recall/F1 plus accuracy and return accuracy."""
    accuracy = metrics.accuracy_score(test_y, pred_y)
    precision = metrics.precision_score(test_y, pred_y, average=None)
    recall = metrics.recall_score(test_y, pred_y, average=None)
    f1 = metrics.f1_score(test_y, pred_y, average=None)
    print('precision=%s, recall=%s, f1=%s, accuracy=%.2f'%(precision,recall, f1, accuracy))
    return accuracy
def classifier_model(name='lr'):
    """Fit classifier `name` on the train split, score it on the test split.

    Returns the accuracy as computed by `evaluate`.

    NOTE(review): relies on module-level globals train_x, train_y, test_x,
    test_y which are not defined in this script as shown -- confirm the
    caller (notebook cell) sets them up first.
    """
    ### test learning models
    print('[*] predict with %s model'%name)
    clf = get_classifier(name)
    clf.fit(train_x, train_y)
    pred_y = clf.predict(test_x)
    score = evaluate(test_y, pred_y)
    return score
# In[3]:
# Load the 2018 stage feature table and replace missing values with 0.
#load data
suffix='-withneighbor-newfeatures-timediff'
stagedata = pd.read_csv('stage-2018%s.csv'%suffix)
stagedata.fillna(0, inplace=True)
stagedata.info()
# In[4]:
stagedata.head(5)
# ### load the pre-trained models
# In[5]:
import pickle
# Event ids index into this list; eventid 1 selects Indy500 below.
eventsname = ['Phoenix','Indy500','Texas','Iowa','Pocono','Gateway']
events = set(stagedata['eventid'])
#for eventid in events:
eventid = 1 # Indy500
# File names of the pre-trained sign (lsvc) and value (lasso) models.
signmodel = 'signmodel-' + eventsname[eventid] + '-lsvc' + '.pkl'
valuemodel = 'valuemodel-' + eventsname[eventid] + '-lasso' + '.pkl'
# In[6]:
# Sentinel returned when no matching <car, stage> row exists in test_x.
EMPTY = 100
def predict(carno, stageid):
    """Predict the rank change for car `carno` at pit stop `stageid`.

    `stageid` is the id of the pitstop, starting from 0. Scans the
    module-level feature matrix `test_x` for the row whose car number
    (column 1) and stage id (column 2) match, and feeds that single row
    to the loaded model `clf`.

    Returns the integer prediction, or EMPTY when no row matches.

    Fix: in the original, the predict/return statements had been dedented
    out of the `if`, leaving `input_x` possibly an empty list and an
    orphaned `else:` (a syntax error). Restructured as a plain
    search-and-return loop with a fallback.
    """
    for x in test_x:
        if x[1] == carno and x[2] == stageid:
            input_x = x.reshape((1, -1))
            pred_y = clf.predict(input_x)
            return int(pred_y[0])
    return EMPTY
# ### test the sign of rank change prediction
# In[7]:
#load model and predict
# SECURITY NOTE(review): pickle.load on a local model file -- only safe if
# the .pkl files are trusted artifacts produced by this project.
with open(signmodel, 'rb') as fin:
    clf, test_x, test_y = pickle.load(fin)
    yhat = clf.predict(test_x)
#check carno 12
carno=12
idx = (test_x[:,1]==carno)
_yhat = yhat[idx]
ret_y = []
# Walk stages 0..9 until predict() reports no more stages for this car.
for stageid in range(10):
    Y = predict(carno, stageid)
    if Y == EMPTY:
        break
    ret_y.append(Y)
print('trueth:', test_y[idx])
print('prediction:', _yhat)
print('prediction:', ret_y)
# ### test the value of rank change prediction
# In[8]:
#load model and predict
# NOTE(review): this section duplicates the sign-model check above with the
# value model; rebinding clf/test_x/test_y also changes what predict() uses.
with open(valuemodel, 'rb') as fin:
    clf, test_x, test_y = pickle.load(fin)
    yhat = clf.predict(test_x).astype(int)
#check carno 12
carno=12
idx = (test_x[:,1]==carno)
_yhat = yhat[idx]
ret_y = []
for stageid in range(10):
    Y = predict(carno, stageid)
    if Y == EMPTY:
        break
    ret_y.append(Y)
#predict(12, 3)
print('trueth:', test_y[idx])
print('prediction:', _yhat)
print('prediction:', ret_y)
# In[ ]:
| 7,851 | 25.798635 | 134 | py |
rankpredictor | rankpredictor-master/sub/gluonts-0.5.2/model/forecast.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import re
from enum import Enum
from typing import Dict, List, NamedTuple, Optional, Set, Union, Callable
# Third-party imports
import mxnet as mx
import numpy as np
import pandas as pd
import pydantic
# First-party imports
from gluonts.core.exception import GluonTSUserError
from gluonts.distribution import Distribution
from gluonts.core.component import validated
class Quantile(NamedTuple):
    """A quantile level paired with its canonical string name."""

    value: float
    name: str

    @property
    def loss_name(self):
        return f"QuantileLoss[{self.name}]"

    @property
    def weighted_loss_name(self):
        return f"wQuantileLoss[{self.name}]"

    @property
    def coverage_name(self):
        return f"Coverage[{self.name}]"

    @classmethod
    def checked(cls, value: float, name: str) -> "Quantile":
        # Guard clause: only levels inside [0, 1] are valid quantiles.
        if 0 <= value <= 1:
            return Quantile(value, name)
        raise GluonTSUserError(
            f"quantile value should be in [0, 1] but found {value}"
        )

    @classmethod
    def from_float(cls, quantile: float) -> "Quantile":
        """Build a Quantile from a float level, naming it str(level)."""
        assert isinstance(quantile, float)
        return cls.checked(quantile, str(quantile))

    @classmethod
    def from_str(cls, quantile: str) -> "Quantile":
        """Build a Quantile from '0.1'-style or 'p10'-style strings."""
        assert isinstance(quantile, str)
        try:
            return cls.checked(float(quantile), quantile)
        except ValueError:
            # Not a plain float -- try the 'pNN' percentile spelling.
            m = re.match(r"^p(\d{2})$", quantile)
            if m is None:
                raise GluonTSUserError(
                    "Quantile string should be of the form "
                    f'"p10", "p50", ... or "0.1", "0.5", ... but found {quantile}'
                )
            level = int(m.group(1)) / 100
            return cls(value=level, name=str(level))

    @classmethod
    def parse(cls, quantile: Union["Quantile", float, str]) -> "Quantile":
        """Normalize any accepted representation into a Quantile.

        Accepts an existing Quantile (returned unchanged), a float level,
        or a string such as '0.2', '0.20' or 'p99'.
        """
        if isinstance(quantile, Quantile):
            return quantile
        if isinstance(quantile, float):
            return cls.from_float(quantile)
        return cls.from_str(quantile)
class Forecast:
    """
    An abstract class representing predictions.
    """

    # Class-level attribute declarations filled in by subclasses.
    start_date: pd.Timestamp
    freq: str
    item_id: Optional[str]
    info: Optional[Dict]
    prediction_length: int
    mean: np.ndarray
    _index = None  # lazily built DatetimeIndex cache (see `index`)
    def quantile(self, q: Union[float, str]) -> np.ndarray:
        """
        Computes a quantile from the predicted distribution.
        Parameters
        ----------
        q
            Quantile to compute.
        Returns
        -------
        numpy.ndarray
            Value of the quantile across the prediction range.
        """
        raise NotImplementedError()
    def quantile_ts(self, q: Union[float, str]) -> pd.Series:
        """Quantile `q` as a pandas.Series indexed by forecast dates."""
        return pd.Series(index=self.index, data=self.quantile(q))
    @property
    def median(self) -> np.ndarray:
        # The 0.5 quantile by definition.
        return self.quantile(0.5)
    def plot(
        self,
        prediction_intervals=(50.0, 90.0),
        show_mean=False,
        color="b",
        label=None,
        output_file=None,
        *args,
        **kwargs,
    ):
        """
        Plots the median of the forecast as well as confidence bounds.
        (requires matplotlib and pandas).
        Parameters
        ----------
        prediction_intervals : float or list of floats in [0, 100]
            Confidence interval size(s). If a list, it will stack the error
            plots for each confidence interval. Only relevant for error styles
            with "ci" in the name.
        show_mean : boolean
            Whether to also show the mean of the forecast.
        color : matplotlib color name or dictionary
            The color used for plotting the forecast.
        label : string
            A label (prefix) that is used for the forecast
        output_file : str or None, default None
            Output path for the plot file. If None, plot is not saved to file.
        args :
            Other arguments are passed to main plot() call
        kwargs :
            Other keyword arguments are passed to main plot() call
        """
        # matplotlib==2.0.* gives errors in Brazil builds and has to be
        # imported locally
        import matplotlib.pyplot as plt
        label_prefix = "" if label is None else label + "-"
        for c in prediction_intervals:
            assert 0.0 <= c <= 100.0
        # For each interval c, plot the (50 - c/2) and (50 + c/2) percentiles
        # plus the median itself.
        ps = [50.0] + [
            50.0 + f * c / 2.0
            for c in prediction_intervals
            for f in [-1.0, +1.0]
        ]
        percentiles_sorted = sorted(set(ps))
        def alpha_for_percentile(p):
            # Lower percentiles (wider bands) are drawn more transparently.
            return (p / 100.0) ** 0.3
        ps_data = [self.quantile(p / 100.0) for p in percentiles_sorted]
        i_p50 = len(percentiles_sorted) // 2
        p50_data = ps_data[i_p50]
        p50_series = pd.Series(data=p50_data, index=self.index)
        #p50_series.plot(color=color, ls="-", label=f"{label_prefix}median")
        p50_series.plot(color=color, ls="-", label=f"{label_prefix}median",
                *args,
                **kwargs,
                )
        if show_mean:
            # NOTE(review): uses self._sorted_samples, which only exists on
            # sample-based subclasses (e.g. SampleForecast) -- confirm before
            # calling plot(show_mean=True) on other Forecast types.
            mean_data = np.mean(self._sorted_samples, axis=0)
            pd.Series(data=mean_data, index=self.index).plot(
                color=color,
                ls=":",
                label=f"{label_prefix}mean",
                *args,
                **kwargs,
            )
        for i in range(len(percentiles_sorted) // 2):
            ptile = percentiles_sorted[i]
            alpha = alpha_for_percentile(ptile)
            plt.fill_between(
                self.index,
                ps_data[i],
                ps_data[-i - 1],
                facecolor=color,
                alpha=alpha,
                interpolate=True,
                *args,
                **kwargs,
            )
            # Hack to create labels for the error intervals.
            # Doesn't actually plot anything, because we only pass a single data point
            pd.Series(data=p50_data[:1], index=self.index[:1]).plot(
                color=color,
                alpha=alpha,
                #linewidth=10,
                label=f"{label_prefix}{100 - ptile * 2}%",
                *args,
                **kwargs,
            )
        if output_file:
            plt.savefig(output_file)
    @property
    def index(self) -> pd.DatetimeIndex:
        # Built once on first access, then cached in _index.
        if self._index is None:
            self._index = pd.date_range(
                self.start_date, periods=self.prediction_length, freq=self.freq
            )
        return self._index
    def dim(self) -> int:
        """
        Returns the dimensionality of the forecast object.
        """
        raise NotImplementedError()
    def copy_dim(self, dim: int):
        """
        Returns a new Forecast object with only the selected sub-dimension.
        Parameters
        ----------
        dim
            The returned forecast object will only represent this dimension.
        """
        raise NotImplementedError()
    def copy_aggregate(self, agg_fun: Callable):
        """
        Returns a new Forecast object with a time series aggregated over the
        dimension axis.
        Parameters
        ----------
        agg_fun
            Aggregation function that defines the aggregation operation
            (typically mean or sum).
        """
        raise NotImplementedError()
    def as_json_dict(self, config: "Config") -> dict:
        """Serialize the requested output types (see Config) to a dict.

        "samples" is emitted empty here; sample-based subclasses override
        this method to fill it in.
        """
        result = {}
        if OutputType.mean in config.output_types:
            result["mean"] = self.mean.tolist()
        if OutputType.quantiles in config.output_types:
            quantiles = map(Quantile.parse, config.quantiles)
            result["quantiles"] = {
                quantile.name: self.quantile(quantile.value).tolist()
                for quantile in quantiles
            }
        if OutputType.samples in config.output_types:
            result["samples"] = []
        return result
class SampleForecast(Forecast):
    """
    A `Forecast` object, where the predicted distribution is represented
    internally as samples.
    Parameters
    ----------
    samples
        Array of size (num_samples, prediction_length) (1D case) or
        (num_samples, prediction_length, target_dim) (multivariate case)
    start_date
        start of the forecast
    freq
        forecast frequency
    info
        additional information that the forecaster may provide e.g. estimated
        parameters, number of iterations ran etc.
    """
    @validated()
    def __init__(
        self,
        samples: Union[mx.nd.NDArray, np.ndarray],
        start_date: pd.Timestamp,
        freq: str,
        item_id: Optional[str] = None,
        info: Optional[Dict] = None,
    ) -> None:
        assert isinstance(
            samples, (np.ndarray, mx.ndarray.ndarray.NDArray)
        ), "samples should be either a numpy or an mxnet array"
        assert (
            len(np.shape(samples)) == 2 or len(np.shape(samples)) == 3
        ), "samples should be a 2-dimensional or 3-dimensional array. Dimensions found: {}".format(
            len(np.shape(samples))
        )
        # mxnet arrays are converted to numpy up front; everything below
        # works on numpy only.
        self.samples = (
            samples if (isinstance(samples, np.ndarray)) else samples.asnumpy()
        )
        self._sorted_samples_value = None  # cache for _sorted_samples
        # NOTE(review): _mean is initialized here but never assigned
        # elsewhere in this class, so the `mean` property recomputes on
        # every access -- the intended caching never happens.
        self._mean = None
        self._dim = None
        self.item_id = item_id
        self.info = info
        assert isinstance(
            start_date, pd.Timestamp
        ), "start_date should be a pandas Timestamp object"
        self.start_date = start_date
        assert isinstance(freq, str), "freq should be a string"
        self.freq = freq
    @property
    def _sorted_samples(self):
        # Sort along the sample axis once and cache; quantile() indexes
        # into this sorted array.
        if self._sorted_samples_value is None:
            self._sorted_samples_value = np.sort(self.samples, axis=0)
        return self._sorted_samples_value
    @property
    def num_samples(self):
        """
        The number of samples representing the forecast.
        """
        return self.samples.shape[0]
    @property
    def prediction_length(self):
        """
        Time length of the forecast.
        """
        return self.samples.shape[1]
    @property
    def mean(self) -> np.ndarray:
        """
        Forecast mean.
        """
        if self._mean is not None:
            return self._mean
        else:
            return np.mean(self.samples, axis=0)
    @property
    def mean_ts(self) -> pd.Series:
        """
        Forecast mean, as a pandas.Series object.
        """
        return pd.Series(self.mean, index=self.index)
    def quantile(self, q: Union[float, str]) -> np.ndarray:
        """Empirical quantile: the sample at the q-th order-statistic index."""
        q = Quantile.parse(q).value
        sample_idx = int(np.round((self.num_samples - 1) * q))
        return self._sorted_samples[sample_idx, :]
    def copy_dim(self, dim: int) -> "SampleForecast":
        """New SampleForecast restricted to target dimension `dim`
        (no-op for univariate forecasts)."""
        if len(self.samples.shape) == 2:
            samples = self.samples
        else:
            target_dim = self.samples.shape[2]
            assert dim < target_dim, (
                f"must set 0 <= dim < target_dim, but got dim={dim},"
                f" target_dim={target_dim}"
            )
            samples = self.samples[:, :, dim]
        return SampleForecast(
            samples=samples,
            start_date=self.start_date,
            freq=self.freq,
            item_id=self.item_id,
            info=self.info,
        )
    def copy_aggregate(self, agg_fun: Callable) -> "SampleForecast":
        """New SampleForecast with `agg_fun` applied over the target
        dimension axis (no-op for univariate forecasts)."""
        if len(self.samples.shape) == 2:
            samples = self.samples
        else:
            # Aggregate over target dimension axis
            samples = agg_fun(self.samples, axis=2)
        return SampleForecast(
            samples=samples,
            start_date=self.start_date,
            freq=self.freq,
            item_id=self.item_id,
            info=self.info,
        )
    def dim(self) -> int:
        if self._dim is not None:
            return self._dim
        else:
            if len(self.samples.shape) == 2:
                # univariate target
                # shape: (num_samples, prediction_length)
                return 1
            else:
                # multivariate target
                # shape: (num_samples, prediction_length, target_dim)
                return self.samples.shape[2]
    def as_json_dict(self, config: "Config") -> dict:
        # Extends the base serialization with the raw samples.
        result = super().as_json_dict(config)
        if OutputType.samples in config.output_types:
            result["samples"] = self.samples.tolist()
        return result
    def __repr__(self):
        return ", ".join(
            [
                f"SampleForecast({self.samples!r})",
                f"{self.start_date!r}",
                f"{self.freq!r}",
                f"item_id={self.item_id!r}",
                f"info={self.info!r})",
            ]
        )
class QuantileForecast(Forecast):
    """
    A Forecast that contains arrays (i.e. time series) for quantiles and mean
    Parameters
    ----------
    forecast_arrays
        An array of forecasts
    start_date
        start of the forecast
    freq
        forecast frequency
    forecast_keys
        A list of quantiles of the form '0.1', '0.9', etc.,
        and potentially 'mean'. Each entry corresponds to one array in
        forecast_arrays.
    info
        additional information that the forecaster may provide e.g. estimated
        parameters, number of iterations ran etc.
    """
    def __init__(
        self,
        forecast_arrays: np.ndarray,
        start_date: pd.Timestamp,
        freq: str,
        forecast_keys: List[str],
        item_id: Optional[str] = None,
        info: Optional[Dict] = None,
    ) -> None:
        self.forecast_array = forecast_arrays
        self.start_date = pd.Timestamp(start_date, freq=freq)
        self.freq = freq
        # normalize keys
        # e.g. 'p10' and '0.10' both become '0.1'; 'mean' passes through.
        self.forecast_keys = [
            Quantile.from_str(key).name if key != "mean" else key
            for key in forecast_keys
        ]
        self.item_id = item_id
        self.info = info
        self._dim = None
        shape = self.forecast_array.shape
        # One forecast array per key, along axis 0.
        assert shape[0] == len(self.forecast_keys), (
            f"The forecast_array (shape={shape} should have the same "
            f"length as the forecast_keys (len={len(self.forecast_keys)})."
        )
        self.prediction_length = shape[-1]
        self._forecast_dict = {
            k: self.forecast_array[i] for i, k in enumerate(self.forecast_keys)
        }
        # Returned for any quantile that was not provided at construction.
        self._nan_out = np.array([np.nan] * self.prediction_length)
    def quantile(self, q: Union[float, str]) -> np.ndarray:
        q_str = Quantile.parse(q).name
        # We return nan here such that evaluation runs through
        return self._forecast_dict.get(q_str, self._nan_out)
    @property
    def mean(self) -> np.ndarray:
        """
        Forecast mean.
        """
        # NaNs when no 'mean' key was supplied.
        return self._forecast_dict.get("mean", self._nan_out)
    def dim(self) -> int:
        if self._dim is not None:
            return self._dim
        else:
            if (
                len(self.forecast_array.shape) == 2
            ):  # 1D target. shape: (num_samples, prediction_length)
                return 1
            else:
                return self.forecast_array.shape[
                    1
                ]  # 2D target. shape: (num_samples, target_dim, prediction_length)
    def __repr__(self):
        return ", ".join(
            [
                f"QuantileForecast({self.forecast_array!r})",
                f"start_date={self.start_date!r}",
                f"freq={self.freq!r}",
                f"forecast_keys={self.forecast_keys!r}",
                f"item_id={self.item_id!r}",
                f"info={self.info!r})",
            ]
        )
class DistributionForecast(Forecast):
    """
    A `Forecast` object that uses a GluonTS distribution directly.
    This can for instance be used to represent marginal probability
    distributions for each time point -- although joint distributions are
    also possible, e.g. when using MultiVariateGaussian).
    Parameters
    ----------
    distribution
        Distribution object. This should represent the entire prediction
        length, i.e., if we draw `num_samples` samples from the distribution,
        the sample shape should be
        samples = trans_dist.sample(num_samples)
        samples.shape -> (num_samples, prediction_length)
    start_date
        start of the forecast
    freq
        forecast frequency
    info
        additional information that the forecaster may provide e.g. estimated
        parameters, number of iterations ran etc.
    """
    @validated()
    def __init__(
        self,
        distribution: Distribution,
        start_date: pd.Timestamp,
        freq: str,
        item_id: Optional[str] = None,
        info: Optional[Dict] = None,
    ) -> None:
        self.distribution = distribution
        # Combined shape; its leading axis is the prediction length.
        self.shape = (
            self.distribution.batch_shape + self.distribution.event_shape
        )
        self.prediction_length = self.shape[0]
        self.item_id = item_id
        self.info = info
        assert isinstance(
            start_date, pd.Timestamp
        ), "start_date should be a pandas Timestamp object"
        self.start_date = start_date
        assert isinstance(freq, str), "freq should be a string"
        self.freq = freq
        self._mean = None  # cache filled on first access of `mean`
    @property
    def mean(self) -> np.ndarray:
        """
        Forecast mean.
        """
        if self._mean is not None:
            return self._mean
        else:
            # Computed once from the distribution, then cached.
            self._mean = self.distribution.mean.asnumpy()
            return self._mean
    @property
    def mean_ts(self) -> pd.Series:
        """
        Forecast mean, as a pandas.Series object.
        """
        return pd.Series(self.mean, index=self.index)
    def quantile(self, level: Union[float, str]) -> np.ndarray:
        """Exact quantile at `level`, delegated to the distribution."""
        level = Quantile.parse(level).value
        q = self.distribution.quantile(mx.nd.array([level])).asnumpy()[0]
        return q
    def to_sample_forecast(self, num_samples: int = 200) -> SampleForecast:
        """Materialize this forecast as `num_samples` drawn samples."""
        return SampleForecast(
            samples=self.distribution.sample(num_samples),
            start_date=self.start_date,
            freq=self.freq,
            item_id=self.item_id,
            info=self.info,
        )
class OutputType(str, Enum):
    # Serialization outputs a Forecast can emit in as_json_dict().
    mean = "mean"
    samples = "samples"
    quantiles = "quantiles"
class Config(pydantic.BaseModel):
    # Serialization settings consumed by Forecast.as_json_dict().
    # `num_samples` also accepts the legacy alias "num_eval_samples".
    num_samples: int = pydantic.Field(100, alias="num_eval_samples")
    output_types: Set[OutputType] = {OutputType.quantiles, OutputType.mean}
    # FIXME: validate list elements
    quantiles: List[str] = ["0.1", "0.5", "0.9"]
    class Config:
        # pydantic v1-style model config (inner class), unrelated to the
        # outer Config model despite the shared name.
        allow_population_by_field_name = True
        # store additional fields
        extra = "allow"
| 20,222 | 29.456325 | 99 | py |
rankpredictor | rankpredictor-master/sub/gluonts/monkey_patch/monkey_patch_property_metaclass.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Third-party imports
import mxnet as mx
def __my__setattr__(cls, key, value):
    """Replacement __setattr__ for mxnet's class-property metaclass.

    Routes assignments that target an _MXClassPropertyDescriptor through
    the descriptor's __set__; everything else falls back to the normal
    metaclass __setattr__.
    """
    obj = cls.__dict__.get(key)
    if obj and isinstance(obj, mx.base._MXClassPropertyDescriptor):
        return obj.__set__(cls, value)
    return super(mx.base._MXClassPropertyMetaClass, cls).__setattr__(
        key, value
    )
# Monkey-patch applied at import time: affects every class using mxnet's
# _MXClassPropertyMetaClass process-wide.
mx.base._MXClassPropertyMetaClass.__setattr__ = __my__setattr__
| 958 | 32.068966 | 75 | py |
rankpredictor | rankpredictor-master/sub/gluonts/trainer/_base_raw.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import logging
import os
import tempfile
import time
import uuid
from typing import Any, List, NamedTuple, Optional, Union
# Third-party imports
import mxnet as mx
import mxnet.autograd as autograd
import mxnet.gluon.nn as nn
import numpy as np
# First-party imports
from gluonts.core.component import get_mxnet_context, validated
from gluonts.core.exception import GluonTSDataError
from gluonts.dataset.loader import TrainDataLoader, ValidationDataLoader
from gluonts.support.util import HybridContext
from gluonts.gluonts_tqdm import tqdm
# Relative imports
from . import learning_rate_scheduler as lrs
logger = logging.getLogger("trainer")
# Prefixes for checkpoint files written into the temporary training dir.
MODEL_ARTIFACT_FILE_NAME = "model"
STATE_ARTIFACT_FILE_NAME = "state"
# make the IDE happy: mx.py does not explicitly import autograd
mx.autograd = autograd
def check_loss_finite(val: float) -> None:
    """Raise GluonTSDataError when the loss is NaN or infinite."""
    if np.isfinite(val):
        return
    raise GluonTSDataError(
        "Encountered invalid loss value! Try reducing the learning rate "
        "or try a different likelihood."
    )
def loss_value(loss: mx.metric.Loss) -> float:
    """Extract the scalar value of the first metric in the accumulator."""
    name_value_pairs = loss.get_name_value()
    _, value = name_value_pairs[0]
    return value
class BestEpochInfo(NamedTuple):
    # Checkpoint bookkeeping for the best epoch observed during training.
    params_path: str  # file the network parameters were saved to
    epoch_no: int  # epoch index (-1 for the pre-training "init" checkpoint)
    metric_value: float  # epoch loss achieved at that checkpoint
class Trainer:
    r"""
    A trainer specifies how a network is going to be trained.
    A trainer is mainly defined by two sets of parameters. The first one determines the number of examples
    that the network will be trained on (`epochs`, `num_batches_per_epoch` and `batch_size`), while the
    second one specifies how the gradient updates are performed (`learning_rate`, `learning_rate_decay_factor`,
    `patience`, `minimum_learning_rate`, `clip_gradient` and `weight_decay`).
    Parameters
    ----------
    ctx
    epochs
        Number of epochs that the network will train (default: 100).
    batch_size
        Number of examples in each batch (default: 32).
    num_batches_per_epoch
        Number of batches at each epoch (default: 50).
    learning_rate
        Initial learning rate (default: :math:`10^{-3}`).
    learning_rate_decay_factor
        Factor (between 0 and 1) by which to decrease the learning rate (default: 0.5).
    patience
        The patience to observe before reducing the learning rate, nonnegative integer (default: 10).
    minimum_learning_rate
        Lower bound for the learning rate (default: :math:`5\cdot 10^{-5}`).
    clip_gradient
        Maximum value of gradient. The gradient is clipped if it is too large (default: 10).
    weight_decay
        The weight decay (or L2 regularization) coefficient. Modifies objective by adding a penalty for having
        large weights (default :math:`10^{-8}`).
    init
        Initializer of the weights of the network (default: "xavier").
    hybridize
    """
    @validated()
    def __init__(
        self,
        ctx: Optional[mx.Context] = None,
        epochs: int = 100,
        batch_size: int = 32,
        num_batches_per_epoch: int = 50,
        learning_rate: float = 1e-3,
        learning_rate_decay_factor: float = 0.5,
        patience: int = 10,
        minimum_learning_rate: float = 5e-5,
        clip_gradient: float = 10.0,
        weight_decay: float = 1e-8,
        init: Union[str, mx.initializer.Initializer] = "xavier",
        hybridize: bool = True,
    ) -> None:
        # Validate every hyper-parameter eagerly so misconfiguration fails
        # at construction rather than mid-training.
        assert (
            0 <= epochs < float("inf")
        ), "The value of `epochs` should be >= 0"
        assert 0 < batch_size, "The value of `batch_size` should be > 0"
        assert (
            0 < num_batches_per_epoch
        ), "The value of `num_batches_per_epoch` should be > 0"
        assert (
            0 < learning_rate < float("inf")
        ), "The value of `learning_rate` should be > 0"
        assert (
            0 <= learning_rate_decay_factor < 1
        ), "The value of `learning_rate_decay_factor` should be in the [0, 1) range"
        assert 0 <= patience, "The value of `patience` should be >= 0"
        assert (
            0 <= minimum_learning_rate
        ), "The value of `minimum_learning_rate` should be >= 0"
        assert 0 < clip_gradient, "The value of `clip_gradient` should be > 0"
        assert 0 <= weight_decay, "The value of `weight_decay` should be => 0"
        self.epochs = epochs
        self.batch_size = batch_size
        self.num_batches_per_epoch = num_batches_per_epoch
        self.learning_rate = learning_rate
        self.learning_rate_decay_factor = learning_rate_decay_factor
        self.patience = patience
        self.minimum_learning_rate = minimum_learning_rate
        self.clip_gradient = clip_gradient
        self.weight_decay = weight_decay
        self.init = init
        self.hybridize = hybridize
        self.ctx = ctx if ctx is not None else get_mxnet_context()
        self.halt = False  # cooperative stop flag, set by set_halt()
    def set_halt(self, signum: int, stack_frame: Any) -> None:
        """Signal handler: request a graceful stop of the training loop."""
        logging.info("Received signal: {}".format(signum))
        self.halt = True
    def count_model_params(self, net: nn.HybridBlock) -> int:
        """Total number of scalar parameters in `net`."""
        params = net.collect_params()
        num_params = 0
        for p in params:
            v = params[p]
            num_params += np.prod(v.shape)
        return num_params
    def __call__(
        self,
        net: nn.HybridBlock,
        input_names: List[str],
        train_iter: TrainDataLoader,
        validation_iter: Optional[ValidationDataLoader] = None,
    ) -> None:  # TODO: we may want to return some training information here
        """Train `net` in place, keeping the best-epoch parameters loaded."""
        is_validation_available = validation_iter is not None
        self.halt = False
        with tempfile.TemporaryDirectory(
            prefix="gluonts-trainer-temp-"
        ) as gluonts_temp:
            def base_path() -> str:
                # Fresh uuid per call so checkpoints never collide.
                return os.path.join(
                    gluonts_temp,
                    "{}_{}".format(STATE_ARTIFACT_FILE_NAME, uuid.uuid4()),
                )
            logging.info("Start model training")
            net.initialize(ctx=self.ctx, init=self.init)
            with HybridContext(
                net=net,
                hybridize=self.hybridize,
                static_alloc=True,
                static_shape=True,
            ):
                batch_size = train_iter.batch_size
                # Seed the "best so far" with an untrained checkpoint.
                best_epoch_info = BestEpochInfo(
                    params_path="%s-%s.params" % (base_path(), "init"),
                    epoch_no=-1,
                    metric_value=np.Inf,
                )
                lr_scheduler = lrs.MetricAttentiveScheduler(
                    objective="min",
                    patience=self.patience,
                    decay_factor=self.learning_rate_decay_factor,
                    min_lr=self.minimum_learning_rate,
                )
                optimizer = mx.optimizer.Adam(
                    learning_rate=self.learning_rate,
                    lr_scheduler=lr_scheduler,
                    wd=self.weight_decay,
                    clip_gradient=self.clip_gradient,
                )
                trainer = mx.gluon.Trainer(
                    net.collect_params(),
                    optimizer=optimizer,
                    kvstore="device",  # FIXME: initialize properly
                )
                # Shared epoch loop, used for both training (backprop +
                # optimizer step) and validation (forward only).
                def loop(
                    epoch_no, batch_iter, is_training: bool = True
                ) -> mx.metric.Loss:
                    tic = time.time()
                    epoch_loss = mx.metric.Loss()
                    with tqdm(batch_iter) as it:
                        for batch_no, data_entry in enumerate(it, start=1):
                            if self.halt:
                                break
                            inputs = [data_entry[k] for k in input_names]
                            with mx.autograd.record():
                                output = net(*inputs)
                                # network can returns several outputs, the first being always the loss
                                # when having multiple outputs, the forward returns a list in the case of hybrid and a
                                # tuple otherwise
                                # we may wrap network outputs in the future to avoid this type check
                                if isinstance(output, (list, tuple)):
                                    loss = output[0]
                                else:
                                    loss = output
                            if is_training:
                                loss.backward()
                                trainer.step(batch_size)
                            epoch_loss.update(None, preds=loss)
                            it.set_postfix(
                                ordered_dict={
                                    ("" if is_training else "validation_")
                                    + "avg_epoch_loss": loss_value(epoch_loss)
                                },
                                refresh=False,
                            )
                            # print out parameters of the network at the first pass
                            if batch_no == 1 and epoch_no == 0:
                                net_name = type(net).__name__
                                num_model_param = self.count_model_params(net)
                                logging.info(
                                    f"Number of parameters in {net_name}: {num_model_param}"
                                )
                    # mark epoch end time and log time cost of current epoch
                    toc = time.time()
                    logging.info(
                        "Epoch[%d] Elapsed time %.3f seconds",
                        epoch_no,
                        (toc - tic),
                    )
                    # check and log epoch loss
                    check_loss_finite(loss_value(epoch_loss))
                    logging.info(
                        "Epoch[%d] Evaluation metric '%s'=%f",
                        epoch_no,
                        ("" if is_training else "validation_") + "epoch_loss",
                        loss_value(epoch_loss),
                    )
                    return epoch_loss
                for epoch_no in range(self.epochs):
                    if self.halt:
                        logging.info(
                            f"Epoch[{epoch_no}] Interrupting training"
                        )
                        break
                    curr_lr = trainer.learning_rate
                    logging.info(
                        f"Epoch[{epoch_no}] Learning rate is {curr_lr}"
                    )
                    epoch_loss = loop(epoch_no, train_iter)
                    if is_validation_available:
                        # When validation data exists, scheduling and
                        # best-epoch tracking use the validation loss.
                        epoch_loss = loop(
                            epoch_no, validation_iter, is_training=False
                        )
                    lr_scheduler.step(loss_value(epoch_loss))
                    if loss_value(epoch_loss) < best_epoch_info.metric_value:
                        best_epoch_info = BestEpochInfo(
                            params_path="%s-%04d.params"
                            % (base_path(), epoch_no),
                            epoch_no=epoch_no,
                            metric_value=loss_value(epoch_loss),
                        )
                        net.save_parameters(
                            best_epoch_info.params_path
                        )  # TODO: handle possible exception
                    # After a learning-rate drop, restart from the best
                    # checkpoint seen so far.
                    if not trainer.learning_rate == curr_lr:
                        logging.info(
                            f"Loading parameters from best epoch "
                            f"({best_epoch_info.epoch_no})"
                        )
                        net.load_parameters(
                            best_epoch_info.params_path, self.ctx
                        )
                # Leave the network holding the overall best parameters.
                logging.info(
                    f"Loading parameters from best epoch "
                    f"({best_epoch_info.epoch_no})"
                )
                net.load_parameters(best_epoch_info.params_path, self.ctx)
                logging.info(
                    f"Final loss: {best_epoch_info.metric_value} "
                    f"(occurred at epoch {best_epoch_info.epoch_no})"
                )
                # save net parameters
                net.save_parameters(best_epoch_info.params_path)
                logging.getLogger().info("End model training")
rankpredictor | rankpredictor-master/sub/gluonts/trainer/learning_rate_scheduler-raw.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Third-party imports
import mxnet as mx
import numpy as np
class MetricAttentiveScheduler(mx.lr_scheduler.LRScheduler):
    r"""
    This scheduler decreases the learning rate based on the value of some
    validation metric to be optimized (maximized or minimized). The value
    of such metric is provided by calling the `step` method on the scheduler.
    A `patience` parameter must be provided, and the scheduler will reduce
    the learning rate if no improvement in the metric is done before
    `patience` observations of the metric.
    Examples:
        `patience = 0`: learning rate will decrease at every call to
        `step`, regardless of the metric value
        `patience = 1`: learning rate is reduced as soon `step` is called
        with a metric value which does not improve over the best encountered
        `patience = 10`: learning rate is reduced if no improvement in the
        metric is recorded in 10 successive calls to `step`
    Parameters
    ----------
    objective
        String, can either be `"min"` or `"max"`
    patience
        The patience to observe before reducing the learning rate, nonnegative integer.
    base_lr
        Initial learning rate to be used.
    decay_factor
        Factor (between 0 and 1) by which to decrease the learning rate.
    min_lr
        Lower bound for the learning rate, learning rate will never go below `min_lr`
    """
    def __init__(
        self,
        objective: str,
        patience: int,
        base_lr: float = 0.01,
        decay_factor: float = 0.5,
        min_lr: float = 0.0,
    ) -> None:
        # validate configuration before touching any state
        assert base_lr > 0, f"base_lr should be positive, got {base_lr}"
        assert (
            base_lr > min_lr
        ), f"base_lr should greater than min_lr, {base_lr} <= {min_lr}"
        assert (
            0 < decay_factor < 1
        ), f"decay_factor factor should be between 0 and 1, got {decay_factor}"
        assert patience >= 0, f"patience should be nonnegative, got {patience}"
        assert objective in [
            "min",
            "max",
        ], f"objective should be 'min' or 'max', got {objective}"
        super(MetricAttentiveScheduler, self).__init__(base_lr=base_lr)
        self.decay_factor = decay_factor
        self.patience = patience
        self.objective = objective
        self.min_lr = min_lr
        # best metric seen so far; initialized so any first value improves it
        self.best_metric = np.Inf if objective == "min" else -np.Inf
        # epoch index of the last improvement (or last lr decay)
        self.prev_change = 0
        # number of completed step() calls so far
        self.epoch_no = 0
        # lazily initialized to base_lr on first use (here and in step())
        self.curr_lr = None
    def __call__(self, num_update: int) -> float:
        # num_update (the optimizer's batch counter) is intentionally
        # ignored: the learning rate only changes through step().
        if self.curr_lr is None:
            self.curr_lr = self.base_lr
        assert self.curr_lr is not None
        return self.curr_lr
    def step(self, metric_value: float) -> None:
        """
        Inform the scheduler of the new value of the metric that is being
        optimized. This method should be invoked at regular intervals (e.g.
        at the end of every epoch, after computing a validation score).
        Parameters
        ----------
        metric_value
            Value of the metric that is being optimized.
        """
        if self.curr_lr is None:
            self.curr_lr = self.base_lr
        assert self.curr_lr is not None
        # improvement depends on the optimization direction
        metric_improved = (
            self.objective == "min" and metric_value < self.best_metric
        ) or (self.objective == "max" and metric_value > self.best_metric)
        if metric_improved:
            self.best_metric = metric_value
            self.prev_change = self.epoch_no
        # patience exhausted: decay the learning rate (never below min_lr)
        if self.epoch_no - self.prev_change >= self.patience:
            self.curr_lr = max(self.min_lr, self.decay_factor * self.curr_lr)
            self.prev_change = self.epoch_no
        self.epoch_no += 1
| 4,306 | 33.456 | 87 | py |
rankpredictor | rankpredictor-master/sub/gluonts/trainer/learning_rate_scheduler.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Third-party imports
import mxnet as mx
import numpy as np
class MetricAttentiveScheduler(mx.lr_scheduler.LRScheduler):
    r"""
    This scheduler decreases the learning rate based on the value of some
    validation metric to be optimized (maximized or minimized). The value
    of such metric is provided by calling the `step` method on the scheduler.
    A `patience` parameter must be provided, and the scheduler will reduce
    the learning rate if no improvement in the metric is done before
    `patience` observations of the metric.

    Examples:

        `patience = 0`: learning rate will decrease at every call to
        `step`, regardless of the metric value

        `patience = 1`: learning rate is reduced as soon `step` is called
        with a metric value which does not improve over the best encountered

        `patience = 10`: learning rate is reduced if no improvement in the
        metric is recorded in 10 successive calls to `step`

    Parameters
    ----------
    objective
        String, can either be `"min"` or `"max"`
    patience
        The patience to observe before reducing the learning rate, nonnegative integer.
    base_lr
        Initial learning rate to be used.
    decay_factor
        Factor (between 0 and 1) by which to decrease the learning rate.
    min_lr
        Lower bound for the learning rate, learning rate will never go below `min_lr`
    """

    def __init__(
        self,
        objective: str,
        patience: int,
        base_lr: float = 0.01,
        decay_factor: float = 0.5,
        min_lr: float = 0.0,
    ) -> None:
        # validate configuration before touching any state
        assert base_lr > 0, f"base_lr should be positive, got {base_lr}"
        assert (
            base_lr > min_lr
        ), f"base_lr should greater than min_lr, {base_lr} <= {min_lr}"
        assert (
            0 < decay_factor < 1
        ), f"decay_factor factor should be between 0 and 1, got {decay_factor}"
        assert patience >= 0, f"patience should be nonnegative, got {patience}"
        assert objective in [
            "min",
            "max",
        ], f"objective should be 'min' or 'max', got {objective}"
        super(MetricAttentiveScheduler, self).__init__(base_lr=base_lr)
        self.decay_factor = decay_factor
        self.patience = patience
        self.objective = objective
        self.min_lr = min_lr
        # Best metric seen so far, initialized so any first value improves it.
        # float("inf") replaces np.Inf, which was removed in NumPy 2.0 and is
        # numerically identical.
        self.best_metric = (
            float("inf") if objective == "min" else -float("inf")
        )
        # epoch index of the last improvement (or last lr decay)
        self.prev_change = 0
        # number of completed step() calls so far
        self.epoch_no = 0
        # lazily initialized to base_lr on first use (here and in step())
        self.curr_lr = None

    def __call__(self, num_update: int) -> float:
        # num_update (the optimizer's batch counter) is intentionally
        # ignored: the learning rate only changes through step().
        if self.curr_lr is None:
            self.curr_lr = self.base_lr
        assert self.curr_lr is not None
        return self.curr_lr

    def step(self, metric_value: float) -> bool:
        """
        Inform the scheduler of the new value of the metric that is being
        optimized. This method should be invoked at regular intervals (e.g.
        at the end of every epoch, after computing a validation score).

        Parameters
        ----------
        metric_value
            Value of the metric that is being optimized.

        Returns
        -------
        bool
            False when the learning rate is already at ``min_lr`` and the
            patience has run out again — a signal for the caller to stop
            training. True otherwise.
        """
        if self.curr_lr is None:
            self.curr_lr = self.base_lr
        assert self.curr_lr is not None
        # improvement depends on the optimization direction
        metric_improved = (
            self.objective == "min" and metric_value < self.best_metric
        ) or (self.objective == "max" and metric_value > self.best_metric)
        if metric_improved:
            self.best_metric = metric_value
            self.prev_change = self.epoch_no
        if self.epoch_no - self.prev_change >= self.patience:
            if self.curr_lr == self.min_lr:
                # cannot decay further: tell the caller to stop training
                return False
            self.curr_lr = max(self.min_lr, self.decay_factor * self.curr_lr)
            self.prev_change = self.epoch_no
        self.epoch_no += 1
        return True
| 4,399 | 33.375 | 87 | py |
rankpredictor | rankpredictor-master/sub/gluonts/trainer/_base.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import logging
import os
import tempfile
import time
import uuid
from typing import Any, List, NamedTuple, Optional, Union
# Third-party imports
import mxnet as mx
import mxnet.autograd as autograd
import mxnet.gluon.nn as nn
import numpy as np
# First-party imports
from gluonts.core.component import get_mxnet_context, validated
from gluonts.core.exception import GluonTSDataError
from gluonts.dataset.loader import TrainDataLoader, ValidationDataLoader
from gluonts.support.util import HybridContext
from gluonts.gluonts_tqdm import tqdm
# Relative imports
from . import learning_rate_scheduler as lrs
# module-level logger for training progress messages
logger = logging.getLogger("trainer")
# file-name stems used for artifacts saved during training
MODEL_ARTIFACT_FILE_NAME = "model"
STATE_ARTIFACT_FILE_NAME = "state"
# make the IDE happy: mx.py does not explicitly import autograd
mx.autograd = autograd
def check_loss_finite(val: float) -> None:
    """Raise ``GluonTSDataError`` if ``val`` is NaN or infinite.

    Called on the aggregated epoch loss to abort training early when the
    optimization has diverged.
    """
    if np.isfinite(val):
        return
    raise GluonTSDataError(
        "Encountered invalid loss value! Try reducing the learning rate "
        "or try a different likelihood."
    )
def loss_value(loss: mx.metric.Loss) -> float:
return loss.get_name_value()[0][1]
class BestEpochInfo(NamedTuple):
    """Bookkeeping record for the best epoch observed so far in training."""
    params_path: str  # path where the network parameters of that epoch were saved
    epoch_no: int  # index of the epoch (-1 before any epoch has completed)
    metric_value: float  # loss value achieved at that epoch
class Trainer:
    r"""
    A trainer specifies how a network is going to be trained.
    A trainer is mainly defined by two sets of parameters. The first one determines the number of examples
    that the network will be trained on (`epochs`, `num_batches_per_epoch` and `batch_size`), while the
    second one specifies how the gradient updates are performed (`learning_rate`, `learning_rate_decay_factor`,
    `patience`, `minimum_learning_rate`, `clip_gradient` and `weight_decay`).
    Parameters
    ----------
    ctx
        MXNet context on which to run training (default: result of ``get_mxnet_context()``).
    epochs
        Number of epochs that the network will train (default: 100).
    batch_size
        Number of examples in each batch (default: 32).
    num_batches_per_epoch
        Number of batches at each epoch (default: 50).
    learning_rate
        Initial learning rate (default: :math:`10^{-3}`).
    learning_rate_decay_factor
        Factor (between 0 and 1) by which to decrease the learning rate (default: 0.5).
    patience
        The patience to observe before reducing the learning rate, nonnegative integer (default: 10).
    minimum_learning_rate
        Lower bound for the learning rate (default: :math:`5\cdot 10^{-5}`).
    clip_gradient
        Maximum value of gradient. The gradient is clipped if it is too large (default: 10).
    weight_decay
        The weight decay (or L2 regularization) coefficient. Modifies objective by adding a penalty for having
        large weights (default :math:`10^{-8}`).
    init
        Initializer of the weights of the network (default: "xavier").
    hybridize
        Whether to hybridize the network before training (default: True).
    """
    @validated()
    def __init__(
        self,
        ctx: Optional[mx.Context] = None,
        epochs: int = 100,
        batch_size: int = 32,
        num_batches_per_epoch: int = 50,
        learning_rate: float = 1e-3,
        learning_rate_decay_factor: float = 0.5,
        patience: int = 10,
        minimum_learning_rate: float = 5e-5,
        clip_gradient: float = 10.0,
        weight_decay: float = 1e-8,
        init: Union[str, mx.initializer.Initializer] = "xavier",
        hybridize: bool = True,
    ) -> None:
        # validate all hyper-parameters up-front with explicit messages
        assert (
            0 <= epochs < float("inf")
        ), "The value of `epochs` should be >= 0"
        assert 0 < batch_size, "The value of `batch_size` should be > 0"
        assert (
            0 < num_batches_per_epoch
        ), "The value of `num_batches_per_epoch` should be > 0"
        assert (
            0 < learning_rate < float("inf")
        ), "The value of `learning_rate` should be > 0"
        assert (
            0 <= learning_rate_decay_factor < 1
        ), "The value of `learning_rate_decay_factor` should be in the [0, 1) range"
        assert 0 <= patience, "The value of `patience` should be >= 0"
        assert (
            0 <= minimum_learning_rate
        ), "The value of `minimum_learning_rate` should be >= 0"
        assert 0 < clip_gradient, "The value of `clip_gradient` should be > 0"
        assert 0 <= weight_decay, "The value of `weight_decay` should be => 0"
        self.epochs = epochs
        self.batch_size = batch_size
        self.num_batches_per_epoch = num_batches_per_epoch
        self.learning_rate = learning_rate
        self.learning_rate_decay_factor = learning_rate_decay_factor
        self.patience = patience
        self.minimum_learning_rate = minimum_learning_rate
        self.clip_gradient = clip_gradient
        self.weight_decay = weight_decay
        self.init = init
        self.hybridize = hybridize
        self.ctx = ctx if ctx is not None else get_mxnet_context()
        # cooperative-interruption flag; set by set_halt (e.g. from a signal handler)
        self.halt = False
    def set_halt(self, signum: int, stack_frame: Any) -> None:
        # signal-handler compatible callback that requests a graceful stop;
        # the training loop checks self.halt at epoch and batch boundaries
        logging.info("Received signal: {}".format(signum))
        self.halt = True
    def count_model_params(self, net: nn.HybridBlock) -> int:
        # total number of scalar parameters across all parameter arrays
        params = net.collect_params()
        num_params = 0
        for p in params:
            v = params[p]
            num_params += np.prod(v.shape)
        return num_params
    def __call__(
        self,
        net: nn.HybridBlock,
        input_names: List[str],
        train_iter: TrainDataLoader,
        validation_iter: Optional[ValidationDataLoader] = None,
    ) -> None:  # TODO: we may want to return some training information here
        """
        Train ``net`` on batches drawn from ``train_iter``, optionally
        evaluating on ``validation_iter`` after each epoch. Parameters of the
        best epoch (lowest loss) are checkpointed to a temporary directory
        and loaded back into ``net`` at the end of training.
        """
        is_validation_available = validation_iter is not None
        self.halt = False
        with tempfile.TemporaryDirectory(
            prefix="gluonts-trainer-temp-"
        ) as gluonts_temp:
            def base_path() -> str:
                # unique file-name stem for checkpoints within the temp dir
                return os.path.join(
                    gluonts_temp,
                    "{}_{}".format(STATE_ARTIFACT_FILE_NAME, uuid.uuid4()),
                )
            logging.info("Start model training")
            net.initialize(ctx=self.ctx, init=self.init)
            with HybridContext(
                net=net,
                hybridize=self.hybridize,
                static_alloc=True,
                static_shape=True,
            ):
                batch_size = train_iter.batch_size
                # placeholder "best" checkpoint with infinite loss; any real
                # epoch will beat it
                best_epoch_info = BestEpochInfo(
                    params_path="%s-%s.params" % (base_path(), "init"),
                    epoch_no=-1,
                    metric_value=np.Inf,
                )
                lr_scheduler = lrs.MetricAttentiveScheduler(
                    objective="min",
                    patience=self.patience,
                    decay_factor=self.learning_rate_decay_factor,
                    min_lr=self.minimum_learning_rate,
                )
                optimizer = mx.optimizer.Adam(
                    learning_rate=self.learning_rate,
                    lr_scheduler=lr_scheduler,
                    wd=self.weight_decay,
                    clip_gradient=self.clip_gradient,
                )
                trainer = mx.gluon.Trainer(
                    net.collect_params(),
                    optimizer=optimizer,
                    kvstore="device",  # FIXME: initialize properly
                )
                def loop(
                    epoch_no, batch_iter, is_training: bool = True
                ) -> mx.metric.Loss:
                    # run one pass over batch_iter; backprop only when
                    # is_training is True (validation reuses the same loop)
                    tic = time.time()
                    epoch_loss = mx.metric.Loss()
                    with tqdm(batch_iter) as it:
                        for batch_no, data_entry in enumerate(it, start=1):
                            if self.halt:
                                break
                            inputs = [data_entry[k] for k in input_names]
                            with mx.autograd.record():
                                output = net(*inputs)
                                # network can returns several outputs, the first being always the loss
                                # when having multiple outputs, the forward returns a list in the case of hybrid and a
                                # tuple otherwise
                                # we may wrap network outputs in the future to avoid this type check
                                if isinstance(output, (list, tuple)):
                                    loss = output[0]
                                else:
                                    loss = output
                            if is_training:
                                loss.backward()
                                trainer.step(batch_size)
                            epoch_loss.update(None, preds=loss)
                            it.set_postfix(
                                ordered_dict={
                                    ("" if is_training else "validation_")
                                    + "avg_epoch_loss": loss_value(epoch_loss)
                                },
                                refresh=False,
                            )
                            # print out parameters of the network at the first pass
                            if batch_no == 1 and epoch_no == 0:
                                net_name = type(net).__name__
                                num_model_param = self.count_model_params(net)
                                logging.info(
                                    f"Number of parameters in {net_name}: {num_model_param}"
                                )
                    # mark epoch end time and log time cost of current epoch
                    toc = time.time()
                    logging.info(
                        "Epoch[%d] Elapsed time %.3f seconds",
                        epoch_no,
                        (toc - tic),
                    )
                    # check and log epoch loss
                    check_loss_finite(loss_value(epoch_loss))
                    logging.info(
                        "Epoch[%d] Evaluation metric '%s'=%f",
                        epoch_no,
                        ("" if is_training else "validation_") + "epoch_loss",
                        loss_value(epoch_loss),
                    )
                    return epoch_loss
                for epoch_no in range(self.epochs):
                    if self.halt:
                        logging.info(
                            f"Epoch[{epoch_no}] Interrupting training"
                        )
                        break
                    # snapshot the lr so we can detect a scheduler decay below
                    curr_lr = trainer.learning_rate
                    logging.info(
                        f"Epoch[{epoch_no}] Learning rate is {curr_lr}"
                    )
                    epoch_loss = loop(epoch_no, train_iter)
                    # validation loss (when available) drives the scheduler
                    # and best-epoch selection instead of the training loss
                    if is_validation_available:
                        epoch_loss = loop(
                            epoch_no, validation_iter, is_training=False
                        )
                    # scheduler returns False when lr bottomed out at min_lr
                    should_continue = lr_scheduler.step(loss_value(epoch_loss))
                    if not should_continue:
                        logger.info("Stopping training")
                        break
                    if loss_value(epoch_loss) < best_epoch_info.metric_value:
                        best_epoch_info = BestEpochInfo(
                            params_path="%s-%04d.params"
                            % (base_path(), epoch_no),
                            epoch_no=epoch_no,
                            metric_value=loss_value(epoch_loss),
                        )
                        net.save_parameters(
                            best_epoch_info.params_path
                        )  # TODO: handle possible exception
                    # on lr decay, restart from the best checkpoint so far
                    if not trainer.learning_rate == curr_lr:
                        logging.info(
                            f"Loading parameters from best epoch "
                            f"({best_epoch_info.epoch_no})"
                        )
                        net.load_parameters(
                            best_epoch_info.params_path, self.ctx
                        )
                # leave the network holding the best parameters seen
                logging.info(
                    f"Loading parameters from best epoch "
                    f"({best_epoch_info.epoch_no})"
                )
                net.load_parameters(best_epoch_info.params_path, self.ctx)
                logging.info(
                    f"Final loss: {best_epoch_info.metric_value} "
                    f"(occurred at epoch {best_epoch_info.epoch_no})"
                )
                # save net parameters
                net.save_parameters(best_epoch_info.params_path)
            logging.getLogger().info("End model training")
| 13,185 | 37.443149 | 118 | py |
rankpredictor | rankpredictor-master/sub/gluonts/block/scaler.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Tuple
# Third-party imports
from mxnet.gluon import nn
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
class Scaler(nn.HybridBlock):
    """
    Base class for blocks used to scale data.

    Parameters
    ----------
    keepdims
        toggle to keep the dimension of the input tensor.
    """

    def __init__(self, keepdims: bool = False):
        super().__init__()
        self.keepdims = keepdims

    def compute_scale(self, F, data: Tensor, observed_indicator: Tensor):
        """
        Compute the scale of the given input data.

        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        data
            tensor of shape (N, T, C) containing the data to be scaled
        observed_indicator
            binary tensor with the same shape as ``data``, with 1 marking
            observed data points and 0 marking missing ones.
        """
        raise NotImplementedError()

    # noinspection PyMethodOverriding
    def hybrid_forward(
        self, F, data: Tensor, observed_indicator: Tensor
    ) -> Tuple[Tensor, Tensor]:
        """
        Divide ``data`` by the per-item scale from ``compute_scale``.

        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        data
            tensor of shape (N, T, C) containing the data to be scaled
        observed_indicator
            binary tensor with the same shape as ``data``, with 1 marking
            observed data points and 0 marking missing ones.

        Returns
        -------
        Tensor
            Tensor containing the "scaled" data, shape: (N, T, C).
        Tensor
            Tensor containing the scale, of shape (N, C) if
            ``keepdims == False``, and shape (N, 1, C) if
            ``keepdims == True``.
        """
        scale = self.compute_scale(F, data, observed_indicator)
        # broadcast-divide along the time axis
        expanded_scale = scale.expand_dims(axis=1)  # (N, 1, C)
        scaled_data = F.broadcast_div(data, expanded_scale)
        if self.keepdims:
            return scaled_data, expanded_scale
        return scaled_data, scale
class MeanScaler(Scaler):
    """
    Scaler that normalizes each item by the mean absolute value of its
    observed entries over time. Items without any observed (or nonzero)
    entries fall back to a scale computed over the whole batch.

    Parameters
    ----------
    minimum_scale
        default scale that is used if the time series has only zeros.
    """

    @validated()
    def __init__(self, minimum_scale: float = 1e-10, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.minimum_scale = minimum_scale

    def compute_scale(
        self, F, data: Tensor, observed_indicator: Tensor  # shapes (N, T, C)
    ) -> Tensor:
        """
        Compute per-item scales of shape (N, C) as the average absolute
        value over time of the observed entries.

        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        data
            tensor of shape (N, T, C) containing the data to be scaled
        observed_indicator
            binary tensor with the same shape as ``data``, with 1 marking
            observed data points and 0 marking missing ones.

        Returns
        -------
        Tensor
            shape (N, C), computed according to the average absolute value
            over time of the observed values.
        """
        # per-item observation count and absolute sum, both of shape (N, C)
        obs_count = F.sum(observed_indicator, axis=1)
        abs_sum = (data.abs() * observed_indicator).sum(axis=1)
        # batch-wide fallback scale per dimension, shape (C,)
        batch_count = obs_count.sum(axis=0)
        batch_scale = abs_sum.sum(axis=0) / F.maximum(batch_count, 1.0)
        # per-item candidate scale, shape (N, C)
        item_scale = abs_sum / F.maximum(obs_count, 1.0)
        # fall back to the batch scale for items with no observed values
        # or with only zeros
        has_signal = F.broadcast_greater(abs_sum, F.zeros_like(abs_sum))
        scale = F.where(
            has_signal,
            item_scale,
            F.broadcast_mul(batch_scale, F.ones_like(obs_count)),
        )
        return F.maximum(scale, self.minimum_scale)
class NOPScaler(Scaler):
    """
    Scaler that performs no scaling: every item is assigned a scale that is
    identically equal to 1.
    """

    @validated()
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    # noinspection PyMethodOverriding
    def compute_scale(
        self, F, data: Tensor, observed_indicator: Tensor
    ) -> Tensor:
        """
        Return a tensor of ones with shape (N, C), regardless of the data.

        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        data
            tensor of shape (N, T, C) containing the data to be scaled
        observed_indicator
            binary tensor with the same shape as ``data``, with 1 marking
            observed data points and 0 marking missing ones.
        """
        # the time-axis mean of an all-ones tensor is exactly 1
        all_ones = F.ones_like(data)
        return all_ones.mean(axis=1)
| 6,382 | 30.756219 | 92 | py |
rankpredictor | rankpredictor-master/sub/gluonts/block/cnn.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Optional, Union, List, Tuple
# Third-party imports
from mxnet import gluon
from mxnet.gluon import nn
# First-party imports
from gluonts.model.common import Tensor
class CausalConv1D(gluon.HybridBlock):
    """
    1D causal temporal convolution, where the term causal means that output[t]
    does not depend on input[t+1:]. Notice that Conv1D is not implemented in
    Gluon.

    This is the basic structure used in Wavenet [ODZ+16]_ and Temporal
    Convolution Network [BKK18]_.

    The output has the same shape as the input, while we always left-pad zeros.

    Parameters
    ----------
    channels
        The dimensionality of the output space, i.e. the number of output
        channels (filters) in the convolution.
    kernel_size
        Specifies the dimensions of the convolution window.
    dilation
        Specifies the dilation rate to use for dilated convolution.
    activation
        Activation function to use. See :func:`~mxnet.ndarray.Activation`.
        If you don't specify anything, no activation is applied
        (ie. "linear" activation: `a(x) = x`).
    """

    def __init__(
        self,
        channels: int,
        kernel_size: int,
        dilation: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super(CausalConv1D, self).__init__(**kwargs)
        self.dilation = dilation
        self.kernel_size = kernel_size
        # symmetric padding added by Conv1D; the excess on the right is
        # sliced off in hybrid_forward to enforce causality
        self.padding = dilation * (kernel_size - 1)
        self.conv1d = nn.Conv1D(
            channels=channels,
            kernel_size=kernel_size,
            dilation=dilation,
            padding=self.padding,
            activation=activation,
            **kwargs,
        )

    # noinspection PyMethodOverriding
    def hybrid_forward(self, F, data: Tensor) -> Tensor:
        """
        In Gluon's conv1D implementation, input has dimension NCW where N is
        batch_size, C is channel, and W is time (sequence_length).

        Parameters
        ----------
        data
            Shape (batch_size, num_features, sequence_length)

        Returns
        -------
        Tensor
            causal conv1d output. Shape (batch_size, num_features, sequence_length)
        """
        ct = self.conv1d(data)
        # Drop the trailing positions produced by the symmetric padding so
        # that output[t] never depends on input[t+1:].
        # BUGFIX: guard on padding, not kernel_size. kernel_size is always
        # >= 1, so the old `if self.kernel_size > 0` was always true; with
        # kernel_size == 1 we have padding == 0 and `end=-self.padding`
        # becomes `end=0`, which would slice away the entire output.
        if self.padding > 0:
            ct = F.slice_axis(ct, axis=2, begin=0, end=-self.padding)
        return ct
class DilatedCausalGated(gluon.HybridBlock):
    """
    1D convolution with Gated mechanism, see the Wavenet papers described above.

    Parameters
    ----------
    inner_channels
        The dimensionality of the intermediate space
    out_channels
        The dimensionality of the output space
    kernel_size
        Specifies the dimensions of the convolution window.
    dilation
        Specifies the dilation rate to use for dilated convolution.
    """

    def __init__(
        self,
        inner_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int], List[int]],
        dilation: Union[int, Tuple[int], List[int]],
        **kwargs,
    ) -> None:
        super(DilatedCausalGated, self).__init__(**kwargs)
        with self.name_scope():
            # filter branch (tanh)
            self.conv1 = CausalConv1D(
                channels=inner_channels,
                kernel_size=kernel_size,
                dilation=dilation,
                activation="tanh",
            )
            # gate branch (sigmoid)
            self.conv2 = CausalConv1D(
                channels=inner_channels,
                kernel_size=kernel_size,
                dilation=dilation,
                activation="sigmoid",
            )
            # 1x1 convolution projecting to the output dimensionality
            self.output_conv = gluon.nn.Conv1D(
                channels=out_channels, kernel_size=1
            )

    # noinspection PyMethodOverriding
    def hybrid_forward(self, F, x: Tensor) -> Tensor:
        """
        Compute the 1D convolution with Gated mechanism.

        Parameters
        ----------
        x
            input features, shape (batch_size, num_features, sequence_length)

        Returns
        -------
        Tensor
            output, shape (batch_size, num_features, sequence_length)
        """
        filter_branch = self.conv1(x)
        gate_branch = self.conv2(x)
        gated = filter_branch * gate_branch
        return self.output_conv(gated)
class ResidualSequential(gluon.nn.HybridSequential):
    """
    Adding residual connection to each layer of the hybrid sequential blocks
    """

    def __init__(self, **kwargs):
        super(ResidualSequential, self).__init__(**kwargs)

    # noinspection PyMethodOverriding
    def hybrid_forward(self, F, x: Tensor) -> Tensor:
        """
        Feed ``x`` through the child blocks with residual connections and
        return the sum of all intermediate layer outputs.

        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        x
            input tensor

        Returns
        -------
        Tensor
            output of the ResidualSequential
        """
        layer_outputs = []
        for index, layer in enumerate(self._children.values()):
            y = layer(x)
            layer_outputs.append(y)
            # the first layer replaces the input; later layers are residual
            x = y if index == 0 else x + y
        return sum(layer_outputs)
| 5,706 | 27.252475 | 83 | py |
rankpredictor | rankpredictor-master/sub/gluonts/block/mlp.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List
# Third-party imports
from mxnet.gluon import nn
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
class MLP(nn.HybridBlock):
    """
    Defines an MLP block.

    Parameters
    ----------
    layer_sizes
        number of hidden units per layer.
    flatten
        toggle whether to flatten the output tensor.
    activation
        activation function of the MLP, default is relu.
    """

    @validated()
    def __init__(
        self, layer_sizes: List[int], flatten: bool, activation="relu"
    ) -> None:
        super().__init__()
        self.layer_sizes = layer_sizes
        with self.name_scope():
            self.layers = nn.HybridSequential()
            # one Dense layer per entry in layer_sizes; the layer index was
            # previously obtained via enumerate but never used
            for layer_dim in layer_sizes:
                self.layers.add(
                    nn.Dense(layer_dim, flatten=flatten, activation=activation)
                )

    # noinspection PyMethodOverriding
    def hybrid_forward(self, F, x: Tensor) -> Tensor:
        """
        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        x
            Input tensor

        Returns
        -------
        Tensor
            Output of the MLP given the input tensor.
        """
        return self.layers(x)
| 1,977 | 26.859155 | 79 | py |
rankpredictor | rankpredictor-master/sub/gluonts/block/encoder.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Tuple
# Third-party imports
from mxnet.gluon import nn
# First-party imports
from gluonts.block.cnn import CausalConv1D
from gluonts.block.mlp import MLP
from gluonts.block.rnn import RNN
from gluonts.core.component import validated
from gluonts.model.common import Tensor
class Seq2SeqEncoder(nn.HybridBlock):
    """
    Abstract class for the encoder. An encoder takes a `target` sequence with
    corresponding covariates and maps it into a static latent and
    a dynamic latent code with the same length as the `target` sequence.
    """

    @validated()
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    # noinspection PyMethodOverriding
    def hybrid_forward(
        self,
        F,
        target: Tensor,
        static_features: Tensor,
        dynamic_features: Tensor,
    ) -> Tuple[Tensor, Tensor]:
        """
        Encode the target sequence; must be overridden by subclasses.

        Parameters
        ----------
        F:
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        target
            target time series,
            shape (batch_size, sequence_length)
        static_features
            static features,
            shape (batch_size, num_static_features)
        dynamic_features
            dynamic_features,
            shape (batch_size, sequence_length, num_dynamic_features)

        Returns
        -------
        Tensor
            static code,
            shape (batch_size, num_static_features)
        Tensor
            dynamic code,
            shape (batch_size, sequence_length, num_dynamic_features)
        """
        raise NotImplementedError

    @staticmethod
    def _assemble_inputs(
        F, target: Tensor, static_features: Tensor, dynamic_features: Tensor
    ) -> Tensor:
        """
        Concatenate target, (time-tiled) static features, and dynamic
        features along the channel axis.

        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        target
            target time series,
            shape (batch_size, sequence_length)
        static_features
            static features,
            shape (batch_size, num_static_features)
        dynamic_features
            dynamic_features,
            shape (batch_size, sequence_length, num_dynamic_features)

        Returns
        -------
        Tensor
            combined features,
            shape (batch_size, sequence_length,
                   num_static_features + num_dynamic_features + 1)
        """
        target = target.expand_dims(axis=-1)  # (N, T, 1)
        # tile the static features along the time axis via a batched outer
        # product with a (N, T, 1) tensor of ones
        ones_nt1 = F.ones_like(target)
        static_over_time = F.batch_dot(
            ones_nt1, static_features.expand_dims(1)
        )  # (N, T, C)
        return F.concat(
            target, static_over_time, dynamic_features, dim=2
        )  # (N, T, C)
class HierarchicalCausalConv1DEncoder(Seq2SeqEncoder):
    """
    Defines a stack of dilated convolutions as the encoder.
    See the following paper for details:
    1. Van Den Oord, A., Dieleman, S., Zen, H., Simonyan, K., Vinyals, O., Graves, A., Kalchbrenner,
    N., Senior, A.W. and Kavukcuoglu, K., 2016, September. WaveNet: A generative model for raw audio. In SSW (p. 125).
    Parameters
    ----------
    dilation_seq
        dilation for each convolution in the stack.
    kernel_size_seq
        kernel size for each convolution in the stack.
    channels_seq
        number of channels for each convolution in the stack.
    use_residual
        flag to toggle using residual connections.
    use_covariates
        flag to toggle whether to use coveriates as input to the encoder
    """
    @validated()
    def __init__(
        self,
        dilation_seq: List[int],
        kernel_size_seq: List[int],
        channels_seq: List[int],
        use_residual: bool = False,
        use_covariates: bool = False,
        **kwargs,
    ) -> None:
        assert all(
            [x > 0 for x in dilation_seq]
        ), "`dilation_seq` values must be greater than zero"
        assert all(
            [x > 0 for x in kernel_size_seq]
        ), "`kernel_size_seq` values must be greater than zero"
        assert all(
            [x > 0 for x in channels_seq]
        ), "`channel_dim_seq` values must be greater than zero"
        super().__init__(**kwargs)
        self.use_residual = use_residual
        self.use_covariates = use_covariates
        self.cnn = nn.HybridSequential()
        # one causal convolution per (channels, kernel_size, dilation) triple;
        # the three sequences are assumed to have equal length — zip silently
        # truncates to the shortest
        it = zip(channels_seq, kernel_size_seq, dilation_seq)
        for layer_no, (channels, kernel_size, dilation) in enumerate(it):
            convolution = CausalConv1D(
                channels=channels,
                kernel_size=kernel_size,
                dilation=dilation,
                activation="relu",
                prefix=f"conv_{layer_no:#02d}'_",
            )
            self.cnn.add(convolution)
    def hybrid_forward(
        self,
        F,
        target: Tensor,
        static_features: Tensor,
        dynamic_features: Tensor,
    ) -> Tuple[Tensor, Tensor]:
        """
        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        target
            target time series,
            shape (batch_size, sequence_length)
        static_features
            static features,
            shape (batch_size, num_static_features)
        dynamic_features
            dynamic_features,
            shape (batch_size, sequence_length, num_dynamic_features)
        Returns
        -------
        Tensor
            static code,
            shape (batch_size, num_static_features)
        Tensor
            dynamic code,
            shape (batch_size, sequence_length, num_dynamic_features)
        """
        # when covariates are disabled, the raw target alone is convolved
        if self.use_covariates:
            inputs = Seq2SeqEncoder._assemble_inputs(
                F,
                target=target,
                static_features=static_features,
                dynamic_features=dynamic_features,
            )
        else:
            inputs = target
        # NTC -> NCT (or NCW)
        ct = inputs.swapaxes(1, 2)
        ct = self.cnn(ct)
        ct = ct.swapaxes(1, 2)
        # now we are back in NTC
        # optionally concatenate the raw target back onto the conv output
        if self.use_residual:
            ct = F.concat(ct, target, dim=2)
        # return the last state as the static code
        static_code = F.slice_axis(ct, axis=1, begin=-1, end=None)
        static_code = F.squeeze(static_code, axis=1)
        return static_code, ct
class RNNEncoder(Seq2SeqEncoder):
    """
    RNN-based encoder.

    Only the target series is fed to the RNN; the static and dynamic
    feature arguments are accepted for interface compatibility.

    Parameters
    ----------
    mode
        type of the RNN. Can be either: rnn_relu (RNN with relu activation),
        rnn_tanh, (RNN with tanh activation), lstm or gru.
    hidden_size
        number of units per hidden layer.
    num_layers
        number of hidden layers.
    bidirectional
        toggle use of bi-directional RNN as encoder.
    """

    @validated()
    def __init__(
        self,
        mode: str,
        hidden_size: int,
        num_layers: int,
        bidirectional: bool,
        **kwargs,
    ) -> None:
        assert num_layers > 0, "`num_layers` value must be greater than zero"
        assert hidden_size > 0, "`hidden_size` value must be greater than zero"
        super().__init__(**kwargs)
        with self.name_scope():
            self.rnn = RNN(mode, hidden_size, num_layers, bidirectional)

    def hybrid_forward(
        self,
        F,
        target: Tensor,
        static_features: Tensor,
        dynamic_features: Tensor,
    ) -> Tuple[Tensor, Tensor]:
        """
        Encode the target series with the RNN.

        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        target
            target time series, shape (batch_size, sequence_length).
        static_features
            unused; kept for the common encoder interface.
        dynamic_features
            unused; kept for the common encoder interface.

        Returns
        -------
        Tensor
            static code: the RNN state of the last time step (time axis of
            size one is kept).
        Tensor
            dynamic code: the RNN states over all time steps.
        """
        dynamic_code = self.rnn(target)
        # keep the trailing time step (axis retained) as the static summary
        last_state = F.slice_axis(dynamic_code, axis=1, begin=-1, end=None)
        return last_state, dynamic_code
class MLPEncoder(Seq2SeqEncoder):
    """
    Encoder that flattens the assembled inputs and summarizes them with a
    multilayer perceptron.

    The whole sequence collapses into a single static code; the dynamic
    code is a zero placeholder with a trailing channel axis.

    Parameters
    ----------
    layer_sizes
        number of hidden units per layer.
    kwargs
        additional arguments forwarded to `Seq2SeqEncoder`.
    """

    @validated()
    def __init__(self, layer_sizes: List[int], **kwargs) -> None:
        super().__init__(**kwargs)
        self.model = MLP(layer_sizes, flatten=True)

    def hybrid_forward(
        self,
        F,
        target: Tensor,
        static_features: Tensor,
        dynamic_features: Tensor,
    ) -> Tuple[Tensor, Tensor]:
        """
        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        target
            target time series, shape (batch_size, sequence_length).
        static_features
            static features, shape (batch_size, num_static_features).
        dynamic_features
            dynamic features,
            shape (batch_size, sequence_length, num_dynamic_features).

        Returns
        -------
        Tensor
            static code produced by the MLP.
        Tensor
            all-zero dynamic code, shape (batch_size, sequence_length, 1).
        """
        inputs = Seq2SeqEncoder._assemble_inputs(
            F,
            target=target,
            static_features=static_features,
            dynamic_features=dynamic_features,
        )
        static_code = self.model(inputs)
        # no meaningful per-step code here; emit zeros with a channel axis
        dynamic_code = F.expand_dims(F.zeros_like(target), axis=2)
        return static_code, dynamic_code
class RNNCovariateEncoder(Seq2SeqEncoder):
    """
    RNN encoder that consumes the covariates together with the target.

    Parameters
    ----------
    mode
        type of the RNN. Can be either: rnn_relu (RNN with relu activation),
        rnn_tanh, (RNN with tanh activation), lstm or gru.
    hidden_size
        number of units per hidden layer.
    num_layers
        number of hidden layers.
    bidirectional
        toggle use of bi-directional RNN as encoder.
    """

    @validated()
    def __init__(
        self,
        mode: str,
        hidden_size: int,
        num_layers: int,
        bidirectional: bool,
        **kwargs,
    ) -> None:
        assert num_layers > 0, "`num_layers` value must be greater than zero"
        assert hidden_size > 0, "`hidden_size` value must be greater than zero"
        super().__init__(**kwargs)
        with self.name_scope():
            self.rnn = RNN(mode, hidden_size, num_layers, bidirectional)

    def hybrid_forward(
        self,
        F,
        target: Tensor,
        static_features: Tensor,
        dynamic_features: Tensor,
    ) -> Tuple[Tensor, Tensor]:
        """
        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        target
            target time series, shape (batch_size, sequence_length).
        static_features
            static features, shape (batch_size, num_static_features).
        dynamic_features
            dynamic features,
            shape (batch_size, sequence_length, num_dynamic_features).

        Returns
        -------
        Tensor
            static code: the squeezed last RNN state.
        Tensor
            dynamic code: the RNN states over all time steps.
        """
        inputs = Seq2SeqEncoder._assemble_inputs(
            F,
            target=target,
            static_features=static_features,
            dynamic_features=dynamic_features,
        )
        dynamic_code = self.rnn(inputs)

        # the last state serves as the static code; the original authors
        # note this works less well than concatenating all previous states
        last_state = F.slice_axis(dynamic_code, axis=1, begin=-1, end=None)
        static_code = F.squeeze(last_state, axis=1)
        return static_code, dynamic_code
| 13,120 | 26.507338 | 118 | py |
rankpredictor | rankpredictor-master/sub/gluonts/block/enc2dec.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Tuple
# Third-party imports
from mxnet.gluon import nn
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
class Seq2SeqEnc2Dec(nn.HybridBlock):
    """
    Abstract base for modules that pass the encoder output on to the
    decoder, such as an attention network.

    Concrete subclasses implement `hybrid_forward`.
    """

    @validated()
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    # noinspection PyMethodOverriding
    def hybrid_forward(
        self,
        F,
        encoder_output_static: Tensor,
        encoder_output_dynamic: Tensor,
        future_features: Tensor,
    ) -> Tuple[Tensor, Tensor, Tensor]:
        """
        Parameters
        ----------
        encoder_output_static
            shape (batch_size, num_features) or (N, C)
        encoder_output_dynamic
            shape (batch_size, context_length, num_features) or (N, T, C)
        future_features
            shape (batch_size, prediction_length, num_features) or (N, T, C)

        Returns
        -------
        Tensor
            shape (batch_size, num_features) or (N, C)
        Tensor
            shape (batch_size, prediction_length, num_features) or (N, T, C)
        Tensor
            shape (batch_size, sequence_length, num_features) or (N, T, C)
        """
        # intentionally unimplemented: subclasses override this method
        pass
class PassThroughEnc2Dec(Seq2SeqEnc2Dec):
    """
    Identity connector between encoder and decoder: forwards all three
    tensors unchanged.
    """

    def hybrid_forward(
        self,
        F,
        encoder_output_static: Tensor,
        encoder_output_dynamic: Tensor,
        future_features: Tensor,
    ) -> Tuple[Tensor, Tensor, Tensor]:
        """
        Parameters
        ----------
        encoder_output_static
            shape (batch_size, num_features) or (N, C)
        encoder_output_dynamic
            shape (batch_size, context_length, num_features) or (N, T, C)
        future_features
            shape (batch_size, prediction_length, num_features) or (N, T, C)

        Returns
        -------
        Tensor, Tensor, Tensor
            the three input tensors, untouched.
        """
        outputs = (
            encoder_output_static,
            encoder_output_dynamic,
            future_features,
        )
        return outputs
| 2,994 | 25.741071 | 77 | py |
rankpredictor | rankpredictor-master/sub/gluonts/block/decoder.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List
# Third-party imports
from mxnet.gluon import nn
# First-party imports
from gluonts.block.mlp import MLP
from gluonts.core.component import validated
from gluonts.model.common import Tensor
class Seq2SeqDecoder(nn.HybridBlock):
    """
    Abstract class for the Decoder block in sequence-to-sequence models.

    Concrete subclasses implement `hybrid_forward`.
    """

    @validated()
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    # noinspection PyMethodOverriding
    def hybrid_forward(
        self, F, dynamic_input: Tensor, static_input: Tensor
    ) -> None:
        """
        Abstract function definition of the hybrid_forward.

        Parameters
        ----------
        dynamic_input
            dynamic_features, shape (batch_size, sequence_length, num_features)
            or (N, T, C)
        static_input
            static features, shape (batch_size, num_features) or (N, C)
        """
        # intentionally unimplemented: subclasses override this method
        pass
class ForkingMLPDecoder(Seq2SeqDecoder):
    """
    Multilayer perceptron decoder for sequence-to-sequence models.

    See [WTN+17]_ for details.

    Parameters
    ----------
    dec_len
        length of the decoder (usually the number of forecasted time steps).
    final_dim
        dimensionality of the output per time step (number of predicted
        quantiles).
    hidden_dimension_sequence
        number of hidden units for each MLP layer.
    """

    @validated()
    def __init__(
        self,
        dec_len: int,
        final_dim: int,
        hidden_dimension_sequence: List[int] = list([]),
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.dec_len = dec_len
        self.final_dims = final_dim

        with self.name_scope():
            self.model = nn.HybridSequential()
            for idx, hidden_dim in enumerate(hidden_dimension_sequence):
                self.model.add(
                    nn.Dense(
                        dec_len * hidden_dim,
                        flatten=False,
                        activation="relu",
                        prefix=f"mlp_{idx:#02d}'_",
                    )
                )
            # final projection to dec_len * final_dim outputs per position
            self.model.add(
                nn.Dense(
                    dec_len * final_dim,
                    flatten=False,
                    activation="softrelu",
                    prefix=f"mlp_{len(hidden_dimension_sequence):#02d}'_",
                )
            )

    def hybrid_forward(
        self, F, dynamic_input: Tensor, static_input: Tensor = None
    ) -> Tensor:
        """
        ForkingMLPDecoder forward call.

        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        dynamic_input
            dynamic_features, shape (batch_size, sequence_length, num_features)
            or (N, T, C).
        static_input
            not used in this decoder.

        Returns
        -------
        Tensor
            mlp output, shape (0, 0, dec_len, final_dims), i.e. batch and
            sequence axes are preserved.
        """
        flat = self.model(dynamic_input)
        return flat.reshape(shape=(0, 0, self.dec_len, self.final_dims))
class OneShotDecoder(Seq2SeqDecoder):
    """
    Decoder that expands the static code to one vector per decoder time
    step, concatenates it with the dynamic input, and applies an MLP.

    Parameters
    ----------
    decoder_length
        length of the decoder (number of time steps)
    layer_sizes
        dimensions of the hidden layers
    static_outputs_per_time_step
        number of outputs per time step
    """

    @validated()
    def __init__(
        self,
        decoder_length: int,
        layer_sizes: List[int],
        static_outputs_per_time_step: int,
    ) -> None:
        super().__init__()
        self.decoder_length = decoder_length
        self.static_outputs_per_time_step = static_outputs_per_time_step
        with self.name_scope():
            # per-time-step MLP (flatten=False keeps the time axis)
            self.mlp = MLP(layer_sizes, flatten=False)
            # expands the static code into decoder_length vectors
            self.expander = nn.Dense(
                units=decoder_length * static_outputs_per_time_step
            )

    def hybrid_forward(
        self,
        F,
        static_input: Tensor,  # (batch_size, static_input_dim)
        dynamic_input: Tensor,  # (batch_size,
    ) -> Tensor:
        """
        OneShotDecoder forward call

        NOTE(review): the parameter order here (static before dynamic) is
        swapped relative to the `Seq2SeqDecoder` base class, which declares
        (dynamic_input, static_input). Positional callers must pass the
        static code first — confirm against call sites before changing.

        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        static_input
            static features, shape (batch_size, num_features) or (N, C)
        dynamic_input
            dynamic_features, shape (batch_size, sequence_length, num_features)
            or (N, T, C)

        Returns
        -------
        Tensor
            mlp output, shape (batch_size, dec_len, size of last layer)
        """
        # expand static code to (N, decoder_length, outputs_per_step)
        static_input_tile = self.expander(static_input).reshape(
            (0, self.decoder_length, self.static_outputs_per_time_step)
        )
        combined_input = F.concat(dynamic_input, static_input_tile, dim=2)
        out = self.mlp(combined_input)  # (N, T, layer_sizes[-1])
        return out
| 5,630 | 26.876238 | 79 | py |
rankpredictor | rankpredictor-master/sub/gluonts/block/quantile_output.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional, Tuple
# Third-party imports
from mxnet import nd
from mxnet.gluon import nn
from mxnet.gluon.loss import Loss
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
class QuantileLoss(Loss):
    @validated()
    def __init__(
        self,
        quantiles: List[float],
        quantile_weights: List[float] = None,
        weight=None,
        batch_axis=0,
        **kwargs,
    ) -> None:
        """
        Represents the quantile loss used to fit decoders that learn quantiles.

        Parameters
        ----------
        quantiles
            list of quantiles to compute loss over.
        quantile_weights
            weights of the quantiles; defaults to uniform weights
            1 / len(quantiles).
        weight:
            weighting of the loss.
        batch_axis:
            indicates axis that represents the batch.
        """
        super().__init__(weight, batch_axis, **kwargs)

        self.quantiles = quantiles
        self.num_quantiles = len(quantiles)
        # Store the weights as plain Python floats. The previous version
        # kept an NDArray for the default (and the raw user input
        # otherwise) and called `.asscalar()` on each element in
        # hybrid_forward, which crashes when the caller passes an ordinary
        # list of floats.
        if not quantile_weights:
            self.quantile_weights = [
                1.0 / self.num_quantiles
            ] * self.num_quantiles
        else:
            self.quantile_weights = [float(w) for w in quantile_weights]

    # noinspection PyMethodOverriding
    def hybrid_forward(
        self, F, y_true: Tensor, y_pred: Tensor, sample_weight=None
    ):
        """
        Compute the weighted sum of quantile losses.

        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        y_true
            true target, shape (N1 x N2 x ... x Nk x dimension of time series
            (normally 1))
        y_pred
            predicted target, shape (N1 x N2 x ... x Nk x num_quantiles)
        sample_weight
            sample weights

        Returns
        -------
        Tensor
            weighted sum of the quantile losses, shape N1 x N1 x ... Nk
        """
        y_pred_all = F.split(
            y_pred, axis=-1, num_outputs=self.num_quantiles, squeeze_axis=1
        )

        qt_loss = []
        for i, y_pred_q in enumerate(y_pred_all):
            q = self.quantiles[i]
            # quantile_weights[i] is a plain float, so the multiplication
            # works for both NDArray and Symbol inputs
            weighted_qt = (
                self.compute_quantile_loss(F, y_true, y_pred_q, q)
                * self.quantile_weights[i]
            )
            qt_loss.append(weighted_qt)
        stacked_qt_losses = F.stack(*qt_loss, axis=-1)
        sum_qt_loss = F.mean(
            stacked_qt_losses, axis=-1
        )  # avg across quantiles
        if sample_weight is not None:
            return sample_weight * sum_qt_loss
        else:
            return sum_qt_loss

    @staticmethod
    def compute_quantile_loss(
        F, y_true: Tensor, y_pred_p: Tensor, p: float
    ) -> Tensor:
        """
        Compute the quantile loss of the given quantile.

        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        y_true
            true target, shape (N1 x N2 x ... x Nk x dimension of time series
            (normally 1)).
        y_pred_p
            predicted target quantile, shape (N1 x N2 x ... x Nk x 1).
        p
            quantile error to compute the loss.

        Returns
        -------
        Tensor
            quantile loss, shape: (N1 x N2 x ... x Nk x 1)
        """
        under_bias = p * F.maximum(y_true - y_pred_p, 0)
        over_bias = (1 - p) * F.maximum(y_pred_p - y_true, 0)

        qt_loss = 2 * (under_bias + over_bias)

        return qt_loss
class ProjectParams(nn.HybridBlock):
    """
    Defines a dense layer to compute the projection weights into the quantile
    space.

    Parameters
    ----------
    num_quantiles
        number of quantiles to compute the projection.
    """

    @validated()
    def __init__(self, num_quantiles, **kwargs):
        super().__init__(**kwargs)
        with self.name_scope():
            self.projection = nn.Dense(units=num_quantiles, flatten=False)

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, x: Tensor) -> Tensor:
        """
        Project the input onto the quantile outputs.

        Fixed: the return annotation previously claimed a
        Tuple[Tensor, Tensor, Tensor] although a single Tensor is returned.

        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        x
            input tensor

        Returns
        -------
        Tensor
            output of the projection layer, last axis of size num_quantiles.
        """
        return self.projection(x)
class QuantileOutput:
    """
    Output-layer wiring: bundles the quantile loss with the projection
    layer that maps the network output into the quantile space.

    Parameters
    ----------
    quantiles
        list of quantiles to compute loss over.
    quantile_weights
        weights of the quantiles.
    """

    @validated()
    def __init__(
        self,
        quantiles: List[float],
        quantile_weights: Optional[List[float]] = None,
    ) -> None:
        self.quantiles = quantiles
        self.quantile_weights = quantile_weights

    def get_loss(self) -> nn.HybridBlock:
        """
        Returns
        -------
        nn.HybridBlock
            a freshly constructed quantile loss object.
        """
        loss = QuantileLoss(
            quantiles=self.quantiles,
            quantile_weights=self.quantile_weights,
        )
        return loss

    def get_quantile_proj(self, **kwargs) -> nn.HybridBlock:
        """
        Returns
        -------
        nn.HybridBlock
            a freshly constructed projection parameter object.
        """
        return ProjectParams(len(self.quantiles), **kwargs)
| 6,189 | 26.149123 | 79 | py |
rankpredictor | rankpredictor-master/sub/gluonts/block/rnn.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Third-party imports
from mxnet.gluon import HybridBlock, rnn
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
class RNN(HybridBlock):
    """
    Thin wrapper selecting one of Gluon's recurrent layers by name.

    Parameters
    ----------
    mode
        type of the RNN. Can be either: rnn_relu (RNN with relu activation),
        rnn_tanh, (RNN with tanh activation), lstm or gru.
    num_hidden
        number of units per hidden layer.
    num_layers
        number of hidden layers.
    bidirectional
        toggle use of bi-directional RNN as encoder.
    """

    @validated()
    def __init__(
        self,
        mode: str,
        num_hidden: int,
        num_layers: int,
        bidirectional: bool = False,
        **kwargs,
    ):
        super(RNN, self).__init__(**kwargs)

        with self.name_scope():
            # options shared by every supported layer type
            common = dict(bidirectional=bidirectional, layout="NTC")
            if mode == "rnn_relu":
                self.rnn = rnn.RNN(
                    num_hidden, num_layers, activation="relu", **common
                )
            elif mode == "rnn_tanh":
                self.rnn = rnn.RNN(num_hidden, num_layers, **common)
            elif mode == "lstm":
                self.rnn = rnn.LSTM(num_hidden, num_layers, **common)
            elif mode == "gru":
                self.rnn = rnn.GRU(num_hidden, num_layers, **common)
            else:
                raise ValueError(
                    "Invalid mode %s. Options are rnn_relu, rnn_tanh, lstm, and gru "
                    % mode
                )

    def hybrid_forward(self, F, inputs: Tensor) -> Tensor:  # NTC in, NTC out
        """
        Run the wrapped recurrent layer.

        Parameters
        ----------
        F
            A module that can either refer to the Symbol API or the NDArray
            API in MXNet.
        inputs
            input tensor with shape (batch_size, num_timesteps, num_dimensions)

        Returns
        -------
        Tensor
            rnn output with shape (batch_size, num_timesteps, num_dimensions)
        """
        return self.rnn(inputs)
| 3,100 | 27.981308 | 85 | py |
rankpredictor | rankpredictor-master/sub/gluonts/block/feature.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Callable, List, Optional # noqa: F401
# Third-party imports
import mxnet.gluon.nn as nn
import numpy as np
# First-party imports
from gluonts.core.component import DType, validated
from gluonts.model.common import Tensor
class FeatureEmbedder(nn.HybridBlock):
    """
    Embed a sequence of categorical features.

    Parameters
    ----------
    cardinalities
        cardinality for each categorical feature.
    embedding_dims
        number of dimensions to embed each categorical feature.
    dtype
        Data type of the embedded features.
    """

    @validated()
    def __init__(
        self,
        cardinalities: List[int],
        embedding_dims: List[int],
        dtype: DType = np.float32,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        assert (
            len(cardinalities) > 0
        ), "Length of `cardinalities` list must be greater than zero"
        # Fixed: the message previously compared `embedding_dims` with
        # itself instead of naming `cardinalities`.
        assert len(cardinalities) == len(
            embedding_dims
        ), "Length of `cardinalities` and `embedding_dims` should match"
        assert all(
            [c > 0 for c in cardinalities]
        ), "Elements of `cardinalities` should be > 0"
        assert all(
            [d > 0 for d in embedding_dims]
        ), "Elements of `embedding_dims` should be > 0"

        self.__num_features = len(cardinalities)
        self.dtype = dtype

        def create_embedding(i: int, c: int, d: int) -> nn.Embedding:
            # one embedding table per categorical feature, registered as a
            # child so its parameters are tracked
            embedding = nn.Embedding(
                c, d, prefix=f"cat_{i}_embedding_", dtype=self.dtype
            )
            self.register_child(embedding)
            return embedding

        with self.name_scope():
            self.__embedders = [
                create_embedding(i, c, d)
                for i, (c, d) in enumerate(zip(cardinalities, embedding_dims))
            ]

    # noinspection PyMethodOverriding,PyPep8Naming
    def hybrid_forward(self, F, features: Tensor) -> Tensor:
        """
        Parameters
        ----------
        F
        features
            Categorical features with shape: (N,T,C) or (N,C), where C is the
            number of categorical features.

        Returns
        -------
        concatenated_tensor: Tensor
            Concatenated tensor of embeddings with shape: (N,T,C) or (N,C),
            where C is the sum of the embedding dimensions for each categorical
            feature, i.e. C = sum(self.config.embedding_dims).
        """
        if self.__num_features > 1:
            # we slice the last dimension, giving an array of length self.__num_features with shape (N,T) or (N)
            cat_feature_slices = F.split(
                features, axis=-1, num_outputs=self.__num_features
            )
        else:
            # F.split will iterate over the second-to-last axis if the last axis is one
            cat_feature_slices = [features]

        return F.concat(
            *[
                embed(F.squeeze(cat_feature_slice, axis=-1))
                for embed, cat_feature_slice in zip(
                    self.__embedders, cat_feature_slices
                )
            ],
            dim=-1,
        )
class FeatureAssembler(nn.HybridBlock):
    """
    Assemble features into an MXNet tensor. Input features are distinguished based on the following criteria:
    - static (time-independent) features vs dynamic (that is, time-dependent)
    - categorical vs real-valued features.
    Dynamic features have shape `(N, T, C)` and static features have shape `(N, C)`, where
    - `N` is the number of elements in the processed batch,
    - `T` is the time dimension,
    - `C` is the number of features.
    If multiple feature types are used, the :class:`FeatureAssembler` will assume that the N and T dimensions
    are the same for all passed arguments.
    Categorical features can be optionally embedded using trained embedding layers via nested :class:`FeatureEmbedder`
    components.
    >>> # noinspection PyTypeChecker
    ... embed_static = FeatureEmbedder(
    ...     cardinalities=[2],
    ...     embedding_dims=[3],
    ...     prefix='embed_static_',
    ... )
    >>> # noinspection PyTypeChecker
    ... embed_dynamic = FeatureEmbedder(
    ...     cardinalities=[5, 5],
    ...     embedding_dims=[6, 9],
    ...     prefix='embed_dynamic_',
    ... )
    The above snippet with four :class:`nn.Embedding` corresponding to the one static and two dynamic categorical
    features. The `(input_dim, output_dim)` of these layers are going to be `(2, 3)`, `(5, 6)`, and `(5, 9)`.
    The created `assemble_feature` instance will not handle real-valued features.
    The subset of feature types to be used by the :class:`FeatureAssembler` instance is determined using corresponding
    constructor parameters. Here is an example that constructs a feature assembler consuming only real-valued features.
    >>> N, T = 50, 168
    >>> assemble_feature = FeatureAssembler(
    ...     T=T,
    ...     # use_static_cat=True,
    ...     # use_static_real=False,
    ...     # use_dynamic_cat=True,
    ...     # use_dynamic_real=False,
    ...     embed_static=embed_static,
    ...     embed_dynamic=embed_dynamic
    ... )
    When the `__call__`, `forward`, or `hybrid_forward` methods of a :class:`FeatureAssembler` are called, we always
    have to pass a full set of features. Missing features are represented as zero tensors with a suitable shape.
    For example,
    >>> import mxnet as mx
    >>> feat_static_cat = mx.nd.random.uniform(0, 2, shape=(N, 1)).floor()
    >>> feat_dynamic_cat = mx.nd.random.uniform(0, 5, shape=(N, 168, 2)).floor()
    >>> feat_static_real = mx.nd.zeros(shape=(N, 1,)) # empty feature
    >>> feat_dynamic_real = mx.nd.zeros(shape=(N, T, 1,)) # empty feature
    After initializing the embedder parameters to one and instantiating some random `static_cat` and
    `dynamic_cat` vectors,
    >>> assemble_feature.collect_params().initialize(mx.initializer.One())
    one can do a forward pass as follows.
    >>> assembled_feature = assemble_feature(feat_static_cat, feat_static_real, feat_dynamic_cat, feat_dynamic_real)
    >>> assembled_feature.shape
    (50, 168, 20)
    >>>
    However, relative order of `static_cat` and `dynamic_cat` in the call above is determined by the fact that
    `use_static_cat` is defined before `use_dynamic_cat` in the class constructor.
    """

    @validated()
    def __init__(
        self,
        T: int,
        use_static_cat: bool = False,
        use_static_real: bool = False,
        use_dynamic_cat: bool = False,
        use_dynamic_real: bool = False,
        embed_static: Optional[FeatureEmbedder] = None,
        embed_dynamic: Optional[FeatureEmbedder] = None,
        dtype: DType = np.float32,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        assert T > 0, "The value of `T` should be > 0"

        self.T = T
        self.dtype = dtype
        self.use_static_cat = use_static_cat
        self.use_static_real = use_static_real
        self.use_dynamic_cat = use_dynamic_cat
        self.use_dynamic_real = use_dynamic_real
        # when no embedder is given, fall back to the identity function
        self.embed_static: Callable[[Tensor], Tensor] = embed_static or (
            lambda x: x
        )
        self.embed_dynamic: Callable[[Tensor], Tensor] = embed_dynamic or (
            lambda x: x
        )

    # noinspection PyMethodOverriding
    def hybrid_forward(
        self,
        F,
        feat_static_cat: Tensor,
        feat_static_real: Tensor,
        feat_dynamic_cat: Tensor,
        feat_dynamic_real: Tensor,
    ) -> Tensor:
        """
        Process each of the four feature kinds and concatenate them along
        the feature (last) axis into a single (N, T, C) tensor.
        """
        processed_features = [
            self.process_static_cat(F, feat_static_cat),
            self.process_static_real(F, feat_static_real),
            self.process_dynamic_cat(F, feat_dynamic_cat),
            self.process_dynamic_real(F, feat_dynamic_real),
        ]

        return F.concat(*processed_features, dim=-1)

    def process_static_cat(self, F, feature: Tensor) -> Tensor:
        """Embed static categorical features and repeat them T times."""
        feature = self.embed_static(feature.astype(self.dtype))
        return F.tile(feature.expand_dims(axis=1), reps=(1, self.T, 1))

    def process_dynamic_cat(self, F, feature: Tensor) -> Tensor:
        """Embed dynamic categorical features (already time-indexed)."""
        return self.embed_dynamic(feature.astype(self.dtype))

    def process_static_real(self, F, feature: Tensor) -> Tensor:
        """Repeat static real-valued features along the time axis."""
        return F.tile(feature.expand_dims(axis=1), reps=(1, self.T, 1))

    def process_dynamic_real(self, F, feature: Tensor) -> Tensor:
        """Dynamic real-valued features pass through unchanged."""
        return feature
| 9,087 | 34.224806 | 119 | py |
rankpredictor | rankpredictor-master/sub/gluonts/core/serde.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import importlib
import itertools
import json
import math
import pickle
import re
import textwrap
from functools import singledispatch
from pathlib import PurePath
from pydoc import locate
from typing import cast, Any, NamedTuple, Optional
# Third-party imports
import mxnet as mx
import numpy as np
import pandas as pd
from pydantic import BaseModel
# Relative imports
from gluonts.core import fqname_for
bad_type_msg = textwrap.dedent(
"""
Cannot serialize type {}. See the documentation of the `encode` and
`validate` functions at
http://gluon-ts.mxnet.io/api/gluonts/gluonts.html
and the Python documentation of the `__getnewargs_ex__` magic method at
https://docs.python.org/3/library/pickle.html#object.__getnewargs_ex__
for more information how to make this type serializable.
"""
).lstrip()
# Binary Serialization/Deserialization
# ------------------------------------
def dump_binary(o: Any) -> bytes:
    """
    Serializes an object ``o`` to binary format.

    Parameters
    ----------
    o
        The object to serialize.

    Returns
    -------
    bytes
        A sequence of bytes representing the serialized object.

    See Also
    --------
    load_binary
        Inverse function.
    """
    # delegate to pickle with its default protocol
    payload = pickle.dumps(o)
    return payload
def load_binary(b: bytes) -> Any:
    """
    Deserializes an object from binary format.

    Parameters
    ----------
    b
        A sequence of bytes representing the serialized object.

    Returns
    -------
    Any
        The deserialized object.

    See Also
    --------
    dump_binary
        Inverse function.
    """
    obj = pickle.loads(b)
    return obj
# JSON Serialization/Deserialization
# ----------------------------------
# The canonical way to do this is to define and `default` and `object_hook`
# parameters to the json.dumps and json.loads methods. Unfortunately, due
# to https://bugs.python.org/issue12657 this is not possible at the moment,
# as support for custom NamedTuple serialization is broken.
#
# To circumvent the issue, we pass the input value through custom encode
# and decode functions that map nested object terms to JSON-serializable
# data structures with explicit recursion.
def dump_json(o: Any, indent: Optional[int] = None) -> str:
    """
    Serializes an object to a JSON string.

    Parameters
    ----------
    o
        The object to serialize.
    indent
        An optional number of spaces to use as an indent.

    Returns
    -------
    str
        A string representing the object in JSON format.

    See Also
    --------
    load_json
        Inverse function.
    """
    # map the object to its serializable intermediate form first
    intermediate = encode(o)
    return json.dumps(intermediate, indent=indent, sort_keys=True)
def load_json(s: str) -> Any:
    """
    Deserializes an object from a JSON string.

    Parameters
    ----------
    s
        A string representing the object in JSON format.

    Returns
    -------
    Any
        The deserialized object.

    See Also
    --------
    dump_json
        Inverse function.
    """
    parsed = json.loads(s)
    return decode(parsed)
# Code Serialization/Deserialization
# ----------------------------------
def dump_code(o: Any) -> str:
    """
    Serializes an object to a Python code string.

    Parameters
    ----------
    o
        The object to serialize.

    Returns
    -------
    str
        A string representing the object as Python code.

    See Also
    --------
    load_code
        Inverse function.
    """

    def _dump_code(x: Any) -> str:
        # encoded instances/types are dicts tagged with "__kind__":
        # r = { 'class': ..., 'args': ... }
        # r = { 'class': ..., 'kwargs': ... }
        if type(x) == dict and x.get("__kind__") == kind_inst:
            args = x.get("args", [])
            kwargs = x.get("kwargs", {})
            fqname = x["class"]
            bindings = ", ".join(
                itertools.chain(
                    map(_dump_code, args),
                    [f"{k}={_dump_code(v)}" for k, v in kwargs.items()],
                )
            )
            return f"{fqname}({bindings})"
        if type(x) == dict and x.get("__kind__") == kind_type:
            return x["class"]
        if isinstance(x, dict):
            inner = ", ".join(
                f"{_dump_code(k)}: {_dump_code(v)}" for k, v in x.items()
            )
            return f"{{{inner}}}"
        if isinstance(x, list):
            # NOTE(review): elements go through the public `dump_code`
            # (which re-runs `encode`) rather than `_dump_code`; harmless
            # for already-encoded values, but worth confirming upstream.
            inner = ", ".join(list(map(dump_code, x)))
            return f"[{inner}]"
        if isinstance(x, tuple):
            inner = ", ".join(list(map(dump_code, x)))
            # account for the extra `,` in `(x,)`
            if len(x) == 1:
                inner += ","
            return f"({inner})"
        if isinstance(x, str):
            # json.dumps escapes the string
            return json.dumps(x)
        if isinstance(x, float) or np.issubdtype(type(x), np.inexact):
            if math.isfinite(x):
                return str(x)
            else:
                # Fixed: this literal was missing the `f` prefix, so a
                # non-finite float serialized as the verbatim text
                # 'float("{x}")' instead of e.g. 'float("nan")'.
                return f'float("{x}")'
        if isinstance(x, int) or np.issubdtype(type(x), np.integer):
            return str(x)
        if x is None:
            return str(x)

        raise RuntimeError(
            f"Unexpected element type {fqname_for(x.__class__)}"
        )

    return _dump_code(encode(o))
def load_code(c: str) -> Any:
    """
    Deserializes an object from a Python code string.

    Note that this function ultimately calls ``eval``, so it must only ever
    be applied to trusted strings (e.g. ones produced by :func:`dump_code`).

    Parameters
    ----------
    c
        A string representing the object as Python code.

    Returns
    -------
    Any
        The deserialized object.

    See Also
    --------
    dump_code
        Inverse function.
    """

    def _load_code(code: str, modules=None):
        # `modules` accumulates the modules referenced by `code`; missing
        # modules are discovered lazily by retrying the evaluation whenever
        # an unresolved name or attribute is reported.
        if modules is None:
            modules = {}
        try:
            # SECURITY: evaluates arbitrary Python -- trusted input only.
            return eval(code, modules)
        except NameError as e:
            m = re.match(r"name '(?P<module>.+)' is not defined", str(e))
            if m is None:
                raise
            name = m["module"]
            return _load_code(
                code, {**modules, name: importlib.import_module(name)}
            )
        except AttributeError as e:
            m = re.match(
                r"module '(?P<module>.+)' has no attribute '(?P<package>.+)'",
                str(e),
            )
            if m is None:
                raise
            name = f"{m['module']}.{m['package']}"
            return _load_code(
                code, {**modules, name: importlib.import_module(name)}
            )
        # note: the original's `except Exception as e: raise e` clause was
        # removed -- re-raising an unhandled exception unchanged is a no-op.

    return _load_code(c)
# Structural encoding/decoding
# ----------------------------
# Markers stored under the "__kind__" key of encoded dictionaries,
# distinguishing an encoded type object from an encoded instance.
kind_type = "type"
kind_inst = "instance"
@singledispatch
def encode(v: Any) -> Any:
    """
    Transforms a value `v` as a serializable intermediate representation (for
    example, named tuples are encoded as dictionaries). The intermediate
    representation is then recursively traversed and serialized either as
    Python code or as JSON string.

    This function is decorated with :func:`~functools.singledispatch` and can
    be specialized by clients for families of types that are not supported by
    the basic implementation (explained below).

    Examples
    --------
    The conversion logic implemented by the basic implementation is used
    as a fallback and is best explained by a series of examples.

    Lists (as lists).

    >>> encode([1, 2.0, '3'])
    [1, 2.0, '3']

    Tuples (as lists).

    >>> encode((1, 2.0, '3'))
    [1, 2.0, '3']

    Dictionaries (as dictionaries).

    >>> encode({'a': 1, 'b': 2.0, 'c': '3'})
    {'a': 1, 'b': 2.0, 'c': '3'}

    Named tuples (as dictionaries with a ``'__kind__': 'instance'`` member).

    >>> from pprint import pprint
    >>> from typing import NamedTuple
    >>> class ComplexNumber(NamedTuple):
    ...     x: float = 0.0
    ...     y: float = 0.0
    >>> pprint(encode(ComplexNumber(4.0, 2.0)))
    {'__kind__': 'instance',
     'class': 'gluonts.core.serde.ComplexNumber',
     'kwargs': {'x': 4.0, 'y': 2.0}}

    Classes with a :func:`~gluonts.core.component.validated` initializer (as
    dictionaries with a ``'__kind__': 'instance'`` member).

    >>> from gluonts.core.component import validated
    >>> class ComplexNumber:
    ...     @validated()
    ...     def __init__(self, x: float = 0.0, y: float = 0.0) -> None:
    ...         self.x = x
    ...         self.y = y
    >>> pprint(encode(ComplexNumber(4.0, 2.0)))
    {'__kind__': 'instance',
     'args': [],
     'class': 'gluonts.core.serde.ComplexNumber',
     'kwargs': {'x': 4.0, 'y': 2.0}}

    Classes with a ``__getnewargs_ex__`` magic method (as dictionaries with a
    ``'__kind__': 'instance'`` member).

    >>> from gluonts.core.component import validated
    >>> class ComplexNumber:
    ...     def __init__(self, x: float = 0.0, y: float = 0.0) -> None:
    ...         self.x = x
    ...         self.y = y
    ...     def __getnewargs_ex__(self):
    ...         return [], {'x': self.x, 'y': self.y}
    >>> pprint(encode(ComplexNumber(4.0, 2.0)))
    {'__kind__': 'instance',
     'args': [],
     'class': 'gluonts.core.serde.ComplexNumber',
     'kwargs': {'x': 4.0, 'y': 2.0}}

    Types (as dictionaries with a ``'__kind__': 'type'`` member).

    >>> encode(ComplexNumber)
    {'__kind__': 'type', 'class': 'gluonts.core.serde.ComplexNumber'}

    Parameters
    ----------
    v
        The value to be encoded.

    Returns
    -------
    Any
        An encoding of ``v`` that can be serialized to Python code or
        JSON string.

    See Also
    --------
    decode
        Inverse function.
    dump_json
        Serializes an object to a JSON string.
    dump_code
        Serializes an object to a Python code string.
    """
    # None and JSON-native primitives pass through unchanged.
    if isinstance(v, type(None)):
        return None
    if isinstance(v, (float, int, str)):
        return v
    # NumPy scalars become the corresponding Python primitives.
    if np.issubdtype(type(v), np.inexact):
        return float(v)
    if np.issubdtype(type(v), np.integer):
        return int(v)
    # we have to check for namedtuples first, to encode them not as plain
    # tuples (which would become lists)
    if isinstance(v, tuple) and hasattr(v, "_asdict"):
        v = cast(NamedTuple, v)
        return {
            "__kind__": kind_inst,
            "class": fqname_for(v.__class__),
            "kwargs": encode(v._asdict()),
        }
    # Sequences and sets all become lists; note that tuples and sets do NOT
    # round-trip back to their original type through this path.
    if isinstance(v, (list, set, tuple)):
        return list(map(encode, v))
    if isinstance(v, dict):
        return {k: encode(v) for k, v in v.items()}
    # Type objects are encoded by their fully-qualified name.
    if isinstance(v, type):
        return {"__kind__": kind_type, "class": fqname_for(v)}
    # Objects exposing __getnewargs_ex__ (including @validated classes) are
    # encoded as class name plus constructor arguments.
    if hasattr(v, "__getnewargs_ex__"):
        args, kwargs = v.__getnewargs_ex__()  # mypy: ignore
        return {
            "__kind__": kind_inst,
            "class": fqname_for(v.__class__),
            "args": encode(args),
            "kwargs": encode(kwargs),
        }
    raise RuntimeError(bad_type_msg.format(fqname_for(v.__class__)))
@encode.register(PurePath)
def encode_path(v: PurePath) -> Any:
    """
    Specializes :func:`encode` for invocations where ``v`` is an instance of
    the :class:`~PurePath` class.
    """
    # A path is fully determined by its string form.
    encoded = {"__kind__": kind_inst}
    encoded["class"] = fqname_for(type(v))
    encoded["args"] = encode([str(v)])
    return encoded
@encode.register(BaseModel)
def encode_pydantic_model(v: BaseModel) -> Any:
    """
    Specializes :func:`encode` for invocations where ``v`` is an instance of
    the :class:`~BaseModel` class.
    """
    # Pydantic models are rebuilt from their field dictionary.
    field_dict = v.__dict__
    return {
        "__kind__": kind_inst,
        "class": fqname_for(type(v)),
        "kwargs": encode(field_dict),
    }
@encode.register(mx.Context)
def encode_mx_context(v: mx.Context) -> Any:
    """
    Specializes :func:`encode` for invocations where ``v`` is an instance of
    the :class:`~mxnet.Context` class.
    """
    # A context is reconstructed from its device type and device id.
    ctor_args = [v.device_type, v.device_id]
    return {
        "__kind__": kind_inst,
        "class": fqname_for(type(v)),
        "args": encode(ctor_args),
    }
@encode.register(np.ndarray)
def encode_np_ndarray(v: np.ndarray) -> Any:
    """
    Specializes :func:`encode` for invocations where ``v`` is an instance of
    the :class:`~numpy.ndarray` class.
    """
    return {
        "__kind__": kind_inst,
        "class": "numpy.array",  # use "array" ctor instead of "nparray" class
        # the array round-trips through a nested list plus its dtype
        "args": encode([v.tolist(), v.dtype]),
    }
@encode.register(pd.Timestamp)
def encode_pd_timestamp(v: pd.Timestamp) -> Any:
    """
    Specializes :func:`encode` for invocations where ``v`` is an instance of
    the :class:`~pandas.Timestamp` class.
    """
    return {
        "__kind__": kind_inst,
        "class": "pandas.Timestamp",
        # the timestamp round-trips through its string representation
        "args": encode([str(v)]),
        # the frequency is only preserved when the timestamp carries one
        "kwargs": {"freq": v.freqstr if v.freq else None},
    }
@encode.register(np.dtype)
def encode_np_dtype(v: np.dtype) -> Any:
    """
    Specializes :func:`encode` for invocations where ``v`` is an instance of
    the :class:`~numpy.dtype` class.
    """
    return {
        "__kind__": kind_inst,
        "class": fqname_for(v.__class__),
        # a dtype round-trips through its canonical name, e.g. "float32"
        "args": encode([v.name]),
    }
@encode.register(mx.nd.NDArray)
def encode_mx_ndarray(v: mx.nd.NDArray) -> Any:
    """
    Specializes :func:`encode` for invocations where ``v`` is an instance of
    the :class:`~mxnet.nd.NDArray` class.
    """
    return {
        "__kind__": kind_inst,
        "class": "mxnet.nd.array",
        # the array round-trips through a (NumPy-mediated) nested list
        "args": encode([v.asnumpy().tolist()]),
        "kwargs": {"dtype": encode(v.dtype)},
    }
def decode(r: Any) -> Any:
    """
    Decodes a value from an intermediate representation `r`.

    Parameters
    ----------
    r
        An intermediate representation to be decoded.

    Returns
    -------
    Any
        A Python data structure corresponding to the decoded version of ``r``.

    See Also
    --------
    encode
        Inverse function.
    """
    # Structural recursion over the possible shapes of `r`. The exact
    # `type(r) == dict` checks (rather than isinstance) are deliberate and
    # preserved from the original: dict subclasses fall through untouched.
    kind = r.get("__kind__") if type(r) == dict else None
    # { "__kind__": "instance", "class": ..., "args"/"kwargs": ... }
    if kind == kind_inst:
        cls = locate(r["class"])
        args = decode(r.get("args", []))
        kwargs = decode(r.get("kwargs", {}))
        return cls(*args, **kwargs)
    # { "__kind__": "type", "class": ... }
    if kind == kind_type:
        return locate(r["class"])
    # Containers decode element-wise, preserving the container type.
    if type(r) == dict:
        return {key: decode(val) for key, val in r.items()}
    if type(r) == tuple:
        return tuple(decode(y) for y in r)
    if type(r) == list:
        return [decode(y) for y in r]
    if type(r) == set:
        return {decode(y) for y in r}
    # Anything else is already a plain value.
    return r
| 15,427 | 25.327645 | 78 | py |
rankpredictor | rankpredictor-master/sub/gluonts/core/component.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import functools
import inspect
import logging
import os
import re
from collections import OrderedDict
from functools import singledispatch
from pydoc import locate
from typing import Any, Type, TypeVar, Union
# Third-party imports
import mxnet as mx
import numpy as np
from pydantic import BaseConfig, BaseModel, ValidationError, create_model
# First-party imports
from gluonts.core.exception import GluonTSHyperparametersError
from gluonts.core.serde import dump_code
from gluonts.monkey_patch import monkey_patch_property_metaclass # noqa: F401
# Relative imports
from . import fqname_for
# Verbose (DEBUG-level) logging is switched on via the DEBUG environment
# variable; any value other than "true" (case-insensitive) keeps INFO.
DEBUG = os.environ.get("DEBUG", "false").lower() == "true"
logger = logging.getLogger()
logger.setLevel(logging.DEBUG if DEBUG else logging.INFO)
# Type variable tying `from_hyperparameters`'s class argument to its return.
A = TypeVar("A")
def from_hyperparameters(cls: Type[A], **hyperparameters) -> A:
    """
    Reflectively create an instance of a class with a :func:`validated`
    initializer.

    Parameters
    ----------
    cls
        The type ``A`` of the component to be instantiated.
    hyperparameters
        A dictionary of key-value pairs to be used as parameters to the
        component initializer.

    Returns
    -------
    A
        An instance of the given class.

    Raises
    ------
    GluonTSHyperparametersError
        Wraps a :class:`ValidationError` thrown when validating the
        initializer parameters.
    """
    # The Pydantic model synthesized by @validated() is attached to the
    # initializer; its absence means the class was not decorated.
    model = getattr(cls.__init__, "Model", None)
    if model is None:
        raise AttributeError(
            f"Cannot find attribute Model attached to the "
            f"{fqname_for(cls)}. Most probably you have forgotten to mark "
            f"the class initializer as @validated()."
        )
    try:
        validated_fields = model(**hyperparameters).__dict__
        return cls(**validated_fields)  # type: ignore
    except ValidationError as e:
        raise GluonTSHyperparametersError from e
@singledispatch
def equals(this: Any, that: Any) -> bool:
    """
    Structural equality check between two objects of arbitrary type.

    By default, this function delegates to :func:`equals_default_impl`.

    In addition, the function dispatches to specialized implementations based
    on the type of the first argument, so the above conditions might be
    stricter for certain types.

    Parameters
    ----------
    this, that
        Objects to compare.

    Returns
    -------
    bool
        A boolean value indicating whether ``this`` and ``that`` are
        structurally equal.

    See Also
    --------
    equals_default_impl
        Default semantics of a structural equality check between two objects
        of arbitrary type.
    equals_representable_block
        Specialization for Gluon :class:`~mxnet.gluon.HybridBlock` input
        arguments.
    equals_parameter_dict
        Specialization for Gluon :class:`~mxnet.gluon.ParameterDict` input
        arguments.
    """
    # Fallback for types without a registered specialization.
    return equals_default_impl(this, that)
def equals_default_impl(this: Any, that: Any) -> bool:
    """
    Default semantics of a structural equality check between two objects of
    arbitrary type.

    Two objects ``this`` and ``that`` are structurally equal iff:

    1. Their types match.
    2. If their initializers are :func:`validated`, their initializer
       arguments are pairwise structurally equal.
    3. If their initializers are not :func:`validated`, they are
       referentially equal (i.e. ``this == that``).

    Parameters
    ----------
    this, that
        Objects to compare.

    Returns
    -------
    bool
        Whether ``this`` and ``that`` are structurally equal.
    """
    if type(this) != type(that):
        return False
    # Objects produced by @validated() carry their constructor arguments;
    # compare those structurally when both sides have them.
    have_init_args = hasattr(this, "__init_args__") and hasattr(
        that, "__init_args__"
    )
    if not have_init_args:
        return this == that
    return equals(this.__init_args__, that.__init_args__)
@equals.register(list)
def equals_list(this: list, that: list) -> bool:
    # Lists are equal iff they have the same length and are pairwise
    # structurally equal.
    return len(this) == len(that) and all(
        equals(x, y) for x, y in zip(this, that)
    )
@equals.register(dict)
def equals_dict(this: dict, that: dict) -> bool:
    # Dicts are equal iff they have identical key sets and each value pair
    # is structurally equal.
    if this.keys() != that.keys():
        return False
    return all(equals(value, that[key]) for key, value in this.items())
@equals.register(mx.gluon.HybridBlock)
def equals_representable_block(
    this: mx.gluon.HybridBlock, that: mx.gluon.HybridBlock
) -> bool:
    """
    Structural equality check between two :class:`~mxnet.gluon.HybridBlock`
    objects with :func:`validated` initializers.

    Two blocks are considered structurally equal when the default check
    (:func:`equals`) passes AND their parameter dictionaries, obtained via
    :func:`~mxnet.gluon.block.Block.collect_params`, are structurally equal
    as well.

    Parameters
    ----------
    this, that
        Objects to compare.

    Returns
    -------
    bool
        Whether ``this`` and ``that`` are structurally equal.

    See Also
    --------
    equals
        Dispatching function.
    equals_parameter_dict
        Specialization of :func:`equals` for Gluon
        :class:`~mxnet.gluon.ParameterDict` input arguments.
    """
    # Short-circuits exactly like the original sequential checks.
    return equals_default_impl(this, that) and equals_parameter_dict(
        this.collect_params(), that.collect_params()
    )
@equals.register(mx.gluon.ParameterDict)
def equals_parameter_dict(
    this: mx.gluon.ParameterDict, that: mx.gluon.ParameterDict
) -> bool:
    """
    Structural equality check between two :class:`~mxnet.gluon.ParameterDict`
    objects.

    Two parameter dictionaries are considered structurally equal when:

    1. They contain the same keys, modulo the (stripped) key prefix.
    2. The corresponding parameter values are numerically equal per
       :func:`~mxnet.test_utils.almost_equal` with ``equal_nan=True``
       (aligned ``NaN`` values compare equal).

    Parameters
    ----------
    this, that
        Objects to compare.

    Returns
    -------
    bool
        Whether ``this`` and ``that`` are structurally equal.

    See Also
    --------
    equals
        Dispatching function.
    """
    if type(this) != type(that):
        return False

    def stripped_names(param_dict) -> set:
        # Drop the dict's own prefix from every key so that otherwise
        # identical parameter sets with different prefixes compare equal.
        prefix = param_dict.prefix
        prefix_len = len(prefix)
        return {
            key[prefix_len:] if key.startswith(prefix) else key
            for key in param_dict.keys()
        }

    if stripped_names(this) != stripped_names(that):
        return False

    for this_name, that_name in zip(this.keys(), that.keys()):
        this_data = this[this_name].data().asnumpy()
        that_data = that[that_name].data().asnumpy()
        if not mx.test_utils.almost_equal(this_data, that_data, equal_nan=True):
            return False
    return True
@equals.register(np.ndarray)
def equals_ndarray(this: np.ndarray, that: np.ndarray) -> bool:
    """
    Structural equality for NumPy arrays: same shape and element-wise equal.

    Fixes the original check ``np.shape == np.shape``, which compared the
    ``np.shape`` *function* to itself (always True) instead of the arrays'
    shapes -- so arrays with broadcast-compatible but different shapes
    (e.g. ``(1, 3)`` vs ``(3,)``) could incorrectly compare equal.
    """
    return this.shape == that.shape and bool(np.all(this == that))
class BaseValidatedInitializerModel(BaseModel):
    """
    Base Pydantic model for components with :func:`validated` initializers.

    See Also
    --------
    validated
        Decorates an initializer methods with argument validation logic.
    """

    class Config(BaseConfig):
        """
        `Config <https://pydantic-docs.helpmanual.io/#model-config>`_ for the
        Pydantic model inherited by all :func:`validated` initializers.

        Allows the use of arbitrary type annotations in initializer parameters.
        """

        # Initializer parameters may be annotated with non-Pydantic types
        # (e.g. mx.Context), so arbitrary types must be allowed.
        arbitrary_types_allowed = True
def validated(base_model=None):
    """
    Decorates an ``__init__`` method with typed parameters with validation
    and auto-conversion logic.

    >>> class ComplexNumber:
    ...     @validated()
    ...     def __init__(self, x: float = 0.0, y: float = 0.0) -> None:
    ...         self.x = x
    ...         self.y = y

    Classes with decorated initializers can be instantiated using arguments of
    another type (e.g. an ``y`` argument of type ``str`` ). The decorator
    handles the type conversion logic.

    >>> c = ComplexNumber(y='42')
    >>> (c.x, c.y)
    (0.0, 42.0)

    If the bound argument cannot be converted, the decorator throws an error.

    >>> c = ComplexNumber(y=None)
    Traceback (most recent call last):
        ...
    pydantic.error_wrappers.ValidationError: 1 validation error for ComplexNumberModel
    y
      none is not an allowed value (type=type_error.none.not_allowed)

    Internally, the decorator delegates all validation and conversion logic to
    `a Pydantic model <https://pydantic-docs.helpmanual.io/>`_, which can be
    accessed through the ``Model`` attribute of the decorated initializer.

    >>> ComplexNumber.__init__.Model
    <class 'ComplexNumberModel'>

    The Pydantic model is synthesized automatically from on the parameter
    names and types of the decorated initializer. In the ``ComplexNumber``
    example, the synthesized Pydantic model corresponds to the following
    definition.

    >>> class ComplexNumberModel(BaseValidatedInitializerModel):
    ...     x: float = 0.0
    ...     y: float = 0.0

    Clients can optionally customize the base class of the synthesized
    Pydantic model using the ``base_model`` decorator parameter. The default
    behavior uses :class:`BaseValidatedInitializerModel` and its
    `model config <https://pydantic-docs.helpmanual.io/#config>`_.

    See Also
    --------
    BaseValidatedInitializerModel
        Default base class for all synthesized Pydantic models.
    """

    def validator(init):
        # Derive the enclosing class name from the initializer's qualname,
        # e.g. "ComplexNumber.__init__" -> "ComplexNumber".
        init_qualname = dict(inspect.getmembers(init))["__qualname__"]
        init_clsnme = init_qualname.split(".")[0]
        init_params = inspect.signature(init).parameters
        # Build Pydantic field specs (annotation, default) from the
        # initializer signature; `...` marks a required field. Only plain
        # positional-or-keyword parameters participate in validation.
        init_fields = {
            param.name: (
                param.annotation
                if param.annotation != inspect.Parameter.empty
                else Any,
                param.default
                if param.default != inspect.Parameter.empty
                else ...,
            )
            for param in init_params.values()
            if param.name != "self"
            and param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
        }
        if base_model is None:
            PydanticModel = create_model(
                model_name=f"{init_clsnme}Model",
                __config__=BaseValidatedInitializerModel.Config,
                **init_fields,
            )
        else:
            PydanticModel = create_model(
                model_name=f"{init_clsnme}Model",
                __base__=base_model,
                **init_fields,
            )

        def validated_repr(self) -> str:
            # repr() renders the object as reconstructable Python code.
            return dump_code(self)

        def validated_getnewargs_ex(self):
            # Supports pickling/encode: empty args, saved kwargs.
            return (), self.__init_args__

        @functools.wraps(init)
        def init_wrapper(*args, **kwargs):
            self, *args = args
            # Map positional arguments onto their parameter names.
            nmargs = {
                name: arg
                for (name, param), arg in zip(
                    list(init_params.items()), [self] + args
                )
                if name != "self"
            }
            # Validation/conversion happens here; raises ValidationError on
            # incompatible arguments.
            model = PydanticModel(**{**nmargs, **kwargs})
            # merge nmargs, kwargs, and the model fields into a single dict
            all_args = {**nmargs, **kwargs, **model.__dict__}
            # save the merged dictionary for Representable use, but only if
            # __init_args__ is not already set in order to avoid overriding a
            # value set by a subclass initializer in super().__init__ calls
            if not getattr(self, "__init_args__", {}):
                self.__init_args__ = OrderedDict(
                    {
                        name: arg
                        for name, arg in sorted(all_args.items())
                        if type(arg) != mx.gluon.ParameterDict
                    }
                )
                self.__class__.__getnewargs_ex__ = validated_getnewargs_ex
                self.__class__.__repr__ = validated_repr
            return init(self, **all_args)

        # attach the Pydantic model as the attribute of the initializer wrapper
        setattr(init_wrapper, "Model", PydanticModel)
        return init_wrapper

    return validator
class MXContext:
    """
    Defines `custom data type validation
    <https://pydantic-docs.helpmanual.io/#custom-data-types>`_ for
    the :class:`~mxnet.context.Context` data type.
    """

    @classmethod
    def validate(cls, v: Union[str, mx.Context]) -> mx.Context:
        # Accept an existing context as-is; otherwise parse strings of the
        # form "cpu", "gpu", "cpu(0)", "gpu(1)", ...
        if isinstance(v, mx.Context):
            return v
        m = re.search(r"^(?P<dev_type>cpu|gpu)(\((?P<dev_id>\d+)\))?$", v)
        if m:
            # A missing device id defaults to 0.
            return mx.Context(m["dev_type"], int(m["dev_id"] or 0))
        else:
            raise ValueError(
                f"bad MXNet context {v}, expected either an "
                f"mx.context.Context or its string representation"
            )

    @classmethod
    def __get_validators__(cls) -> mx.Context:
        # NOTE(review): this is a generator yielding validator callables, so
        # the declared `-> mx.Context` return type looks inaccurate --
        # confirm before changing the annotation.
        yield cls.validate
# Attach the validation hooks directly onto mx.Context so that Pydantic
# models can declare fields of type mx.Context.
mx.Context.validate = MXContext.validate
mx.Context.__get_validators__ = MXContext.__get_validators__
# Cached GPU count; populated lazily by num_gpus().
NUM_GPUS = None
def num_gpus(refresh=False):
    """
    Return the number of GPUs visible to MXNet, caching the result in the
    module-level ``NUM_GPUS``; pass ``refresh=True`` to re-query.
    """
    global NUM_GPUS
    if NUM_GPUS is not None and not refresh:
        return NUM_GPUS
    try:
        count = mx.context.num_gpus()
    except mx.base.MXNetError as e:
        # Querying can fail (e.g. broken driver); treat that as zero GPUs.
        logger.error(f"Failure when querying GPU: {e}")
        count = 0
    NUM_GPUS = count
    return NUM_GPUS
def get_mxnet_context(gpu_number=0) -> mx.Context:
    """
    Returns either CPU or GPU context, depending on GPU availability.

    Parameters
    ----------
    gpu_number
        Index of the GPU device to use when at least one GPU is available.

    Returns
    -------
    mx.Context
        ``mx.context.cpu()`` if no GPU is detected, otherwise
        ``mx.context.gpu(gpu_number)``.
    """
    # Fixed: log through the module-level `logger` (as num_gpus and
    # check_gpu_support do) instead of the root `logging` module, so the
    # message respects the DEBUG/INFO level configured at the top of the file.
    if num_gpus() == 0:
        logger.info("Using CPU")
        return mx.context.cpu()
    else:
        logger.info("Using GPU")
        return mx.context.gpu(gpu_number)
def check_gpu_support() -> bool:
    """
    Emits a log line and returns a boolean that indicates whether
    the currently installed MXNet version has GPU support.
    """
    n = num_gpus()
    logger.info(f'MXNet GPU support is {"ON" if n > 0 else "OFF"}')
    # Idiomatic boolean return, replacing `False if n == 0 else True`.
    return n > 0
class DType:
    """
    Defines `custom data type validation
    <https://pydantic-docs.helpmanual.io/#custom-data-types>`_ for ``type``
    instances.

    Parameters annotated with :class:`DType` can be bound to string arguments
    holding the fully-qualified type name; validation loads the named type
    automatically during conversion.
    """

    @classmethod
    def __get_validators__(cls):
        yield cls.validate

    @classmethod
    def validate(cls, v):
        # Type objects pass through; strings are resolved by pydoc.locate.
        if isinstance(v, type):
            return v
        if isinstance(v, str):
            return locate(v)
        raise ValueError(
            f"bad value {v} of type {type(v)}, expected a type or a string"
        )
| 16,390 | 28.910584 | 86 | py |
rankpredictor | rankpredictor-master/sub/gluonts/dataset/loader.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import itertools
from collections import defaultdict
from typing import Any, Dict, Iterable, Iterator, List, Optional # noqa: F401
# Third-party imports
import mxnet as mx
import numpy as np
# First-party imports
from gluonts.core.component import DType
from gluonts.dataset.common import DataEntry, Dataset
from gluonts.transform import Transformation
# A batch maps field names to their batched values.
DataBatch = Dict[str, Any]
class BatchBuffer:
    """
    Accumulates transformed data entries field-by-field and assembles them
    into batches of MXNet arrays on a given context.
    """

    def __init__(
        self, batch_size: int, ctx: mx.Context, dtype: DType = np.float32
    ) -> None:
        # One list of values per field name; all lists grow in lockstep.
        self._buffers: Dict[Any, List[Any]] = defaultdict(list)
        self.batch_size = batch_size
        self._size = 0
        self.ctx = ctx
        self.dtype = dtype

    def add(self, d: Dict[str, List[np.ndarray]]):
        """Append one entry; all entries must share the same field names."""
        if self._buffers:
            assert self._buffers.keys() == d.keys()
        for k, v in d.items():
            self._buffers[k].append(v)
        self._size += 1

    def __len__(self):
        # Number of buffered entries (not batches).
        return self._size

    def next_batch(self) -> DataBatch:
        """Remove up to ``batch_size`` entries and stack them into a batch."""
        assert self._size > 0
        n = min(self._size, self.batch_size)
        batch = {k: self.stack(v[:n]) for k, v in self._buffers.items()}
        # Drop the consumed prefix of every field buffer.
        for key in self._buffers.keys():
            self._buffers[key] = self._buffers[key][n:]
        self._size -= n
        return batch

    def stack(self, xs):
        """Stack a list of same-field values into a single batched value."""
        if isinstance(xs[0], np.ndarray):
            data = np.asarray(xs)
            # Normalize floating-point data to the configured dtype.
            if data.dtype.kind == "f":
                data = data.astype(self.dtype)
            return mx.nd.array(data, dtype=data.dtype, ctx=self.ctx)
        elif isinstance(xs[0], mx.nd.NDArray):
            return mx.nd.stack(*xs)
        elif isinstance(xs[0], list):
            # Transpose list-of-lists and stack each column recursively.
            return [self.stack(t) for t in zip(*[x for x in xs])]
        elif isinstance(xs[0], tuple):
            return tuple([self.stack(t) for t in zip(*[x for x in xs])])
        else:
            return xs  # stack all other types as list

    def shuffle(self):
        """Apply one random permutation consistently across all fields."""
        perm = np.random.permutation(self._size)
        for key in self._buffers.keys():
            li = self._buffers[key]
            self._buffers[key] = [li[i] for i in perm]
class DataLoader(Iterable[DataEntry]):
    """
    An abstract Iterable type for iterating and transforming a dataset,
    in batches of a prescribed size.

    Subclasses implement ``__iter__``; this base class only stores the
    configuration shared by all loaders.

    Parameters
    ----------
    dataset
        The dataset from which to load data.
    transform
        A transformation to apply to each entry in the dataset.
    batch_size
        The size of the batches to emit.
    ctx
        MXNet context to use to store data.
    dtype
        Floating point type to use.
    """

    def __init__(
        self,
        dataset: Dataset,
        transform: Transformation,
        batch_size: int,
        ctx: mx.Context,
        dtype: DType = np.float32,
    ) -> None:
        self.dataset = dataset
        self.transform = transform
        self.batch_size = batch_size
        self.ctx = ctx
        self.dtype = dtype
class TrainDataLoader(DataLoader):
    """
    An Iterable type for iterating and transforming a dataset, in batches of a
    prescribed size, until a given number of batches is reached.

    The transformations are applied in training mode, i.e. with the flag
    `is_train = True`.

    Parameters
    ----------
    dataset
        The dataset from which to load data.
    transform
        A transformation to apply to each entry in the dataset.
    batch_size
        The size of the batches to emit.
    ctx
        MXNet context to use to store data.
    num_batches_per_epoch
        Number of batches to return in one complete iteration over this object.
    dtype
        Floating point type to use.
    """

    def __init__(
        self,
        dataset: Dataset,
        transform: Transformation,
        batch_size: int,
        ctx: mx.Context,
        num_batches_per_epoch: int,
        dtype: DType = np.float32,
        shuffle_for_training: bool = True,
        num_batches_for_shuffling: int = 10,
    ) -> None:
        super().__init__(dataset, transform, batch_size, ctx, dtype)
        self.num_batches_per_epoch = num_batches_per_epoch
        self.shuffle_for_training = shuffle_for_training
        # When shuffling, several batches' worth of entries are buffered and
        # shuffled together before being emitted; otherwise just one.
        self._num_buffered_batches = (
            num_batches_for_shuffling if shuffle_for_training else 1
        )
        # Lazily-created infinite iterator over the transformed dataset; it
        # persists across epochs so iteration resumes where it stopped.
        self._cur_iter: Optional[Iterator] = None
        self._buffer = BatchBuffer(self.batch_size, ctx, dtype)

    def _emit_batches_while_buffer_larger_than(
        self, thresh
    ) -> Iterator[DataBatch]:
        # Optionally shuffle the buffered entries, then drain batches while
        # more than `thresh` entries remain.
        if self.shuffle_for_training:
            self._buffer.shuffle()
        while len(self._buffer) > thresh:
            yield self._buffer.next_batch()

    def _iterate_forever(
        self, collection: Iterable[DataEntry]
    ) -> Iterator[DataEntry]:
        # iterate forever over the collection, the collection must be non empty
        while True:
            try:
                first = next(iter(collection))
            except StopIteration:
                raise Exception("empty dataset")
            else:
                for x in itertools.chain([first], collection):
                    yield x

    def __len__(self) -> int:
        # Length is defined as batches per epoch, not dataset size.
        return self.num_batches_per_epoch

    def __iter__(self) -> Iterator[DataBatch]:
        batch_count = 0
        if self._cur_iter is None:
            self._cur_iter = self.transform(
                self._iterate_forever(self.dataset), is_train=True
            )
        assert self._cur_iter is not None
        while True:
            data_entry = next(self._cur_iter)
            self._buffer.add(data_entry)
            # Once enough entries are buffered, (shuffle and) emit batches
            # until the buffer holds less than one full batch; stop the
            # epoch after num_batches_per_epoch batches.
            if (
                len(self._buffer)
                >= self._num_buffered_batches * self.batch_size
            ):
                for batch in self._emit_batches_while_buffer_larger_than(
                    self.batch_size - 1
                ):
                    yield batch
                    batch_count += 1
                    if batch_count >= self.num_batches_per_epoch:
                        return
class ValidationDataLoader(DataLoader):
    """
    Iterates a dataset exactly once in batches of ``batch_size``, applying
    the transformation in training mode (`is_train = True`).

    Parameters
    ----------
    dataset
        The dataset from which to load data.
    transform
        A transformation to apply to each entry in the dataset.
    batch_size
        The size of the batches to emit.
    ctx
        MXNet context to use to store data.
    dtype
        Floating point type to use.
    """

    def __iter__(self) -> Iterator[DataBatch]:
        collector = BatchBuffer(self.batch_size, self.ctx, self.dtype)
        transformed = self.transform(iter(self.dataset), is_train=True)
        for entry in transformed:
            collector.add(entry)
            if len(collector) >= self.batch_size:
                yield collector.next_batch()
        # flush the trailing, possibly partial, batch
        if len(collector) > 0:
            yield collector.next_batch()
class InferenceDataLoader(DataLoader):
    """
    Iterates a dataset exactly once in batches of ``batch_size``, applying
    the transformation in inference mode (`is_train = False`).

    Parameters
    ----------
    dataset
        The dataset from which to load data.
    transform
        A transformation to apply to each entry in the dataset.
    batch_size
        The size of the batches to emit.
    ctx
        MXNet context to use to store data.
    dtype
        Floating point type to use.
    """

    def __iter__(self) -> Iterator[DataBatch]:
        collector = BatchBuffer(self.batch_size, self.ctx, self.dtype)
        transformed = self.transform(iter(self.dataset), is_train=False)
        for entry in transformed:
            collector.add(entry)
            if len(collector) >= self.batch_size:
                yield collector.next_batch()
        # flush the trailing, possibly partial, batch
        if len(collector) > 0:
            yield collector.next_batch()
| 8,580 | 30.899628 | 79 | py |
rankpredictor | rankpredictor-master/sub/gluonts/distribution/transformed_distribution.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Optional, Tuple, List
# Third-party imports
from mxnet import autograd
import mxnet as mx
import numpy as np
# First-party imports
from gluonts.model.common import Tensor
from gluonts.core.component import validated
# Relative imports
from . import bijection as bij
from .distribution import Distribution, getF
class TransformedDistribution(Distribution):
r"""
A distribution obtained by applying a sequence of transformations on top
of a base distribution.
"""
@validated()
def __init__(
self, base_distribution: Distribution, transforms: List[bij.Bijection]
) -> None:
self.base_distribution = base_distribution
self.transforms = transforms
self.is_reparameterizable = self.base_distribution.is_reparameterizable
# use these to cache shapes and avoid recomputing all steps
# the reason we cannot do the computations here directly
# is that this constructor would fail in mx.symbol mode
self._event_dim: Optional[int] = None
self._event_shape: Optional[Tuple] = None
self._batch_shape: Optional[Tuple] = None
@property
def event_dim(self):
if self._event_dim is None:
self._event_dim = max(
[self.base_distribution.event_dim]
+ [t.event_dim for t in self.transforms]
)
assert isinstance(self._event_dim, int)
return self._event_dim
@property
def batch_shape(self) -> Tuple:
if self._batch_shape is None:
shape = (
self.base_distribution.batch_shape
+ self.base_distribution.event_shape
)
self._batch_shape = shape[: len(shape) - self.event_dim]
assert isinstance(self._batch_shape, tuple)
return self._batch_shape
@property
def event_shape(self) -> Tuple:
if self._event_shape is None:
shape = (
self.base_distribution.batch_shape
+ self.base_distribution.event_shape
)
self._event_shape = shape[len(shape) - self.event_dim :]
assert isinstance(self._event_shape, tuple)
return self._event_shape
def sample(
self, num_samples: Optional[int] = None, dtype=np.float32
) -> Tensor:
with autograd.pause():
s = self.base_distribution.sample(
num_samples=num_samples, dtype=dtype
)
for t in self.transforms:
s = t.f(s)
return s
def sample_rep(
self, num_samples: Optional[int] = None, dtype=np.float
) -> Tensor:
s = self.base_distribution.sample_rep(dtype=dtype)
for t in self.transforms:
s = t.f(s)
return s
def log_prob(self, y: Tensor) -> Tensor:
F = getF(y)
lp = 0.0
x = y
for t in self.transforms[::-1]:
x = t.f_inv(y)
ladj = t.log_abs_det_jac(x, y)
lp -= sum_trailing_axes(F, ladj, self.event_dim - t.event_dim)
y = x
return self.base_distribution.log_prob(x) + lp
def cdf(self, y: Tensor) -> Tensor:
x = y
sign = 1.0
for t in self.transforms[::-1]:
x = t.f_inv(x)
sign = sign * t.sign
f = self.base_distribution.cdf(x)
return sign * (f - 0.5) + 0.5
    def quantile(self, level: Tensor) -> Tensor:
        # Quantiles of the transformed distribution. If the transform
        # chain is monotonically decreasing (combined sign < 0), the
        # requested level must be flipped to 1 - level before querying
        # the base distribution.
        F = getF(level)
        sign = 1.0
        for t in self.transforms:
            sign = sign * t.sign
        # sign may be a plain float (all-scalar transforms) or a tensor;
        # broadcast it against level so the comparison below is valid
        if not isinstance(sign, (mx.nd.NDArray, mx.sym.Symbol)):
            sign = sign + level.zeros_like()
        # where sign > 0 keep level, otherwise use 1 - level
        cond = F.broadcast_greater(sign, sign.zeros_like())
        level = F.broadcast_mul(cond, level) + F.broadcast_mul(
            1.0 - cond, 1.0 - level
        )
        q = self.base_distribution.quantile(level)
        # map the base-space quantiles forward through the transforms
        for t in self.transforms:
            q = t.f(q)
        return q
def sum_trailing_axes(F, x: Tensor, k: int) -> Tensor:
    """Sum out the last ``k`` axes of ``x`` using backend module ``F``."""
    result = x
    for _ in range(k):
        result = F.sum(result, axis=-1)
    return result
| 4,693 | 30.503356 | 79 | py |
rankpredictor | rankpredictor-master/sub/gluonts/distribution/lowrank_multivariate_gaussian.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import math
from typing import Optional, Tuple
# Third-party imports
import numpy as np
from mxnet import gluon
# First-party imports
from gluonts.core.component import validated
from gluonts.distribution import bijection
from gluonts.distribution.distribution import (
Distribution,
_sample_multiple,
getF,
)
from gluonts.distribution.distribution_output import (
ArgProj,
DistributionOutput,
TransformedDistribution,
)
from gluonts.model.common import Tensor
def capacitance_tril(F, rank: Tensor, W: Tensor, D: Tensor) -> Tensor:
    r"""
    Compute the lower-triangular Cholesky factor of the capacitance
    matrix :math:`I + W^T D^{-1} W`.

    Parameters
    ----------
    F
    rank
    W : (..., dim, rank)
    D : (..., dim)
    Returns
    -------
    Cholesky factor of the capacitance matrix :math:`I + W^T D^{-1} W`
    """
    # scale the rows of W by 1/D: (..., dim, rank)
    W_scaled = F.broadcast_div(W, D.expand_dims(axis=-1))
    # W^T D^{-1} W: (..., rank, rank)
    capacitance = F.linalg_gemm2(W_scaled, W, transpose_a=True)
    # add the identity, broadcast over batch dims: (..., rank, rank)
    identity = F.broadcast_mul(F.ones_like(capacitance), F.eye(rank))
    # Cholesky factorization
    return F.linalg.potrf(capacitance + identity)
def log_det(F, batch_D: Tensor, batch_capacitance_tril: Tensor) -> Tensor:
    r"""
    Compute :math:`\log|D + W W^T|` via the matrix determinant lemma:

    .. math::
        \log|D + W W^T| = \log|C| + \log|D|,

    where :math:`C = I + W^T D^{-1} W` is the capacitance matrix.

    Parameters
    ----------
    F
    batch_D
        Diagonal term, shape (..., dim).
    batch_capacitance_tril
        Cholesky factor of the capacitance matrix, shape (..., rank, rank).
    Returns
    -------
    Tensor of log-determinants, shape (...).
    """
    # log|C| from its Cholesky factor L: 2 * sum(log(diag(L)))
    log_det_C = 2 * F.linalg.sumlogdiag(batch_capacitance_tril)
    # log|D| for a diagonal matrix: sum of elementwise logs
    log_det_D = batch_D.log().sum(axis=-1)
    return log_det_C + log_det_D
def mahalanobis_distance(
    F, W: Tensor, D: Tensor, capacitance_tril: Tensor, x: Tensor
) -> Tensor:
    r"""
    Compute the squared Mahalanobis distance :math:`x^T (W W^T + D)^{-1} x`
    using the Woodbury matrix identity

    .. math::
        (W W^T + D)^{-1} = D^{-1} - D^{-1} W C^{-1} W^T D^{-1},

    where :math:`C = I + W^T D^{-1} W` is the capacitance matrix.

    Parameters
    ----------
    F
    W
        (..., dim, rank)
    D
        (..., dim)
    capacitance_tril
        (..., rank, rank)
    x
        (..., dim)
    Returns
    -------
    """
    # first Woodbury term: x^T D^-1 x, shape (...,)
    quad_D_inv = F.broadcast_div(x.square(), D).sum(axis=-1)
    # W^T D^{-1} x, shape (..., rank, 1)
    x_col = x.expand_dims(axis=-1)
    Wt_Dinv_x = F.linalg_gemm2(
        F.broadcast_div(W, D.expand_dims(axis=-1)), x_col, transpose_a=True
    )
    # solve L z = W^T D^{-1} x, then |z|^2 = x^T D^-1 W C^-1 W^T D^-1 x
    # (since C = L L^T), shape (..., rank) before reduction
    L_inv_Wt_Dinv_x = F.linalg_trsm(capacitance_tril, Wt_Dinv_x).squeeze(
        axis=-1
    )
    quad_capacitance = L_inv_Wt_Dinv_x.square().sum(axis=-1).squeeze()
    return quad_D_inv - quad_capacitance
def lowrank_log_likelihood(
    F, dim: int, rank: int, mu: Tensor, D: Tensor, W: Tensor, x: Tensor
) -> Tensor:
    """Log-density at ``x`` of a multivariate Gaussian with mean ``mu``
    and covariance :math:`D + W W^T`, evaluated with low-rank tricks."""
    # Cholesky factor of the capacitance matrix I + W^T D^-1 W
    chol_capacitance = capacitance_tril(F=F, rank=rank, W=W, D=D)
    # normalization constant: dim * log(2*pi)
    const_term = dim * math.log(2 * math.pi)
    # log-determinant of the covariance via the matrix determinant lemma
    logdet_term = log_det(
        F=F, batch_D=D, batch_capacitance_tril=chol_capacitance
    )
    # squared Mahalanobis distance of the centered observation
    quad_term = mahalanobis_distance(
        F=F, W=W, D=D, capacitance_tril=chol_capacitance, x=x - mu
    )
    ll: Tensor = -0.5 * (const_term + logdet_term + quad_term)
    return ll
class LowrankMultivariateGaussian(Distribution):
    r"""
    Multivariate Gaussian distribution, with covariance matrix parametrized
    as the sum of a diagonal matrix and a low-rank matrix
    .. math::
        \Sigma = D + W W^T
    The implementation is strongly inspired from Pytorch:
    https://github.com/pytorch/pytorch/blob/master/torch/distributions/lowrank_multivariate_normal.py.
    Complexity to compute log_prob is :math:`O(dim * rank + rank^3)` per element.
    Parameters
    ----------
    dim
        Dimension of the distribution's support
    rank
        Rank of W
    mu
        Mean tensor, of shape (..., dim)
    D
        Diagonal term in the covariance matrix, of shape (..., dim)
    W
        Low-rank factor in the covariance matrix, of shape (..., dim, rank)
    """
    is_reparameterizable = True
    @validated()
    def __init__(
        self, dim: int, rank: int, mu: Tensor, D: Tensor, W: Tensor
    ) -> None:
        self.dim = dim
        self.rank = rank
        self.mu = mu
        self.D = D
        self.W = W
        self.F = getF(mu)
        # lazily-computed cache for the full covariance matrix (see `variance`)
        self.Cov = None
    @property
    def batch_shape(self) -> Tuple:
        return self.mu.shape[:-1]
    @property
    def event_shape(self) -> Tuple:
        return self.mu.shape[-1:]
    @property
    def event_dim(self) -> int:
        return 1
    def log_prob(self, x: Tensor) -> Tensor:
        # delegates to the module-level helper that applies the Woodbury
        # identity and matrix determinant lemma for efficiency
        return lowrank_log_likelihood(
            F=self.F,
            dim=self.dim,
            rank=self.rank,
            mu=self.mu,
            D=self.D,
            W=self.W,
            x=x,
        )
    @property
    def mean(self) -> Tensor:
        return self.mu
    @property
    def variance(self) -> Tensor:
        # materializes the full covariance Sigma = D + W W^T on first
        # access and caches it; shape (..., dim, dim)
        if self.Cov is not None:
            return self.Cov
        # reshape to a matrix form (..., d, d)
        D_matrix = self.D.expand_dims(-1) * self.F.eye(self.dim)
        W_matrix = self.F.linalg_gemm2(self.W, self.W, transpose_b=True)
        self.Cov = D_matrix + W_matrix
        return self.Cov
    def sample_rep(self, num_samples: int = None, dtype=np.float32) -> Tensor:
        r"""
        Draw samples from the multivariate Gaussian distribution:
        .. math::
            s = \mu + D u + W v,
        where :math:`u` and :math:`v` are standard normal samples.
        Parameters
        ----------
        num_samples
            number of samples to be drawn.
        dtype
            Data-type of the samples.
        Returns
        -------
        tensor with shape (num_samples, ..., dim)
        """
        def s(mu: Tensor, D: Tensor, W: Tensor) -> Tensor:
            F = getF(mu)
            # diagonal part: sqrt(D) * u with u ~ N(0, I), shape (..., dim)
            samples_D = F.sample_normal(
                mu=F.zeros_like(mu), sigma=F.ones_like(mu), dtype=dtype
            )
            cov_D = D.sqrt() * samples_D
            # dummy only use to get the shape (..., rank, 1)
            dummy_tensor = F.linalg_gemm2(
                W, mu.expand_dims(axis=-1), transpose_a=True
            ).squeeze(axis=-1)
            # low-rank part: W * v with v ~ N(0, I), v of shape (..., rank)
            samples_W = F.sample_normal(
                mu=F.zeros_like(dummy_tensor),
                sigma=F.ones_like(dummy_tensor),
                dtype=dtype,
            )
            cov_W = F.linalg_gemm2(W, samples_W.expand_dims(axis=-1)).squeeze(
                axis=-1
            )
            samples = mu + cov_D + cov_W
            return samples
        return _sample_multiple(
            s, mu=self.mu, D=self.D, W=self.W, num_samples=num_samples
        )
class LowrankMultivariateGaussianOutput(DistributionOutput):
    """Distribution output class producing ``LowrankMultivariateGaussian``
    distributions from raw network outputs."""
    @validated()
    def __init__(self, dim: int, rank: int) -> None:
        self.distr_cls = LowrankMultivariateGaussian
        self.dim = dim
        self.rank = rank
        # sizes of the flat projections: mean (dim), diagonal (dim),
        # low-rank factor flattened (dim * rank)
        self.args_dim = {"mu": dim, "D": dim, "W": dim * rank}
        self.mu_bias = 0.0
        # small positive offset keeping the diagonal away from zero
        self.sigma_bias = 0.01
    def get_args_proj(self, prefix: Optional[str] = None) -> ArgProj:
        return ArgProj(
            args_dim=self.args_dim,
            domain_map=gluon.nn.HybridLambda(self.domain_map),
            prefix=prefix,
        )
    def distribution(self, distr_args, scale=None, **kwargs) -> Distribution:
        # todo dirty way of calling for now, this can be cleaned
        distr = LowrankMultivariateGaussian(self.dim, self.rank, *distr_args)
        if scale is None:
            return distr
        else:
            # wrap in an affine transform so samples/statistics are rescaled
            return TransformedDistribution(
                distr, [bijection.AffineTransformation(scale=scale)]
            )
    def domain_map(self, F, mu_vector, D_vector, W_vector):
        r"""
        Parameters
        ----------
        F
        mu_vector
            Tensor of shape (..., dim)
        D_vector
            Tensor of shape (..., dim)
        W_vector
            Tensor of shape (..., dim * rank )
        Returns
        -------
        Tuple
            A tuple containing tensors mu, D, and W, with shapes
            (..., dim), (..., dim), and (..., dim, rank), respectively.
        """
        def inv_softplus(y):
            # inverse of softplus, used to shift D_vector so that
            # softplus(shifted) >= sigma_bias^2
            if y < 20.0:
                # y = log(1 + exp(x)) ==> x = log(exp(y) - 1)
                return np.log(np.exp(y) - 1)
            else:
                # for large y, softplus(y) ~= y, so the inverse is ~identity
                return y
        # reshape from vector form (..., d * rank) to matrix form (..., d, rank)
        # (-2 keeps batch dims, -4 splits the last axis; reverse=1 applies
        # the special values from the right)
        W_matrix = W_vector.reshape((-2, self.dim, self.rank, -4), reverse=1)
        # apply softplus to D_vector and reshape coefficient of W_vector to a matrix
        D_diag = F.Activation(
            D_vector + inv_softplus(self.sigma_bias ** 2), act_type="softrelu"
        )
        return mu_vector + self.mu_bias, D_diag, W_matrix
    @property
    def event_shape(self) -> Tuple:
        return (self.dim,)
| 9,527 | 25.393352 | 103 | py |
rankpredictor | rankpredictor-master/sub/gluonts/distribution/binned.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Tuple, List
# Third-party imports
import mxnet as mx
from mxnet import gluon
import numpy as np
# First-party imports
from gluonts.core.component import validated
from gluonts.model.common import Tensor
# Relative imports
from .distribution import Distribution, _sample_multiple, getF
from .distribution_output import DistributionOutput
class Binned(Distribution):
    r"""
    A binned distribution defined by a set of bins via
    bin centers and bin probabilities.
    Parameters
    ----------
    bin_probs
        Tensor containing the bin probabilities, of shape `(*batch_shape, num_bins)`.
    bin_centers
        Tensor containing the bin centers, of shape `(*batch_shape, num_bins)`.
    F
    """
    is_reparameterizable = False
    @validated()
    def __init__(self, bin_probs: Tensor, bin_centers: Tensor, F=None) -> None:
        self.bin_centers = bin_centers
        self.bin_probs = bin_probs
        self.F = F if F else getF(bin_probs)
        # bin boundaries derived from the centers (see _compute_edges)
        self.bin_edges = Binned._compute_edges(self.F, bin_centers)
    @staticmethod
    def _compute_edges(F, bin_centers: Tensor) -> Tensor:
        r"""
        Computes the edges of the bins based on the centers. The first and last edge are set to :math:`-10^{10}` and
        :math:`10^{10}`, respectively.
        Parameters
        ----------
        F
        bin_centers
            Tensor of shape `(*batch_shape, num_bins)`.
        Returns
        -------
        Tensor
            Tensor of shape `(*batch_shape, num_bins+1)`
        """
        # outermost edges are effectively -inf / +inf
        low = (
            F.zeros_like(bin_centers.slice_axis(axis=-1, begin=0, end=1))
            - 1.0e10
        )
        high = (
            F.zeros_like(bin_centers.slice_axis(axis=-1, begin=0, end=1))
            + 1.0e10
        )
        # interior edges are midpoints between adjacent centers
        means = (
            bin_centers.slice_axis(axis=-1, begin=1, end=None)
            + bin_centers.slice_axis(axis=-1, begin=0, end=-1)
        ) / 2.0
        return F.concat(low, means, high, dim=-1)
    @property
    def batch_shape(self) -> Tuple:
        return self.bin_probs.shape[:-1]
    @property
    def event_shape(self) -> Tuple:
        return ()
    @property
    def event_dim(self) -> int:
        return 0
    @property
    def mean(self):
        # expectation of the discrete distribution: sum_i p_i * c_i
        return (self.bin_probs * self.bin_centers).sum(axis=-1)
    @property
    def stddev(self):
        # sqrt(E[x^2] - E[x]^2)
        Ex2 = (self.bin_probs * self.bin_centers.square()).sum(axis=-1)
        return (Ex2 - self.mean.square()).sqrt()
    def log_prob(self, x):
        # log-probability of the bin x falls into: build a one-hot mask of
        # the bin whose [left, right) interval contains x
        F = self.F
        x = x.expand_dims(axis=-1)
        # TODO: when mxnet has searchsorted replace this
        left_edges = self.bin_edges.slice_axis(axis=-1, begin=0, end=-1)
        right_edges = self.bin_edges.slice_axis(axis=-1, begin=1, end=None)
        mask = F.broadcast_lesser_equal(left_edges, x) * F.broadcast_lesser(
            x, right_edges
        )
        return F.broadcast_mul(self.bin_probs.log(), mask).sum(axis=-1)
    def cdf(self, x: Tensor) -> Tensor:
        # P(X <= x): sum of probabilities of all bins with center <= x
        F = self.F
        x = x.expand_dims(axis=-1)
        # left_edges = self.bin_edges.slice_axis(axis=-1, begin=0, end=-1)
        mask = F.broadcast_lesser_equal(self.bin_centers, x)
        return F.broadcast_mul(self.bin_probs, mask).sum(axis=-1)
    def quantile(self, level: Tensor) -> Tensor:
        # Quantiles via a sequential scan over the bins: accumulate the
        # CDF bin-by-bin and count how many bins are needed until the
        # cumulative probability exceeds each requested level.
        F = self.F
        probs = self.bin_probs.swapaxes(0, 1) # (num_bins, batch)
        zeros_batch_size = F.slice_axis(probs, axis=0, begin=0, end=1).squeeze(
            axis=0
        ) # (batch_size,)
        level = level.expand_dims(axis=0)
        # cdf shape (batch_size, levels)
        zeros_cdf = F.broadcast_add(
            zeros_batch_size.expand_dims(axis=1), level.zeros_like()
        )
        start_state = (zeros_cdf, zeros_cdf.astype("int32"))
        def step(p, state):
            # one scan step: add this bin's probability to the running cdf
            # and bump the index for levels not yet reached
            cdf, idx = state
            cdf = F.broadcast_add(cdf, p.expand_dims(axis=1))
            idx = F.where(F.broadcast_greater(cdf, level), idx, idx + 1)
            return zeros_batch_size, (cdf, idx)
        _, states = F.contrib.foreach(step, probs, start_state)
        _, idx = states
        # expand centers to shape (batch, levels, num_bins)
        # so we can use pick with idx.shape = (batch, levels)
        centers_expanded = F.broadcast_add(
            self.bin_centers.expand_dims(axis=1),
            zeros_cdf.expand_dims(axis=-1),
        )
        a = centers_expanded.pick(idx, axis=-1)
        return a.swapaxes(0, 1)
    def sample(self, num_samples=None, dtype=np.float32):
        # draw bin indices from the categorical distribution and map them
        # back to the corresponding bin centers
        def s(bin_probs):
            F = self.F
            indices = F.sample_multinomial(bin_probs)
            if num_samples is None:
                return self.bin_centers.pick(indices, -1).reshape_like(
                    F.zeros_like(indices.astype("float32"))
                )
            else:
                return F.repeat(
                    F.expand_dims(self.bin_centers, axis=0),
                    repeats=num_samples,
                    axis=0,
                ).pick(indices, -1)
        return _sample_multiple(s, self.bin_probs, num_samples=num_samples)
    @property
    def args(self) -> List:
        return [self.bin_probs, self.bin_centers]
class BinnedArgs(gluon.HybridBlock):
    """Projects network outputs to the arguments of a ``Binned``
    distribution: a softmax over ``num_bins`` probabilities, paired with
    the fixed (constant) bin centers."""
    def __init__(
        self, num_bins: int, bin_centers: mx.nd.NDArray, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.num_bins = num_bins
        with self.name_scope():
            # bin centers are fixed, not learned
            self.bin_centers = self.params.get_constant(
                "bin_centers", bin_centers
            )
            # needs to be named self.proj for consistency with the
            # ArgProj class and the inference tests
            self.proj = gluon.nn.HybridSequential()
            self.proj.add(
                gluon.nn.Dense(
                    self.num_bins,
                    prefix="binproj",
                    flatten=False,
                    weight_initializer=mx.init.Xavier(),
                )
            )
            # softmax turns the dense outputs into bin probabilities
            self.proj.add(gluon.nn.HybridLambda("softmax"))
    def hybrid_forward(
        self, F, x: Tensor, bin_centers: Tensor
    ) -> Tuple[Tensor, Tensor]:
        ps = self.proj(x)
        # flatten any extra leading axes into (..., num_bins)
        reshaped_probs = ps.reshape(shape=(-2, -1, self.num_bins), reverse=1)
        # broadcast the constant centers to the batch shape of the probs
        bin_centers = F.broadcast_add(bin_centers, ps.zeros_like())
        return reshaped_probs, bin_centers
class BinnedOutput(DistributionOutput):
    """Distribution output producing ``Binned`` distributions over a
    fixed, one-dimensional grid of bin centers."""
    distr_cls: type = Binned
    @validated()
    def __init__(self, bin_centers: mx.nd.NDArray) -> None:
        self.bin_centers = bin_centers
        self.num_bins = self.bin_centers.shape[0]
        # centers must form a 1-d vector
        assert len(self.bin_centers.shape) == 1
    def get_args_proj(self, *args, **kwargs) -> gluon.nn.HybridBlock:
        return BinnedArgs(self.num_bins, self.bin_centers)
    def distribution(self, args, scale=None) -> Binned:
        probs = args[0]
        centers = args[1]
        F = getF(probs)
        # broadcast the centers across the batch dimensions of probs
        centers = F.broadcast_mul(centers, F.ones_like(probs))
        if scale is not None:
            # rescale the support of the distribution
            centers = F.broadcast_mul(centers, scale.expand_dims(axis=-1))
        return Binned(probs, centers)
    @property
    def event_shape(self) -> Tuple:
        return ()
| 7,830 | 30.963265 | 116 | py |
rankpredictor | rankpredictor-master/sub/gluonts/distribution/distribution.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional, Tuple
# Third-party imports
import mxnet as mx
from mxnet import autograd
import numpy as np
# First-party imports
from gluonts.model.common import Tensor
def nans_like(x: Tensor) -> Tensor:
    """Return a tensor of NaNs with the same shape as ``x``.

    Relies on elementwise 0 / 0 evaluating to NaN.
    """
    zeros = x.zeros_like()
    return zeros / 0.0
def softplus(F, x: Tensor) -> Tensor:
    # softplus(x) = log(1 + exp(x)); mxnet names this activation "softrelu"
    return F.Activation(x, act_type="softrelu")
def getF(var: Tensor):
    """Return the mxnet API module matching ``var``'s type:
    ``mx.nd`` for NDArray inputs, ``mx.sym`` for Symbol inputs."""
    if isinstance(var, mx.sym.Symbol):
        return mx.sym
    if isinstance(var, mx.nd.NDArray):
        return mx.nd
    raise RuntimeError("var must be instance of NDArray or Symbol in getF")
class Distribution:
    r"""
    A class representing probability distributions.
    """
    arg_names: Tuple
    is_reparameterizable = False
    def log_prob(self, x: Tensor) -> Tensor:
        r"""
        Compute the log-density of the distribution at `x`.
        Parameters
        ----------
        x
            Tensor of shape `(*batch_shape, *event_shape)`.
        Returns
        -------
        Tensor
            Tensor of shape `batch_shape` containing the log-density of the
            distribution for each event in `x`.
        """
        raise NotImplementedError()
    def crps(self, x: Tensor) -> Tensor:
        r"""
        Compute the *continuous rank probability score* (CRPS) of `x` according
        to the distribution.
        Parameters
        ----------
        x
            Tensor of shape `(*batch_shape, *event_shape)`.
        Returns
        -------
        Tensor
            Tensor of shape `batch_shape` containing the CRPS score,
            according to the distribution, for each event in `x`.
        """
        raise NotImplementedError()
    def loss(self, x: Tensor) -> Tensor:
        r"""
        Compute the loss at `x` according to the distribution.
        By default, this method returns the negative of `log_prob`. For some
        distributions, however, the log-density is not easily computable
        and therefore other loss functions are computed.
        Parameters
        ----------
        x
            Tensor of shape `(*batch_shape, *event_shape)`.
        Returns
        -------
        Tensor
            Tensor of shape `batch_shape` containing the value of the loss
            for each event in `x`.
        """
        # default: negative log-likelihood
        return -self.log_prob(x)
    def prob(self, x: Tensor) -> Tensor:
        r"""
        Compute the density of the distribution at `x`.
        Parameters
        ----------
        x
            Tensor of shape `(*batch_shape, *event_shape)`.
        Returns
        -------
        Tensor
            Tensor of shape `batch_shape` containing the density of the
            distribution for each event in `x`.
        """
        return self.log_prob(x).exp()
    @property
    def batch_shape(self) -> Tuple:
        r"""
        Layout of the set of events contemplated by the distribution.
        Invoking `sample()` from a distribution yields a tensor of shape
        `batch_shape + event_shape`, and computing `log_prob` (or `loss`
        more in general) on such sample will yield a tensor of shape
        `batch_shape`.
        This property is available in general only in mx.ndarray mode,
        when the shape of the distribution arguments can be accessed.
        """
        raise NotImplementedError()
    @property
    def event_shape(self) -> Tuple:
        r"""
        Shape of each individual event contemplated by the distribution.
        For example, distributions over scalars have `event_shape = ()`,
        over vectors have `event_shape = (d, )` where `d` is the length
        of the vectors, over matrices have `event_shape = (d1, d2)`, and
        so on.
        Invoking `sample()` from a distribution yields a tensor of shape
        `batch_shape + event_shape`.
        This property is available in general only in mx.ndarray mode,
        when the shape of the distribution arguments can be accessed.
        """
        raise NotImplementedError()
    @property
    def event_dim(self) -> int:
        r"""
        Number of event dimensions, i.e., length of the `event_shape` tuple.
        This is `0` for distributions over scalars, `1` over vectors,
        `2` over matrices, and so on.
        """
        raise NotImplementedError()
    @property
    def batch_dim(self) -> int:
        r"""
        Number of batch dimensions, i.e., length of the `batch_shape` tuple.
        """
        return len(self.batch_shape)
    @property
    def all_dim(self) -> int:
        r"""
        Number of overall dimensions.
        """
        return self.batch_dim + self.event_dim
    def sample(
        self, num_samples: Optional[int] = None, dtype=np.float32
    ) -> Tensor:
        r"""
        Draw samples from the distribution.
        If num_samples is given the first dimension of the output will be
        num_samples.
        Parameters
        ----------
        num_samples
            Number of samples to to be drawn.
        dtype
            Data-type of the samples.
        Returns
        -------
        Tensor
            A tensor containing samples. This has shape
            `(*batch_shape, *eval_shape)` if `num_samples = None`
            and `(num_samples, *batch_shape, *eval_shape)` otherwise.
        """
        # default implementation: take reparameterized samples but cut
        # them off the autograd graph (pause + BlockGrad)
        with autograd.pause():
            var = self.sample_rep(num_samples=num_samples, dtype=dtype)
            F = getF(var)
        return F.BlockGrad(var)
    def sample_rep(
        self, num_samples: Optional[int] = None, dtype=np.float32
    ) -> Tensor:
        # reparameterized (differentiable) sampling; subclasses with
        # is_reparameterizable = True must implement this
        raise NotImplementedError()
    @property
    def args(self) -> List:
        # constructor arguments of the distribution, in constructor order;
        # used e.g. by slice_axis to rebuild a sliced instance
        raise NotImplementedError()
    @property
    def mean(self) -> Tensor:
        r"""
        Tensor containing the mean of the distribution.
        """
        raise NotImplementedError()
    @property
    def stddev(self) -> Tensor:
        r"""
        Tensor containing the standard deviation of the distribution.
        """
        raise NotImplementedError()
    @property
    def variance(self) -> Tensor:
        r"""
        Tensor containing the variance of the distribution.
        """
        return self.stddev.square()
    def cdf(self, x: Tensor) -> Tensor:
        r"""
        Returns the value of the cumulative distribution function evaluated at x
        """
        raise NotImplementedError()
    def quantile(self, level: Tensor) -> Tensor:
        r"""
        Calculates quantiles for the given levels.
        Parameters
        ----------
        level
            Level values to use for computing the quantiles.
            `level` should be a 1d tensor of level values between 0 and 1.
        Returns
        -------
        quantiles
            Quantile values corresponding to the levels passed.
            The return shape is
            (num_levels, ...DISTRIBUTION_SHAPE...),
            where DISTRIBUTION_SHAPE is the shape of the underlying distribution.
        """
        raise NotImplementedError()
    def slice_axis(
        self, axis: int, begin: int, end: Optional[int]
    ) -> "Distribution":
        """
        Construct a new distribution by slicing all constructor arguments
        as specified by the provided bounds. Relies on ``mx.nd.slice_axis``.
        """
        # rebuild the distribution from sliced args; assumes `args` is in
        # constructor order
        sliced_distr = self.__class__(
            *[arg.slice_axis(axis, begin, end) for arg in self.args]
        )
        assert isinstance(sliced_distr, type(self))
        return sliced_distr
def _expand_param(p: Tensor, num_samples: Optional[int] = None) -> Tensor:
"""
Expand parameters by num_samples along the first dimension.
"""
if num_samples is None:
return p
return p.expand_dims(axis=0).repeat(axis=0, repeats=num_samples)
def _sample_multiple(
    sample_func, *args, num_samples: Optional[int] = None, **kwargs
) -> Tensor:
    """
    Call ``sample_func`` with every positional and keyword tensor argument
    expanded by ``num_samples`` along a new leading axis, and return the
    resulting samples.
    """
    expanded_args = [_expand_param(a, num_samples) for a in args]
    expanded_kwargs = {
        key: _expand_param(value, num_samples)
        for key, value in kwargs.items()
    }
    return sample_func(*expanded_args, **expanded_kwargs)
| 8,844 | 28.095395 | 81 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.