import torch
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):
    """
    FreTS: Frequency-domain MLPs are More Effective Learners in Time Series Forecasting.
    Paper link: https://arxiv.org/pdf/2311.06184.pdf
    """

    def __init__(self, configs):
        super(Model, self).__init__()
        self.task_name = configs.task_name
        if self.task_name in ('classification', 'anomaly_detection', 'imputation'):
            self.pred_len = configs.seq_len
        else:
            self.pred_len = configs.pred_len
        self.embed_size = 128  # token embedding dimension D
        self.hidden_size = 256  # hidden width of the projection head
        self.feature_size = configs.enc_in  # number of input channels N
        self.seq_len = configs.seq_len
        self.channel_independence = configs.channel_independence
        self.sparsity_threshold = 0.01
        self.scale = 0.02
        self.embeddings = nn.Parameter(torch.randn(1, self.embed_size))
        # Real/imaginary parts of the complex weights for the channel-wise FreMLP.
        self.r1 = nn.Parameter(self.scale * torch.randn(self.embed_size, self.embed_size))
        self.i1 = nn.Parameter(self.scale * torch.randn(self.embed_size, self.embed_size))
        self.rb1 = nn.Parameter(self.scale * torch.randn(self.embed_size))
        self.ib1 = nn.Parameter(self.scale * torch.randn(self.embed_size))
        # Real/imaginary parts of the complex weights for the temporal FreMLP.
        self.r2 = nn.Parameter(self.scale * torch.randn(self.embed_size, self.embed_size))
        self.i2 = nn.Parameter(self.scale * torch.randn(self.embed_size, self.embed_size))
        self.rb2 = nn.Parameter(self.scale * torch.randn(self.embed_size))
        self.ib2 = nn.Parameter(self.scale * torch.randn(self.embed_size))

        # Projection head, applied per channel: flatten [T, D] and map to pred_len.
        self.fc = nn.Sequential(
            nn.Linear(self.seq_len * self.embed_size, self.hidden_size),
            nn.LeakyReLU(),
            nn.Linear(self.hidden_size, self.pred_len)
        )

    def tokenEmb(self, x):
        # x: [B, T, N] -> [B, N, T, 1]
        x = x.permute(0, 2, 1)
        x = x.unsqueeze(3)
        # Broadcast multiply with the [1, D] embedding: output [B, N, T, D]
        y = self.embeddings
        return x * y

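    # For example, with x_enc of shape [32, 96, 7] and embed_size = 128, tokenEmb
    # above returns a [32, 7, 96, 128] tensor: every scalar observation is scaled
    # into a D-dimensional token by the single shared embedding vector.
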
    def MLP_temporal(self, x, B, N, L):
        # FFT over the time dimension: [B, N, T, D] -> [B, N, T//2+1, D]
        x = torch.fft.rfft(x, dim=2, norm='ortho')
        y = self.FreMLP(B, N, L, x, self.r2, self.i2, self.rb2, self.ib2)
        x = torch.fft.irfft(y, n=self.seq_len, dim=2, norm="ortho")
        return x

    def MLP_channel(self, x, B, N, L):
        # [B, N, T, D] -> [B, T, N, D]
        x = x.permute(0, 2, 1, 3)
        # FFT over the channel dimension: [B, T, N, D] -> [B, T, N//2+1, D]
        x = torch.fft.rfft(x, dim=2, norm='ortho')
        y = self.FreMLP(B, L, N, x, self.r1, self.i1, self.rb1, self.ib1)
        x = torch.fft.irfft(y, n=self.feature_size, dim=2, norm="ortho")
        # [B, T, N, D] -> [B, N, T, D]
        x = x.permute(0, 2, 1, 3)
        return x

    def FreMLP(self, B, nd, dimension, x, r, i, rb, ib):
        # One frequency-domain MLP layer: a complex linear map per frequency bin,
        # with ReLU applied separately to the real and imaginary parts, then
        # soft-shrinkage to sparsify small responses. Note that the repeated
        # 'dd' einsum index applies only the diagonals of r and i.
        o1_real = F.relu(
            torch.einsum('bijd,dd->bijd', x.real, r) -
            torch.einsum('bijd,dd->bijd', x.imag, i) +
            rb
        )
        o1_imag = F.relu(
            torch.einsum('bijd,dd->bijd', x.imag, r) +
            torch.einsum('bijd,dd->bijd', x.real, i) +
            ib
        )
        y = torch.stack([o1_real, o1_imag], dim=-1)
        y = F.softshrink(y, lambd=self.sparsity_threshold)
        y = torch.view_as_complex(y)
        return y

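    # For intuition: per frequency bin, the linear step inside FreMLP above is an
    # elementwise complex multiply-add. A hypothetical native-complex rewrite of
    # just that linear step (illustrative only, not used by the model):
    #
    #     w = torch.complex(torch.diagonal(r), torch.diagonal(i))  # [D]
    #     b = torch.complex(rb, ib)                                # [D]
    #     y_linear = x * w + b  # x: complex [B, i, j, D], broadcast over leading dims
    #
    # This holds because the repeated 'dd' einsum index selects diag(r)/diag(i).
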
    def forecast(self, x_enc):
        # x_enc: [B, T, N]
        B, T, N = x_enc.shape
        # Token embedding: [B, N, T, D]
        x = self.tokenEmb(x_enc)
        bias = x
        # Frequency MLP across channels, only in the channel-dependent setting
        # (the flag may arrive as int or str depending on the argument parser).
        if self.channel_independence in (0, '0'):
            x = self.MLP_channel(x, B, N, T)
        # Frequency MLP across time, with a residual connection
        x = self.MLP_temporal(x, B, N, T)
        x = x + bias
        # [B, N, T, D] -> [B, N, T*D] -> [B, N, pred_len] -> [B, pred_len, N]
        x = self.fc(x.reshape(B, N, -1)).permute(0, 2, 1)
        return x

    def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
        if self.task_name in ('long_term_forecast', 'short_term_forecast'):
            dec_out = self.forecast(x_enc)
            return dec_out[:, -self.pred_len:, :]  # [B, pred_len, N]
        else:
            raise ValueError('Only forecasting tasks are implemented so far')
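

# A minimal smoke test, sketching how a surrounding experiment runner might
# drive this model. The config fields below (task_name, seq_len, pred_len,
# enc_in, channel_independence) mirror the attributes read in __init__ and
# forward; SimpleNamespace stands in for the argparse namespace that the real
# runner would supply, so the exact field set is an assumption.
if __name__ == '__main__':
    from types import SimpleNamespace

    configs = SimpleNamespace(
        task_name='long_term_forecast',
        seq_len=96,
        pred_len=24,
        enc_in=7,
        channel_independence=1,  # 1: skip the channel-wise frequency MLP
    )
    model = Model(configs)
    x_enc = torch.randn(4, configs.seq_len, configs.enc_in)  # [B, T, N]
    dec_out = model(x_enc, None, None, None)
    print(dec_out.shape)  # expected: torch.Size([4, 24, 7])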