pchen182224 committed on
Commit
c882c3e
·
verified ·
1 Parent(s): e44570d

Upload 9 files

Browse files
LightGTS-huggingface/config.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "act": "gelu",
3
+ "attn_dropout": 0.4,
4
+ "c_in": 1,
5
+ "context_points": 528,
6
+ "d_ff": 512,
7
+ "d_layers": 3,
8
+ "d_model": 256,
9
+ "dropout": 0.0,
10
+ "e_layers": 3,
11
+ "head_dropout": 0,
12
+ "head_type": "prediction",
13
+ "initializer_range": 0.02,
14
+ "mask_mode": "patch",
15
+ "mask_nums": 3,
16
+ "model_type": "LightGTS",
17
+ "n_heads": 16,
18
+ "num_patch": 11,
19
+ "patch_len": 48,
20
+ "shared_embedding": true,
21
+ "stride": 48,
22
+ "target_dim": 192,
23
+ "transformers_version": "4.30.2"
24
+ }
LightGTS-huggingface/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac0de5227afaac3014d45de31e650b6e277fac7c7628dd897bd49c4a6f4dad91
3
+ size 16018929
__init__.py ADDED
File without changes
configuration_LightGTS.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import PretrainedConfig
2
+ from typing import Optional
3
+ import math
4
+
5
+
6
class LightGTSConfig(PretrainedConfig):
    """Configuration class for the LightGTS time-series model.

    ``num_patch`` is derived from ``context_points``, ``patch_len`` and
    ``stride`` rather than accepted as an argument. Unrecognised keyword
    arguments are forwarded to ``PretrainedConfig``.
    """

    model_type = "LightGTS"

    def __init__(self, context_points:int = 512, c_in:int = 1, target_dim:int = 96, patch_len:int = 32, stride:int = 32, mask_mode:str = 'patch',mask_nums:int = 3,
                 e_layers:int=3, d_layers:int=3, d_model=256, n_heads=16, shared_embedding=True, d_ff:int=512,
                 norm:str='BatchNorm', attn_dropout:float=0.4, dropout:float=0., act:str="gelu",
                 res_attention:bool=True, pre_norm:bool=False, store_attn:bool=False,
                 pe:str='sincos', learn_pe:bool=False, head_dropout = 0,
                 head_type = "prediction", individual = False,
                 y_range:Optional[tuple]=None, verbose:bool=False, **kwargs):
        # Input / patching geometry
        self.context_points = context_points
        self.c_in = c_in
        self.target_dim = target_dim
        self.patch_len = patch_len
        self.stride = stride
        # Derived: number of patches the context window yields.
        self.num_patch = (max(self.context_points, self.patch_len)-self.patch_len) // self.stride + 1
        self.mask_mode = mask_mode
        self.mask_nums = mask_nums
        # Transformer sizes
        self.e_layers = e_layers
        self.d_layers = d_layers
        self.d_model = d_model
        self.n_heads = n_heads
        self.shared_embedding = shared_embedding
        self.d_ff = d_ff
        # Regularisation
        self.dropout = dropout
        self.attn_dropout = attn_dropout
        self.head_dropout = head_dropout
        self.act = act
        self.head_type = head_type
        self.initializer_range = 0.02
        super().__init__(**kwargs)
modeling_LightGTS.py ADDED
@@ -0,0 +1,768 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import PreTrainedModel
2
+ from configuration_LightGTS import LightGTSConfig
3
+ from ts_generation_mixin import TSGenerationMixin
4
+ import torch
5
+ from torch import nn
6
+ from torch import Tensor
7
+ from typing import Callable, Optional
8
+ import math
9
+ import torch.nn.functional as F
10
+ import numpy as np
11
+
12
+
13
class LightGTSPreTrainedModel(PreTrainedModel):
    """Base class wiring LightGTS into the Hugging Face PreTrainedModel
    machinery (config binding, weight init, sharding hints)."""

    # Hugging Face integration hooks
    config_class = LightGTSConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["TSTEncoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True
    _supports_sdpa = False
    _supports_cache_class = True

    def _init_weights(self, module):
        # Normal(0, initializer_range) init for Linear/Embedding weights;
        # biases and the padding embedding row are zeroed.
        std = self.config.initializer_range
        if isinstance(module, torch.nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, torch.nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
+
35
+
36
class LightGTSForPrediction(LightGTSPreTrainedModel, TSGenerationMixin):
    """Forecasting wrapper: splits the raw series into patches and runs the
    LightGTS backbone, optionally computing an MSE loss against ``labels``."""

    def __init__(self, config: LightGTSConfig):
        super().__init__(config)
        self.config = config
        # The original code referenced self.loss_fn in forward() without ever
        # defining it, so any call with `labels` raised AttributeError.
        self.loss_fn = nn.MSELoss()
        self.model = LightGTS(c_in=config.c_in,
                            target_dim=config.target_dim,
                            patch_len=config.patch_len,
                            stride=config.stride,
                            num_patch=config.num_patch,
                            e_layers=config.e_layers,
                            d_layers=config.d_layers,
                            n_heads=config.n_heads,
                            d_model=config.d_model,
                            shared_embedding=True,
                            d_ff=config.d_ff,
                            dropout=config.dropout,
                            attn_dropout=config.attn_dropout,
                            head_dropout=config.head_dropout,
                            act='relu',
                            head_type=config.head_type,
                            res_attention=False,
                            learn_pe=False
                            )

    def forward(self, input, labels=None, patch_len=None, stride=None, target_dim=None):
        """Run a forward pass.

        input: tensor [bs x seq_len x n_vars]
        labels: optional targets; when given, an MSE loss is returned too
        patch_len / stride / target_dim: optional per-call config overrides
        Returns: {"prediction": tensor, "loss": tensor or None}
        """
        # Only override stored hyper-parameters when explicitly provided.
        # The original unconditionally wrote the (possibly None) arguments
        # into the config, which crashed the patch arithmetic below; this
        # also makes the class consistent with LightGTSForFinetune.forward.
        if patch_len is not None:
            self.config.patch_len = patch_len
        if stride is not None:
            self.config.stride = stride
        if target_dim is not None:
            self.config.target_dim = target_dim

        # Split the series into patches.
        batch_size, seq_len, n_vars = input.shape
        num_patch = (max(seq_len, self.config.patch_len) - self.config.patch_len) // self.config.stride + 1
        self.config.num_patch = num_patch
        # NOTE(review): view() assumes seq_len == num_patch * patch_len,
        # i.e. non-overlapping patches (stride == patch_len) — confirm.
        outputs = input.view(batch_size, num_patch, self.config.patch_len, n_vars)
        outputs = outputs.transpose(2, 3)
        outputs = self.model(outputs, target_dim=self.config.target_dim, patch_len=self.config.patch_len, stride=self.config.stride)

        loss = None
        if labels is not None:
            if outputs.shape != labels.shape:
                outputs = outputs.view(labels.shape)
            loss = self.loss_fn(outputs, labels)

        return {"prediction": outputs, "loss": loss}
85
+
86
class LightGTSForFinetune(LightGTSPreTrainedModel, TSGenerationMixin):
    """Fine-tuning wrapper: patches the input series, runs the LightGTS
    backbone, and optionally computes a loss against ``labels``.

    NOTE(review): ``self.loss_fn`` is referenced in forward() but never
    defined here or in the visible base classes — calling forward with
    ``labels`` will raise AttributeError unless a caller sets it first.
    """

    def __init__(self, config: LightGTSConfig):
        super().__init__(config)
        self.config = config
        self.model = LightGTS(c_in=config.c_in,
                            target_dim=config.target_dim,
                            patch_len=config.patch_len,
                            stride=config.stride,
                            num_patch=config.num_patch,
                            e_layers=config.e_layers,
                            d_layers=config.d_layers,
                            n_heads=config.n_heads,
                            d_model=config.d_model,
                            shared_embedding=True,
                            d_ff=config.d_ff,
                            dropout=config.dropout,
                            attn_dropout=config.attn_dropout,
                            head_dropout=config.head_dropout,
                            act='relu',
                            head_type=config.head_type,
                            res_attention=False,
                            learn_pe=False
                            )

    def forward(self, input, labels=None, patch_len=None, stride=None, target_dim=None):
        """Run a forward pass.

        input: tensor [bs x seq_len x n_vars]
        labels: optional target tensor; when given, a loss is computed
        patch_len / stride / target_dim: optional per-call config overrides
        Returns: {"prediction": tensor, "loss": tensor or None}
        """
        # Per-call overrides only when explicitly provided.
        if patch_len is not None:
            self.config.patch_len = patch_len
        if stride is not None:
            self.config.stride = stride
        if target_dim is not None:
            self.config.target_dim = target_dim

        # Split the series into patches.
        batch_size,seq_len,n_vars = input.shape
        num_patch = (max(seq_len, self.config.patch_len)-self.config.patch_len) // self.config.stride + 1
        self.config.num_patch = num_patch
        # NOTE(review): view() assumes seq_len == num_patch * patch_len,
        # i.e. non-overlapping patches (stride == patch_len) — confirm.
        outputs = input.view(batch_size, num_patch, self.config.patch_len, n_vars)
        outputs = outputs.transpose(2, 3)
        outputs = self.model(outputs, target_dim=self.config.target_dim, patch_len=self.config.patch_len, stride=self.config.stride)

        loss = None
        if labels is not None:
            if outputs.shape != labels.shape:
                outputs = outputs.view(labels.shape)
            loss = self.loss_fn(outputs, labels)

        return {"prediction": outputs, "loss": loss}
138
+
139
+
140
+
141
class LightGTS(nn.Module):
    """
    LightGTS backbone: patch embedding -> Transformer encoder ->
    cross-attention decoder -> prediction head.

    Output dimension:
         [bs x target_dim x nvars] for prediction
         [bs x target_dim] for regression
         [bs x target_dim] for classification
         [bs x num_patch x n_vars x patch_len] for pretrain
    """
    def __init__(self, c_in:int, target_dim:int, patch_len:int, stride:int, num_patch:int, mask_mode:str = 'patch',mask_nums:int = 3,
                 e_layers:int=3, d_layers:int=3, d_model=128, n_heads=16, shared_embedding=True, d_ff:int=256,
                 norm:str='BatchNorm', attn_dropout:float=0.4, dropout:float=0., act:str="gelu",
                 res_attention:bool=True, pre_norm:bool=False, store_attn:bool=False,
                 pe:str='sincos', learn_pe:bool=False, head_dropout = 0,
                 head_type = "prediction", individual = False,
                 y_range:Optional[tuple]=None, verbose:bool=False, **kwargs):

        super().__init__()
        assert head_type in ['pretrain', 'prediction', 'regression', 'classification'], 'head type should be either pretrain, prediction, or regression'

        # Basic
        self.num_patch = num_patch
        self.target_dim=target_dim
        # Number of output patches needed to cover the forecast horizon.
        self.out_patch_num = math.ceil(target_dim / patch_len)
        # Canonical patch length the embedding is trained at; other patch
        # lengths are handled at run time by resampling this weight matrix.
        self.target_patch_len = 48
        # Embedding
        self.embedding = nn.Linear(self.target_patch_len, d_model)
        # Learnable [CLS]-style token, prepended per variable.
        self.cls_embedding = nn.Parameter(torch.randn(1, 1, 1, d_model),requires_grad=True)

        # Encoder
        self.encoder = TSTEncoder(d_model, n_heads, d_ff=d_ff, norm=norm, attn_dropout=attn_dropout, dropout=dropout,
                                   pre_norm=pre_norm, activation=act, res_attention=res_attention, n_layers=e_layers,
                                   store_attn=store_attn)

        # Decoder
        self.decoder = Decoder(d_layers, patch_len=patch_len, d_model=d_model, n_heads=n_heads, d_ff=d_ff,attn_dropout= attn_dropout, dropout=dropout)

        # Head
        self.n_vars = c_in
        self.head_type = head_type
        self.mask_mode = mask_mode
        self.mask_nums = mask_nums
        self.d_model = d_model
        self.patch_len = patch_len

        if head_type == "pretrain":
            self.head = PretrainHead(d_model, patch_len, head_dropout) # custom head passed as a partial func with all its kwargs
        elif head_type == "prediction":
            self.head = decoder_PredictHead(d_model, self.patch_len, self.target_patch_len, head_dropout)

    def get_dynamic_weights(self, n_preds, decay_rate=0.5):
        """
        Generate dynamic weights for the replicated tokens using an exponential decay scheme.

        Args:
        - n_preds (int): Number of predictions to generate weights for.
        - decay_rate (float): The base of the exponential decay. Lower values decay faster.

        Returns:
        - torch.Tensor: A tensor of weights with exponential decay.
        """
        # weights[i] = decay_rate ** i, i in [0, n_preds)
        weights = decay_rate ** torch.arange(n_preds)
        return weights

    def decoder_predict(self, bs, n_vars, dec_cross):
        """
        dec_cross: tensor [bs x n_vars x num_patch x d_model]
        Returns the decoder output transposed to
        [bs x n_vars x d_model x out_patch_num].
        """
        # Seed the decoder with the last encoder token replicated
        # out_patch_num times, scaled by exponentially decaying weights.
        dec_in = dec_cross[:,:,-1,:].unsqueeze(2).expand(-1,-1,self.out_patch_num,-1)
        weights = self.get_dynamic_weights(self.out_patch_num).to(dec_in.device)
        dec_in = dec_in * weights.unsqueeze(0).unsqueeze(0).unsqueeze(-1)
        decoder_output = self.decoder(dec_in, dec_cross)
        decoder_output = decoder_output.transpose(2,3)

        return decoder_output

    def forward(self, z, target_dim=None, patch_len=None, stride=None):
        """
        z: tensor [bs x num_patch x n_vars x patch_len]
        """
        # Per-call overrides (used when running at a different horizon or
        # patch length than the one configured at construction time).
        if target_dim is not None:
            self.target_dim = target_dim
        if patch_len is not None:
            self.patch_len = patch_len
        if stride is not None:
            self.stride = stride
        self.out_patch_num = math.ceil(self.target_dim / self.patch_len)

        bs, num_patch, n_vars, patch_len = z.shape
        # tokenizer
        cls_tokens = self.cls_embedding.expand(bs, n_vars, -1, -1)

        # NOTE(review): a fresh nn.Linear is created on every forward and its
        # weight replaced by a resampled copy of self.embedding, so the
        # projection adapts to the current patch_len. The layer itself is
        # instantiated on CPU; this relies on the assigned weight tensor
        # carrying the right device — confirm on GPU runs.
        embedding = nn.Linear(patch_len, self.d_model, bias=False)
        embedding.weight.data = resample_patchemb(old=self.embedding.weight.data, new_patch_len=self.patch_len)

        z = embedding(z).permute(0,2,1,3)                                # [bs x n_vars x num_patch x d_model]
        z = torch.cat((cls_tokens, z), dim=2)                            # [bs x n_vars x (1 + num_patch) x d_model]

        # encoder
        z = torch.reshape(z, (-1, 1 + num_patch, self.d_model))          # [bs*n_vars x num_patch x d_model]
        z = self.encoder(z)
        z = torch.reshape(z, (-1, n_vars, 1 + num_patch, self.d_model))  # [bs, n_vars x num_patch x d_model]

        # decoder
        z = self.decoder_predict(bs, n_vars, z[:,:,:,:])

        # predict
        z = self.head(z[:,:,:,:], self.patch_len)
        z = z[:,:self.target_dim, :]

        # z: [bs x target_dim x nvars] for prediction
        #    [bs x target_dim] for regression
        #    [bs x target_dim] for classification
        #    [bs x num_patch x n_vars x patch_len] for pretrain
        return z
278
+
279
class TSTEncoder(nn.Module):
    """Stack of ``TSTEncoderLayer`` blocks over [bs x q_len x d_model] input.

    With ``res_attention`` enabled, each layer also receives the pre-softmax
    attention scores of the previous layer (RealFormer-style residual
    attention).
    """

    def __init__(self, d_model, n_heads, d_ff=None,
                 norm='BatchNorm', attn_dropout=0., dropout=0., activation='gelu',
                 res_attention=False, n_layers=1, pre_norm=False, store_attn=False):
        super().__init__()
        self.layers = nn.ModuleList(
            TSTEncoderLayer(d_model, n_heads=n_heads, d_ff=d_ff, norm=norm,
                            attn_dropout=attn_dropout, dropout=dropout,
                            activation=activation, res_attention=res_attention,
                            pre_norm=pre_norm, store_attn=store_attn)
            for _ in range(n_layers)
        )
        self.res_attention = res_attention

    def forward(self, src:Tensor):
        """
        src: tensor [bs x q_len x d_model]
        """
        out = src
        if not self.res_attention:
            for layer in self.layers:
                out = layer(out)
            return out
        # Residual-attention path: thread scores through the stack.
        scores = None
        for layer in self.layers:
            out, scores = layer(out, prev=scores)
        return out
303
+
304
class TSTEncoderLayer(nn.Module):
    """Transformer encoder layer: multi-head self-attention + position-wise
    FFN, each with residual connection and (Batch|Layer)Norm, supporting
    both pre-norm and post-norm orderings."""

    def __init__(self, d_model, n_heads, d_ff=256, store_attn=False,
                 norm='LayerNorm', attn_dropout=0, dropout=0., bias=True,
                 activation="gelu", res_attention=False, pre_norm=False):
        super().__init__()
        assert not d_model%n_heads, f"d_model ({d_model}) must be divisible by n_heads ({n_heads})"
        d_k = d_model // n_heads
        d_v = d_model // n_heads

        # Multi-Head attention
        self.res_attention = res_attention
        self.self_attn = MultiheadAttention(d_model, n_heads, d_k, d_v, attn_dropout=attn_dropout, proj_dropout=dropout, res_attention=res_attention)

        # Add & Norm (BatchNorm normalises the feature dim, hence the transposes)
        self.dropout_attn = nn.Dropout(dropout)
        if "batch" in norm.lower():
            self.norm_attn = nn.Sequential(Transpose(1,2), nn.BatchNorm1d(d_model), Transpose(1,2))
        else:
            self.norm_attn = nn.LayerNorm(d_model)

        # Position-wise Feed-Forward
        self.ff = nn.Sequential(nn.Linear(d_model, d_ff, bias=bias),
                                get_activation_fn(activation),
                                nn.Dropout(dropout),
                                nn.Linear(d_ff, d_model, bias=bias))

        # Add & Norm
        self.dropout_ffn = nn.Dropout(dropout)
        if "batch" in norm.lower():
            self.norm_ffn = nn.Sequential(Transpose(1,2), nn.BatchNorm1d(d_model), Transpose(1,2))
        else:
            self.norm_ffn = nn.LayerNorm(d_model)

        self.pre_norm = pre_norm
        self.store_attn = store_attn

    def forward(self, src:Tensor, prev:Optional[Tensor]=None):
        """
        src: tensor [bs x q_len x d_model]
        prev: optional residual attention scores from the previous layer
              (only used when res_attention is enabled)
        """
        # Multi-Head attention sublayer
        if self.pre_norm:
            src = self.norm_attn(src)
        ## Multi-Head attention
        if self.res_attention:
            src2, attn, scores = self.self_attn(src, src, src, prev)
        else:
            src2, attn = self.self_attn(src, src, src)
        if self.store_attn:
            # Kept for inspection/visualisation only.
            self.attn = attn

        ## Add & Norm
        src = src + self.dropout_attn(src2) # Add: residual connection with residual dropout
        if not self.pre_norm:
            src = self.norm_attn(src)

        # Feed-forward sublayer
        if self.pre_norm:
            src = self.norm_ffn(src)
        ## Position-wise Feed-Forward
        src2 = self.ff(src)
        ## Add & Norm
        src = src + self.dropout_ffn(src2) # Add: residual connection with residual dropout
        if not self.pre_norm:
            src = self.norm_ffn(src)

        if self.res_attention:
            return src, scores
        else:
            return src
386
+
387
+
388
class Decoder(nn.Module):
    """Stack of ``DecoderLayer`` blocks; every layer cross-attends to the
    same encoder memory ``cross``."""

    def __init__(self, d_layers, patch_len, d_model, n_heads, d_ff=None, attn_dropout=0.2, dropout=0.1):
        super().__init__()
        self.decoder_layers = nn.ModuleList(
            DecoderLayer(patch_len, d_model, n_heads, d_ff, attn_dropout, dropout)
            for _ in range(d_layers)
        )

    def forward(self, x, cross):
        """x: decoder input; cross: encoder output shared by all layers."""
        out = x
        for block in self.decoder_layers:
            out = block(out, cross)
        return out
401
+
402
+
403
class DecoderLayer(nn.Module):
    """Decoder block: causal self-attention, cross-attention to the encoder
    memory, and a pointwise MLP — each followed by a residual add.

    Note the unusual ordering: the norm is applied to the sublayer output
    *before* the residual add (``norm(sublayer(x)) + x``) rather than the
    standard post-norm ``norm(sublayer(x) + x)``.
    """

    def __init__(self, patch_len, d_model, n_heads, d_ff=None, attn_dropout = 0.2, dropout=0.5, norm="BatchNorm"):
        super(DecoderLayer, self).__init__()
        self.self_attention = MultiheadAttention(d_model, n_heads, res_attention=False, attn_dropout=attn_dropout)
        # rope_type=True selects the decoder RoPE variant (one position
        # table shared between query and key sequences of different length).
        self.cross_attention = MultiheadAttention(d_model, n_heads, attn_dropout=attn_dropout, rope_type=True)

        if 'batch' in norm.lower():
            self.norm1 = nn.Sequential(Transpose(1,2), nn.BatchNorm1d(d_model), Transpose(1,2))
            self.norm2 = nn.Sequential(Transpose(1,2), nn.BatchNorm1d(d_model), Transpose(1,2))
            self.norm3 = nn.Sequential(Transpose(1,2), nn.BatchNorm1d(d_model), Transpose(1,2))
        else:
            self.norm1 = nn.LayerNorm(d_model)
            self.norm2 = nn.LayerNorm(d_model)
            self.norm3 = nn.LayerNorm(d_model)

        self.dropout = nn.Dropout(dropout)

        self.MLP1 = CMlp(in_features = d_model, hidden_features = d_ff, out_features = d_model, drop=dropout)

    def forward(self, x, cross):
        """
        x:     [batch x n_vars x num_patch x d_model] decoder input
        cross: [batch x n_vars x enc_len x d_model]  encoder memory
        """
        batch, n_vars, num_patch, d_model = x.shape
        # Fold variables into the batch so attention runs per variable.
        x = x.reshape(batch*n_vars, num_patch, d_model)

        cross = cross.reshape(batch*n_vars, -1, d_model)

        # Causal mask: each output patch may only see earlier patches.
        attention_mask = causal_attention_mask(num_patch).to(x.device)
        x_attn , _= self.self_attention(x, attn_mask=attention_mask)
        x_attn = self.norm1(x_attn) + x

        x_cross , _ = self.cross_attention(x_attn, cross, cross)
        x_cross = self.dropout(self.norm2(x_cross)) + x_attn

        x_ff = self.MLP1(x_cross)
        x_ff = self.norm3(x_ff) + x_cross

        x_ff = x_ff.reshape(batch, n_vars, num_patch, d_model)

        return x_ff
449
+
450
def causal_attention_mask(seq_length):
    """Build an additive causal attention mask.

    Entry (i, j) is 0 when j <= i (position j is visible when computing the
    attention for position i) and -inf when j > i (future positions are
    hidden: softmax turns -inf scores into zero weight).

    Args:
        seq_length (int): length of the sequence

    Returns:
        torch.Tensor: mask of shape (seq_length, seq_length)
    """
    everything_hidden = torch.full((seq_length, seq_length), float('-inf'))
    # Keep -inf strictly above the diagonal, zero elsewhere.
    return torch.triu(everything_hidden, diagonal=1)
464
+
465
class CMlp(nn.Module):
    """Pointwise MLP implemented with 1x1 convolutions over the feature axis.

    Input and output are [bs x seq_len x features]; the tensor is transposed
    internally so Conv1d sees features as channels.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden = hidden_features or in_features
        out = out_features or in_features
        self.fc1 = nn.Conv1d(in_features, hidden, 1)
        self.act = act_layer()
        self.fc2 = nn.Conv1d(hidden, out, 1)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # (bs, seq, feat) -> (bs, feat, seq) so the 1x1 convs mix features.
        y = x.permute(0, 2, 1)
        y = self.drop(self.act(self.fc1(y)))
        y = self.drop(self.fc2(y))
        return y.permute(0, 2, 1)
484
+
485
class Transpose(nn.Module):
    """Module wrapper around ``Tensor.transpose`` so it can sit inside an
    ``nn.Sequential`` (optionally forcing a contiguous result)."""

    def __init__(self, *dims, contiguous=False):
        super().__init__()
        self.dims = dims
        self.contiguous = contiguous

    def forward(self, x):
        swapped = x.transpose(*self.dims)
        return swapped.contiguous() if self.contiguous else swapped
492
+
493
+
494
class MultiheadAttention(nn.Module):
    def __init__(self, d_model, n_heads, d_k=None, d_v=None, res_attention=False, attn_dropout=0., proj_dropout=0., qkv_bias=True, lsa=False, rope_type=False):
        """Multi Head Attention Layer
        Input shape:
            Q:       [batch_size (bs) x max_q_len x d_model]
            K, V:    [batch_size (bs) x q_len x d_model]
            mask:    [q_len x q_len]
        ``rope_type`` selects the decoder RoPE variant in the inner
        scaled-dot-product attention; ``res_attention`` threads raw scores
        through consecutive layers.
        """
        super().__init__()
        # Default per-head dims split d_model evenly.
        d_k = d_model // n_heads if d_k is None else d_k
        d_v = d_model // n_heads if d_v is None else d_v

        self.n_heads, self.d_k, self.d_v = n_heads, d_k, d_v

        self.W_Q = nn.Linear(d_model, d_k * n_heads, bias=qkv_bias)
        self.W_K = nn.Linear(d_model, d_k * n_heads, bias=qkv_bias)
        self.W_V = nn.Linear(d_model, d_v * n_heads, bias=qkv_bias)

        # Scaled Dot-Product Attention (multiple heads)
        self.res_attention = res_attention
        self.sdp_attn = ScaledDotProductAttention(d_model, n_heads, attn_dropout=attn_dropout, res_attention=self.res_attention, lsa=lsa, rope_type=rope_type)

        # Project output
        self.to_out = nn.Sequential(nn.Linear(n_heads * d_v, d_model), nn.Dropout(proj_dropout))

    def forward(self, Q:Tensor, K:Optional[Tensor]=None, V:Optional[Tensor]=None, prev:Optional[Tensor]=None,
                key_padding_mask:Optional[Tensor]=None, attn_mask:Optional[Tensor]=None):
        # Self-attention convenience: K and V default to Q.
        bs = Q.size(0)
        if K is None: K = Q
        if V is None: V = Q

        # Linear (+ split in multiple heads)
        q_s = self.W_Q(Q).view(bs, -1, self.n_heads, self.d_k).transpose(1,2)       # q_s    : [bs x n_heads x max_q_len x d_k]
        k_s = self.W_K(K).view(bs, -1, self.n_heads, self.d_k).permute(0,2,3,1)     # k_s    : [bs x n_heads x d_k x q_len] - transpose(1,2) + transpose(2,3)
        v_s = self.W_V(V).view(bs, -1, self.n_heads, self.d_v).transpose(1,2)       # v_s    : [bs x n_heads x q_len x d_v]

        # Apply Scaled Dot-Product Attention (multiple heads)
        if self.res_attention:
            output, attn_weights, attn_scores = self.sdp_attn(q_s, k_s, v_s, prev=prev, key_padding_mask=key_padding_mask, attn_mask=attn_mask)
        else:
            output, attn_weights = self.sdp_attn(q_s, k_s, v_s, key_padding_mask=key_padding_mask, attn_mask=attn_mask)
        # output: [bs x n_heads x q_len x d_v], attn: [bs x n_heads x q_len x q_len], scores: [bs x n_heads x max_q_len x q_len]

        # back to the original inputs dimensions
        output = output.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * self.d_v) # output: [bs x q_len x n_heads * d_v]
        output = self.to_out(output)

        if self.res_attention: return output, attn_weights, attn_scores
        else: return output, attn_weights
547
+
548
class ScaledDotProductAttention(nn.Module):
    r"""Scaled Dot-Product Attention module (Attention is all you need by Vaswani et al., 2017) with optional residual attention from previous layer
    (Realformer: Transformer likes residual attention by He et al, 2020) and locality self attention (Vision Transformer for Small-Size Datasets
    by Lee et al, 2021). Queries and keys are rotated with RoPE before the
    dot product; ``rope_type=True`` selects the decoder variant that shares
    one position table between sequences of different lengths."""

    def __init__(self, d_model, n_heads, attn_dropout=0., res_attention=False, lsa=False, rope_type=False):
        super().__init__()
        self.attn_dropout = nn.Dropout(attn_dropout)
        self.res_attention = res_attention
        head_dim = d_model // n_heads
        # Scale is trainable only under locality self-attention (lsa).
        self.scale = nn.Parameter(torch.tensor(head_dim ** -0.5), requires_grad=lsa)
        self.lsa = lsa
        self.rope_type = rope_type

    def forward(self, q:Tensor, k:Tensor, v:Tensor, prev:Optional[Tensor]=None, key_padding_mask:Optional[Tensor]=None, attn_mask:Optional[Tensor]=None):
        '''
        Input shape:
            q               : [bs x n_heads x max_q_len x d_k]
            k               : [bs x n_heads x d_k x seq_len]
            v               : [bs x n_heads x seq_len x d_v]
            prev            : [bs x n_heads x q_len x seq_len]
            key_padding_mask: [bs x seq_len]
            attn_mask       : [1 x seq_len x seq_len]
        Output shape:
            output:  [bs x n_heads x q_len x d_v]
            attn   : [bs x n_heads x q_len x seq_len]
            scores : [bs x n_heads x q_len x seq_len]
        '''
        # using RoPE (k arrives transposed, so undo the transpose for the
        # rotation and restore it afterwards)
        if self.rope_type:
            q, k = RoPE_decoder(q, k.permute(0,1,3,2))
        else:
            q, k = RoPE(q, k.permute(0,1,3,2))
        k = k.permute(0,1,3,2)

        # Scaled MatMul (q, k) - similarity scores for all pairs of positions in an input sequence
        attn_scores = torch.matmul(q, k) * self.scale      # attn_scores : [bs x n_heads x max_q_len x q_len]

        # Add pre-softmax attention scores from the previous layer (optional)
        if prev is not None: attn_scores = attn_scores + prev

        # Attention mask (optional)
        if attn_mask is not None:                          # attn_mask with shape [q_len x seq_len] - only used when q_len == seq_len
            if attn_mask.dtype == torch.bool:
                attn_scores.masked_fill_(attn_mask, -np.inf)
            else:
                attn_scores += attn_mask

        # Key padding mask (optional)
        if key_padding_mask is not None:                   # mask with shape [bs x q_len] (only when max_w_len == q_len)
            attn_scores.masked_fill_(key_padding_mask.unsqueeze(1).unsqueeze(2), -np.inf)

        # normalize the attention weights
        attn_weights = F.softmax(attn_scores, dim=-1)      # attn_weights : [bs x n_heads x max_q_len x q_len]
        attn_weights = self.attn_dropout(attn_weights)

        # compute the new values given the attention weights
        output = torch.matmul(attn_weights, v)             # output: [bs x n_heads x max_q_len x d_v]

        if self.res_attention: return output, attn_weights, attn_scores
        else: return output, attn_weights
609
+
610
def RoPE(q, k):
    """Apply rotary position embedding (RoPE) to query/key tensors that
    share the same sequence length and positions.

    q, k: (bs, head, max_len, output_dim). Returns the rotated (q, k)
    with unchanged shapes.
    """
    batch_size = q.shape[0]
    nums_head = q.shape[1]
    max_len = q.shape[2]
    output_dim = q.shape[-1]

    # (bs, head, max_len, output_dim)
    pos_emb = sinusoidal_position_embedding(batch_size, nums_head, max_len, output_dim, q.device, factor=1)

    # In the RoPE formulation adjacent feature pairs share one angle, so
    # each cos/sin column is duplicated: (1,2,3) -> (1,1,2,2,3,3).
    cos_pos = pos_emb[..., 1::2].repeat_interleave(2, dim=-1)  # odd columns hold cos; extract and duplicate
    sin_pos = pos_emb[..., ::2].repeat_interleave(2, dim=-1)   # even columns hold sin; extract and duplicate

    # Rotated counterpart of q: (-q1, q0, -q3, q2, ...).
    q2 = torch.stack([-q[..., 1::2], q[..., ::2]], dim=-1)
    q2 = q2.reshape(q.shape)  # after the reshape the signs alternate as required

    # Rotate q (elementwise multiply).
    q = q * cos_pos + q2 * sin_pos

    k2 = torch.stack([-k[..., 1::2], k[..., ::2]], dim=-1)
    k2 = k2.reshape(k.shape)
    # Rotate k the same way.
    k = k * cos_pos + k2 * sin_pos

    return q, k
639
+
640
+
641
def RoPE_decoder(q, k):
    """Rotary position embedding for cross-attention where q and k have
    different lengths: a single table covers k_len + q_len positions; keys
    take the leading k_len entries and queries the trailing q_len entries.

    q, k: (bs, head, len, output_dim). Returns the rotated (q, k).
    """
    batch_size = q.shape[0]
    nums_head = q.shape[1]
    q_max_len = q.shape[2]
    k_max_len = k.shape[2]
    output_dim = q.shape[-1]

    # (bs, head, k_len + q_len, output_dim) shared position table
    pos_emb = sinusoidal_position_embedding(batch_size, nums_head, k_max_len + q_max_len, output_dim, q.device, factor=1)

    # Adjacent feature pairs share one angle, so each cos/sin column is
    # duplicated: (1,2,3) -> (1,1,2,2,3,3).
    cos_pos = pos_emb[..., 1::2].repeat_interleave(2, dim=-1)  # odd columns hold cos; extract and duplicate
    sin_pos = pos_emb[..., ::2].repeat_interleave(2, dim=-1)   # even columns hold sin; extract and duplicate

    # Rotated counterpart of q: (-q1, q0, -q3, q2, ...).
    q2 = torch.stack([-q[..., 1::2], q[..., ::2]], dim=-1)
    q2 = q2.reshape(q.shape)  # after the reshape the signs alternate as required

    # Queries use the trailing q_max_len positions of the table.
    q = q * cos_pos[:,:,-q_max_len:,:] + q2 * sin_pos[:,:,-q_max_len:,:]

    k2 = torch.stack([-k[..., 1::2], k[..., ::2]], dim=-1)
    k2 = k2.reshape(k.shape)
    # Keys use the leading k_max_len positions.
    k = k * cos_pos[:,:,:k_max_len,:] + k2 * sin_pos[:,:,:k_max_len,:]
    return q, k
672
+
673
def sinusoidal_position_embedding(batch_size, nums_head, max_len, output_dim, device, factor=1.0):
    """Classic sin/cos position table broadcast to (bs, head, max_len, output_dim).

    Even feature indices hold sin, odd indices hold cos. With ``factor`` > 1
    a finer-grained table is built and then subsampled back down to
    ``max_len`` interpolated positions.
    """
    # (max_len * factor, 1) — fractional positions when factor != 1
    position = torch.arange(0, max_len * factor, 1 / factor, dtype=torch.float).unsqueeze(-1)
    # Frequencies theta_i = 10000^(-2i/d) for i in [0, d/2).
    ids = torch.arange(0, output_dim // 2, dtype=torch.float)
    theta = torch.pow(10000, -2 * ids / output_dim)

    # (positions, output_dim//2) angle matrix
    angles = position * theta

    # (positions, output_dim//2, 2): interleave sin and cos per frequency.
    table = torch.stack([torch.sin(angles), torch.cos(angles)], dim=-1)

    # Broadcast to every batch element and head...
    table = table.repeat((batch_size, nums_head, *([1] * len(table.shape))))
    # ...and flatten the (d/2, 2) pairs into the feature axis.
    table = torch.reshape(table, (batch_size, nums_head, -1, output_dim))
    table = table.to(device)

    # For factor > 1, pick max_len evenly spaced interpolated positions.
    if factor > 1.0:
        picked = torch.linspace(0, table.shape[2] - 1, max_len).long()
        table = table[:, :, picked, :]

    return table
699
+
700
class PretrainHead(nn.Module):
    """Reconstruction head for pretraining: projects encoder states back to
    patch space."""

    def __init__(self, d_model, patch_len, dropout):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.linear = nn.Linear(d_model, patch_len)

    def forward(self, x):
        """
        x: tensor [bs x nvars x d_model x num_patch]
        output: tensor [bs x num_patch x nvars x patch_len]
        """
        hidden = x.transpose(2, 3)                    # [bs x nvars x num_patch x d_model]
        patches = self.linear(self.dropout(hidden))   # [bs x nvars x num_patch x patch_len]
        return patches.permute(0, 2, 1, 3)            # [bs x num_patch x nvars x patch_len]
716
+
717
+
718
class decoder_PredictHead(nn.Module):
    """Prediction head: projects decoder states to patch values, resampling
    the trained projection to whatever patch length is requested per call."""

    def __init__(self, d_model, patch_len, target_patch_len, dropout):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        # Canonical projection trained at target_patch_len.
        self.linear = nn.Linear(d_model, target_patch_len)
        self.d_model = d_model

    def forward(self, x, patch_len):
        """
        x: tensor [bs x nvars x d_model x num_patch]
        output: tensor [bs x (num_patch * patch_len) x nvars]
        """
        # NOTE(review): a fresh Linear is built on every call, with weights
        # resampled from self.linear to match patch_len. The layer is
        # instantiated on CPU and relies on the assigned weight carrying the
        # right device — confirm on GPU runs.
        Linear = nn.Linear(self.d_model, patch_len, bias=False)
        Linear.weight.data = resample_patchemb(old=self.linear.weight.data.T, new_patch_len=patch_len).T

        x = x.transpose(2,3)                     # [bs x nvars x num_patch x d_model]
        x = Linear( self.dropout(x) )            # [bs x nvars x num_patch x patch_len]
        x = x.permute(0,2,3,1)                   # [bs x num_patch x patch_len x nvars]
        # Flatten patches into one time axis: [bs x (num_patch*patch_len) x nvars]
        return x.reshape(x.shape[0],-1,x.shape[3])
737
+
738
def resample_patchemb(old: torch.Tensor, new_patch_len: int):
    """Resample a (d_model, patch_len) embedding matrix to a new patch length.

    Builds a linear-interpolation resize operator as an explicit matrix and
    applies its pseudo-inverse, so the resampled weights act on resized
    patches like the originals did (FlexiViT-style patch-embedding resize).
    Returns the input unchanged when the length already matches.
    """
    assert old.dim() == 2, "输入张量应为2D (d_model, patch_size)"
    if old.size(1) == new_patch_len:
        return old

    weights = old.T                        # (old_patch_len, d_model)
    old_len = weights.size(0)
    scale = new_patch_len / old_len

    def _resize(mat, length):
        # Batched linear interpolation along the last axis.
        return F.interpolate(mat.unsqueeze(0), size=length, mode='linear').squeeze(0)

    # Express the resize as a matrix by resizing the identity basis.
    basis = torch.eye(old_len, dtype=torch.float32, device=weights.device)
    resize_mat = _resize(basis, new_patch_len).T
    # Pseudo-inverse gives the least-squares resampling operator.
    resize_pinv = torch.linalg.pinv(resize_mat.T)

    # sqrt(scale) keeps the response magnitude comparable after resizing.
    resampled = resize_pinv @ weights * math.sqrt(scale)

    return resampled.T
762
+
763
+
764
def get_activation_fn(activation):
    """Return an activation module from a name ("relu"/"gelu", case
    insensitive) or instantiate a user-supplied factory callable."""
    if callable(activation):
        return activation()
    name = activation.lower()
    if name == "relu":
        return nn.ReLU()
    if name == "gelu":
        return nn.GELU()
    raise ValueError(f'{activation} is not available. You can use "relu", "gelu", or a callable')
test.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
test_finetune.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.optim as optim
4
+ from torch.utils.data import Dataset, DataLoader
5
+ import pandas as pd
6
+ from sklearn.preprocessing import StandardScaler
7
+ from tqdm import tqdm
8
+ import os
9
+ from configuration_LightGTS import LightGTSConfig
10
+ from modeling_LightGTS import LightGTSForFinetune
11
+ import torch
12
+ from transformers import AutoModelForCausalLM
13
+ from transformers import AutoModelForCausalLM, MODEL_MAPPING
14
+ from transformers import AutoConfig
15
+ os.environ['CUDA_VISIBLE_DEVICES']='6'
16
class TimeSeriesDataset(Dataset):
    """Sliding-window dataset over a single target column of a DataFrame.

    Each item pairs a ``lookback``-step input window with the immediately
    following ``pred_len``-step target window. The series is standardized
    (zero mean, unit variance) at construction time, which is strongly
    recommended when fine-tuning.
    """

    def __init__(self, df, target_col="HUFL", lookback=528, pred_len=192):
        self.lookback = lookback
        self.pred_len = pred_len

        # Keep only the target column as a (T, 1) array.
        raw = df[[target_col]].values

        # Standardize; the fitted scaler is kept so callers can invert it.
        self.scaler = StandardScaler()
        self.data = self.scaler.fit_transform(raw)

    def __len__(self):
        # Number of start positions that leave room for lookback + pred_len.
        return len(self.data) - self.lookback - self.pred_len + 1

    def __getitem__(self, idx):
        # Input window, then the prediction window right after it.
        split = idx + self.lookback
        window = self.data[idx:split]
        target = self.data[split:split + self.pred_len]

        return {
            "input": torch.tensor(window, dtype=torch.float32),
            "labels": torch.tensor(target, dtype=torch.float32)
        }
41
+
42
if __name__=="__main__":
    # Fine-tuning driver: load ETTh1, register the custom LightGTS
    # architecture with the HF Auto* factories, then train for one epoch.
    print("Loading data...")
    # NOTE(review): absolute, machine-specific path -- parameterize before sharing.
    df1 = pd.read_csv("/home/wlf/LightGTS/LightGTS/data/predict_datasets/ETTh1.csv")

    lookback_length = 528
    target_length = 240

    train_dataset = TimeSeriesDataset(df1, target_col="HUFL", lookback=lookback_length, pred_len=target_length)
    train_dataloader = DataLoader(train_dataset, batch_size=32, shuffle=True, drop_last=True)


    print("Initializing model...")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Write a default config so from_pretrained below can load it.
    # NOTE(review): config declares target_dim=192 but the forward call below
    # trains with target_dim=240 -- presumably the model resamples the head at
    # runtime; confirm this mismatch is intentional.
    LightGTS_config = LightGTSConfig(context_points=528, c_in=1, target_dim=192, patch_len=48, stride=48)
    LightGTS_config.save_pretrained("LightGTS-huggingface")

    # Register the custom architecture so AutoModelForCausalLM resolves "LightGTS".
    AutoConfig.register("LightGTS",LightGTSConfig)
    AutoModelForCausalLM.register(LightGTSConfig, LightGTSForFinetune)

    model = AutoModelForCausalLM.from_pretrained(
        "./LightGTS-huggingface",
        trust_remote_code=True
    )

    model.loss_fn = nn.MSELoss()
    model.to(device)
    optimizer = optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-2)

    num_epochs = 1
    print(f"Starting training on {device} for {num_epochs} epochs...")

    for epoch in range(num_epochs):
        model.train()
        total_loss = 0.0

        progress_bar = tqdm(train_dataloader, desc=f"Epoch {epoch+1}/{num_epochs}")

        for batch in progress_bar:

            inputs = batch["input"].to(device)
            labels = batch["labels"].to(device)

            optimizer.zero_grad()

            # Fine-tune at a horizon/patch size different from pretraining
            # (target_dim=240, patch_len=24) to exercise flexible resampling.
            outputs = model(input=inputs, labels=labels, target_dim = 240, patch_len = 24, stride = 24)
            loss = outputs["loss"]

            loss.backward()
            optimizer.step()

            total_loss += loss.item()

            progress_bar.set_postfix({"loss": f"{loss.item():.4f}"})

        avg_loss = total_loss / len(train_dataloader)
        print(f"Epoch [{epoch+1}/{num_epochs}] completed. Average Loss: {avg_loss:.4f}\n")

    model.save_pretrained("./LightGTS-finetuned")
    print("Training complete!")
test_zero-shot.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import torch
3
+ import numpy as np
4
+ import pandas as pd
5
+ import os
6
+ import random
7
+ import matplotlib.pyplot as plt
8
+ from matplotlib.font_manager import FontProperties
9
+ from torch.utils.data import DataLoader
10
+ from sklearn.preprocessing import StandardScaler
11
+ from configuration_LightGTS import LightGTSConfig
12
+ from modeling_LightGTS import LightGTSForPrediction
13
+ import torch
14
+ from transformers import AutoModelForCausalLM
15
+ from transformers import AutoModelForCausalLM, MODEL_MAPPING
16
+ from transformers import AutoConfig
17
+
18
if __name__ == "__main__":
    # Zero-shot forecasting demo: register the custom LightGTS architecture,
    # load a checkpoint, and generate forecasts at two different patch sizes.
    LightGTS_config = LightGTSConfig(context_points=528, c_in=1, target_dim=192, patch_len=48, stride=48)
    LightGTS_config.save_pretrained("LightGTS-huggingface")

    # Register the custom architecture so the Auto* factories resolve "LightGTS".
    AutoConfig.register("LightGTS",LightGTSConfig)
    AutoModelForCausalLM.register(LightGTSConfig, LightGTSForPrediction)

    model = AutoModelForCausalLM.from_pretrained(
        "./LightGTS-huggingface",
        trust_remote_code=True
    )
    # NOTE(review): absolute, machine-specific paths -- parameterize before sharing.
    df1 = pd.read_csv("/home/wlf/LightGTS/LightGTS/data/predict_datasets/ETTh1.csv")
    df2 = pd.read_csv("/home/wlf/LightGTS/LightGTS/data/predict_datasets/ETTh2.csv")
    print(df1,df2)

    # Slice a lookback window plus a longer ground-truth span from each
    # dataset; tensors are shaped [1, seq_len, 1] (batch, time, vars).
    # NOTE(review): `all` shadows the builtin -- consider renaming.
    start = 300
    lookback_length = 576
    lookback = torch.tensor(df1["HUFL"][start:start+lookback_length].values).unsqueeze(0).unsqueeze(-1).float()
    all_length = 768
    all = torch.tensor(df1["HUFL"][start:start+all_length].values).unsqueeze(0).unsqueeze(-1).float()

    lookback2 = torch.tensor(df2["OT"][start:start+lookback_length].values).unsqueeze(0).unsqueeze(-1).float()
    all2 = torch.tensor(df2["OT"][start:start+all_length].values).unsqueeze(0).unsqueeze(-1).float()
    print(lookback.shape)

    # zero-shot sample
    # The same checkpoint is queried at two patch sizes (48 and 32),
    # exercising runtime patch-length resampling.
    outputs = model.generate(lookback, patch_len = 48, stride_len=48, max_output_length=192)
    outputs2 = model.generate(lookback2, patch_len = 32, stride_len=32, max_output_length=192)
    print(outputs2.shape)
ts_generation_mixin.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ from typing import Any, Dict, List, Optional, Union, Callable
3
+ import torch
4
+ from transformers import GenerationMixin, LogitsProcessorList, StoppingCriteriaList
5
+ from transformers.generation.utils import GenerationConfig, GenerateOutput
6
+ from transformers.utils import ModelOutput
7
+
8
+
9
+ class TSGenerationMixin(GenerationMixin):
10
+ @torch.no_grad()
11
+ def generate(self,
12
+ inputs: Optional[torch.Tensor] = None,
13
+ generation_config: Optional[GenerationConfig] = None,
14
+ logits_processor: Optional[LogitsProcessorList] = None,
15
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
16
+ prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
17
+ synced_gpus: Optional[bool] = None,
18
+ assistant_model: Optional["PreTrainedModel"] = None,
19
+ streamer: Optional["BaseStreamer"] = None,
20
+ negative_prompt_ids: Optional[torch.Tensor] = None,
21
+ negative_prompt_attention_mask: Optional[torch.Tensor] = None,
22
+ revin: Optional[bool] = True,
23
+ patch_len:Optional[int] = None,
24
+ stride_len:Optional[int]= None,
25
+ max_output_length:Optional[int] = None,
26
+ inference_patch_len: Optional[int] = None,
27
+
28
+ **kwargs,
29
+ ) -> Union[GenerateOutput, torch.Tensor]:
30
+
31
+
32
+ if len(inputs.shape) != 3:
33
+ raise ValueError('Input shape must be: [batch_size, seq_len, n_vars]')
34
+
35
+ if revin:
36
+ means = inputs.mean(dim=1, keepdim=True)
37
+ stdev = inputs.std(dim=1, keepdim=True, unbiased=False) + 1e-5
38
+ inputs = (inputs - means) / stdev
39
+
40
+
41
+
42
+ model_inputs = {
43
+ "input" : inputs,
44
+ "patch_len" : patch_len,
45
+ "stride" : stride_len,
46
+ "target_dim" : max_output_length
47
+ }
48
+
49
+
50
+ outputs = self(**model_inputs) #[batch_size,target_dim,n_vars]
51
+
52
+ outputs = outputs["prediction"]
53
+
54
+
55
+ if revin:
56
+
57
+ outputs = (outputs * stdev) + means
58
+
59
+ return outputs
60
+
61
+
62
+ def _update_model_kwargs_for_generation(
63
+ self,
64
+ outputs: ModelOutput,
65
+ model_kwargs: Dict[str, Any],
66
+ horizon_length: int = 1,
67
+ is_encoder_decoder: bool = False,
68
+ standardize_cache_format: bool = False,
69
+ ) -> Dict[str, Any]:
70
+
71
+ return model_kwargs
72
+
73
+