from transformers import PretrainedConfig
from typing import Optional
import math
class LightGTSConfig(PretrainedConfig):
    """Configuration class for the LightGTS time-series model.

    Holds all hyperparameters for patching the input series and for the
    transformer encoder/decoder stack. Inherits from HuggingFace
    ``PretrainedConfig`` so it can be saved/loaded with the standard
    ``from_pretrained`` / ``save_pretrained`` machinery.

    Args:
        context_points: Length of the input (lookback) window.
        c_in: Number of input channels/variates.
        target_dim: Forecast horizon (number of future points predicted).
        patch_len: Length of each patch the series is split into.
        stride: Step between consecutive patches.
        mask_mode: Masking strategy identifier (e.g. ``'patch'``).
        mask_nums: Number of masks applied.
        e_layers: Number of encoder layers.
        d_layers: Number of decoder layers.
        d_model: Transformer hidden dimension.
        n_heads: Number of attention heads.
        shared_embedding: Whether all channels share one patch embedding.
        d_ff: Feed-forward hidden dimension.
        norm: Normalization type (e.g. ``'BatchNorm'``).
        attn_dropout: Dropout rate applied to attention weights.
        dropout: General dropout rate.
        act: Activation function name (e.g. ``"gelu"``).
        res_attention: Whether to use residual attention.
        pre_norm: Whether to apply normalization before attention/FFN.
        store_attn: Whether to keep attention maps for inspection.
        pe: Positional-encoding type (e.g. ``'sincos'``).
        learn_pe: Whether the positional encoding is learnable.
        head_dropout: Dropout rate in the output head.
        head_type: Output head variant (e.g. ``"prediction"``).
        individual: Whether each channel gets its own head.
        y_range: Optional (min, max) range to constrain predictions.
        verbose: Whether to enable verbose logging.
        **kwargs: Forwarded to ``PretrainedConfig.__init__``.
    """

    model_type = "LightGTS"

    def __init__(self, context_points: int = 512, c_in: int = 1, target_dim: int = 96,
                 patch_len: int = 32, stride: int = 32, mask_mode: str = 'patch',
                 mask_nums: int = 3, e_layers: int = 3, d_layers: int = 3,
                 d_model=256, n_heads=16, shared_embedding=True, d_ff: int = 512,
                 norm: str = 'BatchNorm', attn_dropout: float = 0.4,
                 dropout: float = 0., act: str = "gelu",
                 res_attention: bool = True, pre_norm: bool = False,
                 store_attn: bool = False, pe: str = 'sincos',
                 learn_pe: bool = False, head_dropout=0,
                 head_type="prediction", individual=False,
                 y_range: Optional[tuple] = None, verbose: bool = False, **kwargs):
        self.context_points = context_points
        self.c_in = c_in
        self.target_dim = target_dim
        self.patch_len = patch_len
        self.stride = stride
        # Number of patches obtained from the context window; the max() guards
        # against a context shorter than a single patch (yields 1 patch).
        self.num_patch = (max(self.context_points, self.patch_len) - self.patch_len) // self.stride + 1
        self.mask_mode = mask_mode
        self.mask_nums = mask_nums
        self.e_layers = e_layers
        self.d_layers = d_layers
        self.d_model = d_model
        self.n_heads = n_heads
        self.shared_embedding = shared_embedding
        self.d_ff = d_ff
        self.norm = norm
        self.dropout = dropout
        self.attn_dropout = attn_dropout
        self.act = act
        self.res_attention = res_attention
        self.pre_norm = pre_norm
        self.store_attn = store_attn
        self.pe = pe
        self.learn_pe = learn_pe
        self.head_dropout = head_dropout
        self.head_type = head_type
        self.individual = individual
        self.y_range = y_range
        self.verbose = verbose
        # Std of the truncated-normal weight initializer (fixed, not exposed).
        self.initializer_range = 0.02
        # Call last so any attributes passed through kwargs (handled by the
        # PretrainedConfig machinery) are applied on top.
        super().__init__(**kwargs)