pchen182224 committed on
Commit
8bfb2d8
·
verified ·
1 Parent(s): 48e84fd

Delete configuration_LightGTS.py

Browse files
Files changed (1) hide show
  1. configuration_LightGTS.py +0 -39
configuration_LightGTS.py DELETED
@@ -1,39 +0,0 @@
1
- from transformers import PretrainedConfig
2
- from typing import Optional
3
- import math
4
-
5
-
6
class LightGTSConfig(PretrainedConfig):
    """Configuration class for the LightGTS time-series forecasting model.

    Wraps all hyperparameters of the model (input windowing, patching,
    encoder/decoder sizes, regularisation, positional encoding and head
    options) in a HuggingFace ``PretrainedConfig`` so they can be saved
    and reloaded with ``save_pretrained`` / ``from_pretrained``.

    Args:
        context_points: Length of the input look-back window.
        c_in: Number of input channels (variates).
        target_dim: Forecast horizon (number of future points predicted).
        patch_len: Length of each input patch.
        stride: Step between consecutive patches.
        mask_mode: Masking strategy used during pre-training.
        mask_nums: Number of masks applied.
        e_layers: Number of encoder layers.
        d_layers: Number of decoder layers.
        d_model: Transformer hidden size.
        n_heads: Number of attention heads.
        shared_embedding: Whether all channels share one embedding.
        d_ff: Feed-forward (MLP) hidden size.
        norm: Normalisation type (e.g. ``'BatchNorm'``).
        attn_dropout: Dropout applied to attention weights.
        dropout: General dropout rate.
        act: Activation function name.
        res_attention: Whether to use residual attention.
        pre_norm: Whether to apply normalisation before attention (pre-LN).
        store_attn: Whether attention maps are stored for inspection.
        pe: Positional-encoding type (e.g. ``'sincos'``).
        learn_pe: Whether the positional encoding is learnable.
        head_dropout: Dropout applied in the prediction head.
        head_type: Head variant (e.g. ``"prediction"``).
        individual: Whether each channel gets its own head.
        y_range: Optional (min, max) range to clamp predictions to.
        verbose: Whether to print extra information.
        **kwargs: Forwarded to ``PretrainedConfig``.
    """

    model_type = "LightGTS"

    def __init__(self, context_points:int = 512, c_in:int = 1, target_dim:int = 96, patch_len:int = 32, stride:int = 32, mask_mode:str = 'patch', mask_nums:int = 3,
                 e_layers:int=3, d_layers:int=3, d_model=256, n_heads=16, shared_embedding=True, d_ff:int=512,
                 norm:str='BatchNorm', attn_dropout:float=0.4, dropout:float=0., act:str="gelu",
                 res_attention:bool=True, pre_norm:bool=False, store_attn:bool=False,
                 pe:str='sincos', learn_pe:bool=False, head_dropout = 0,
                 head_type = "prediction", individual = False,
                 y_range:Optional[tuple]=None, verbose:bool=False, **kwargs):

        self.context_points = context_points
        self.c_in = c_in
        self.target_dim = target_dim
        self.patch_len = patch_len
        self.stride = stride
        # Number of patches extracted from the context window; max() guards
        # against a context shorter than a single patch.
        self.num_patch = (max(self.context_points, self.patch_len) - self.patch_len) // self.stride + 1
        self.mask_mode = mask_mode
        self.mask_nums = mask_nums
        self.e_layers = e_layers
        self.d_layers = d_layers
        self.d_model = d_model
        self.n_heads = n_heads
        self.shared_embedding = shared_embedding
        self.d_ff = d_ff
        self.norm = norm
        self.dropout = dropout
        self.attn_dropout = attn_dropout
        self.head_dropout = head_dropout
        self.act = act
        # BUGFIX: the following parameters were previously accepted but never
        # stored, so they were silently dropped and lost on config
        # serialisation round-trips. Persist them like the others.
        self.res_attention = res_attention
        self.pre_norm = pre_norm
        self.store_attn = store_attn
        self.pe = pe
        self.learn_pe = learn_pe
        self.head_type = head_type
        self.individual = individual
        self.y_range = y_range
        self.verbose = verbose
        # Std-dev for weight initialisation, per HF convention.
        self.initializer_range = 0.02
        super().__init__(**kwargs)