BiliSakura committed
Commit b8ca59f · verified · 1 Parent(s): c0b0573

Update all files for SkySensepp

Files changed (1)
  1. hr/configuration_skysensepp.py +168 -0
hr/configuration_skysensepp.py ADDED
@@ -0,0 +1,168 @@
+"""HuggingFace PretrainedConfig for the SkySense++ model."""
+
+from transformers import PretrainedConfig
+
+
+class SkySensePPConfig(PretrainedConfig):
+    """Configuration class for the SkySense++ multi-modal remote sensing model.
+
+    This config captures all hyperparameters for the three backbones
+    (HR / S2 / S1), the fusion encoder, the modality-completion VAE,
+    and the decode head.
+
+    Args:
+        hr_arch (str): SwinTransformerV2 architecture variant. Default ``"huge"``.
+        hr_img_size (int): HR input image size. Default ``512``.
+        hr_patch_size (int): HR patch size. Default ``4``.
+        hr_in_channels (int): HR input channels. Default ``3``.
+        hr_window_size (int): HR window attention size. Default ``8``.
+        hr_drop_path_rate (float): HR stochastic-depth rate. Default ``0.2``.
+        hr_out_indices (tuple): HR output stage indices. Default ``(0, 1, 2, 3)``.
+        hr_use_abs_pos_embed (bool): Use absolute position embeddings in HR.
+            Default ``False``.
+        hr_with_cp (bool): Use activation checkpointing in HR. Default ``True``.
+        hr_pad_small_map (bool): Pad small feature maps in HR. Default ``True``.
+
+        s2_img_size (tuple): S2 input image size. Default ``(16, 16)``.
+        s2_patch_size (int): S2 patch size. Default ``4``.
+        s2_in_channels (int): S2 input channels. Default ``10``.
+        s2_embed_dims (int): S2 embedding dimensions. Default ``1024``.
+        s2_num_layers (int): S2 transformer layers. Default ``24``.
+        s2_num_heads (int): S2 attention heads. Default ``16``.
+        s2_mlp_ratio (int): S2 MLP expansion ratio. Default ``4``.
+        s2_out_indices (tuple): S2 output layer indices. Default ``(5, 11, 17, 23)``.
+        s2_drop_path_rate (float): S2 stochastic-depth rate. Default ``0.3``.
+
+        s1_img_size (tuple): S1 input image size. Default ``(16, 16)``.
+        s1_patch_size (int): S1 patch size. Default ``4``.
+        s1_in_channels (int): S1 input channels. Default ``2``.
+        s1_embed_dims (int): S1 embedding dimensions. Default ``1024``.
+        s1_num_layers (int): S1 transformer layers. Default ``24``.
+        s1_num_heads (int): S1 attention heads. Default ``16``.
+
+        fusion_input_dims (int): Fusion encoder input dims. Default ``2816``.
+        fusion_embed_dims (int): Fusion encoder embed dims. Default ``1024``.
+        fusion_num_layers (int): Fusion encoder layers. Default ``24``.
+        fusion_num_heads (int): Fusion encoder heads. Default ``16``.
+        fusion_with_cls_token (bool): Use CLS token in fusion. Default ``True``.
+        fusion_output_cls_token (bool): Output CLS token from fusion.
+            Default ``True``.
+
+        decode_in_channels (list): Decode head input channel list.
+            Default ``[704, 704, 1408, 2816, 1024]``.
+        decode_channels (int): Decode head internal channels. Default ``512``.
+        decode_num_classes (int): Number of segmentation classes. Default ``65``.
+
+        vocabulary_size (int): Vocabulary size for masked-label tokenisation.
+            Default ``64``.
+        sources (list): Active modality sources. Default ``["hr", "s2", "s1"]``.
+        use_modal_vae (bool): Enable modality-completion VAE. Default ``True``.
+        calendar_time (int): Calendar time embedding size. Default ``366``.
+        vae_subfolder (str): Subfolder for VAE weights (diffusers layout). Default ``"modality_vae"``.
+            VAE loads from ``{path}/{vae_subfolder}/diffusion_pytorch_model.safetensors``,
+            with fallback to ``{path}/modality_vae.safetensors``.
+    """
+
+    model_type = "skysensepp"
+
+    def __init__(
+        self,
+        # --- Backbone HR (SwinTransformerV2MSL) ---
+        hr_arch: str = "huge",
+        hr_img_size: int = 512,
+        hr_patch_size: int = 4,
+        hr_in_channels: int = 3,
+        hr_window_size: int = 8,
+        hr_drop_path_rate: float = 0.2,
+        hr_out_indices: tuple = (0, 1, 2, 3),
+        hr_use_abs_pos_embed: bool = False,
+        hr_with_cp: bool = True,
+        hr_pad_small_map: bool = True,
+        # --- Backbone S2 (VisionTransformerMSL) ---
+        s2_img_size: tuple = (16, 16),
+        s2_patch_size: int = 4,
+        s2_in_channels: int = 10,
+        s2_embed_dims: int = 1024,
+        s2_num_layers: int = 24,
+        s2_num_heads: int = 16,
+        s2_mlp_ratio: int = 4,
+        s2_out_indices: tuple = (5, 11, 17, 23),
+        s2_drop_path_rate: float = 0.3,
+        # --- Backbone S1 (VisionTransformerMSL) ---
+        s1_img_size: tuple = (16, 16),
+        s1_patch_size: int = 4,
+        s1_in_channels: int = 2,
+        s1_embed_dims: int = 1024,
+        s1_num_layers: int = 24,
+        s1_num_heads: int = 16,
+        # --- Fusion (TransformerEncoder) ---
+        fusion_input_dims: int = 2816,
+        fusion_embed_dims: int = 1024,
+        fusion_num_layers: int = 24,
+        fusion_num_heads: int = 16,
+        fusion_with_cls_token: bool = True,
+        fusion_output_cls_token: bool = True,
+        # --- Decode Head ---
+        decode_in_channels: list = None,
+        decode_channels: int = 512,
+        decode_num_classes: int = 65,
+        # --- General ---
+        vocabulary_size: int = 64,
+        sources: list = None,
+        use_modal_vae: bool = True,
+        calendar_time: int = 366,
+        vae_subfolder: str = "modality_vae",
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+
+        # Backbone HR
+        self.hr_arch = hr_arch
+        self.hr_img_size = hr_img_size
+        self.hr_patch_size = hr_patch_size
+        self.hr_in_channels = hr_in_channels
+        self.hr_window_size = hr_window_size
+        self.hr_drop_path_rate = hr_drop_path_rate
+        self.hr_out_indices = tuple(hr_out_indices)
+        self.hr_use_abs_pos_embed = hr_use_abs_pos_embed
+        self.hr_with_cp = hr_with_cp
+        self.hr_pad_small_map = hr_pad_small_map
+
+        # Backbone S2
+        self.s2_img_size = tuple(s2_img_size)
+        self.s2_patch_size = s2_patch_size
+        self.s2_in_channels = s2_in_channels
+        self.s2_embed_dims = s2_embed_dims
+        self.s2_num_layers = s2_num_layers
+        self.s2_num_heads = s2_num_heads
+        self.s2_mlp_ratio = s2_mlp_ratio
+        self.s2_out_indices = tuple(s2_out_indices)
+        self.s2_drop_path_rate = s2_drop_path_rate
+
+        # Backbone S1
+        self.s1_img_size = tuple(s1_img_size)
+        self.s1_patch_size = s1_patch_size
+        self.s1_in_channels = s1_in_channels
+        self.s1_embed_dims = s1_embed_dims
+        self.s1_num_layers = s1_num_layers
+        self.s1_num_heads = s1_num_heads
+
+        # Fusion
+        self.fusion_input_dims = fusion_input_dims
+        self.fusion_embed_dims = fusion_embed_dims
+        self.fusion_num_layers = fusion_num_layers
+        self.fusion_num_heads = fusion_num_heads
+        self.fusion_with_cls_token = fusion_with_cls_token
+        self.fusion_output_cls_token = fusion_output_cls_token
+
+        # Decode Head
+        self.decode_in_channels = decode_in_channels or [704, 704, 1408, 2816, 1024]
+        self.decode_channels = decode_channels
+        self.decode_num_classes = decode_num_classes
+
+        # General
+        self.vocabulary_size = vocabulary_size
+        self.sources = sources or ["hr", "s2", "s1"]
+        self.use_modal_vae = use_modal_vae
+        self.calendar_time = calendar_time
+        self.vae_subfolder = vae_subfolder
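
For reference, a minimal usage sketch of the committed config (the local save path and the override values below are illustrative, not part of this commit). PretrainedConfig subclasses inherit save_pretrained/from_pretrained, and registering the custom model_type with AutoConfig lets it be resolved by name:

from transformers import AutoConfig

from configuration_skysensepp import SkySensePPConfig

# Map the custom model_type string to the config class.
AutoConfig.register("skysensepp", SkySensePPConfig)

# Instantiate with defaults, overriding a few hyperparameters.
config = SkySensePPConfig(
    decode_num_classes=65,
    sources=["hr", "s2"],  # e.g. run without the S1 (SAR) branch
)

# Round-trip through the standard HF serialization path.
config.save_pretrained("./skysensepp")  # writes ./skysensepp/config.json
reloaded = AutoConfig.from_pretrained("./skysensepp")
assert reloaded.model_type == "skysensepp"
assert reloaded.sources == ["hr", "s2"]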
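
The weight-resolution order described in the vae_subfolder docstring, as a standalone sketch; the helper name resolve_vae_state_dict and the use of safetensors.torch.load_file are assumptions for illustration, not code from this commit:

import os

from safetensors.torch import load_file


def resolve_vae_state_dict(path: str, vae_subfolder: str = "modality_vae") -> dict:
    """Prefer the diffusers-layout subfolder, then fall back to a flat file."""
    primary = os.path.join(path, vae_subfolder, "diffusion_pytorch_model.safetensors")
    if os.path.isfile(primary):
        return load_file(primary)
    # Fallback: {path}/modality_vae.safetensors
    return load_file(os.path.join(path, "modality_vae.safetensors"))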