Image Segmentation · Transformers · PyTorch · upernet
mccaly committed · Commit ca97bc4 · 1 Parent(s): de2d361

Upload config.json

Files changed (1)
  1. config.json +232 -0
config.json ADDED
@@ -0,0 +1,232 @@
{
  "model_type": "unet",
  "model_config": {
    "encoder": {
      "type": "resnet18",
      "pretrained": true
    },
    "decoder": {
      "type": "unet",
      "num_classes": 104
    }
  },
  "norm_cfg": {
    "type": "SyncBN",
    "requires_grad": true
  },
  "model": {
    "type": "EncoderDecoder",
    "pretrained": "pretrained/swin_small_patch4_window7_224.pth",
    "backbone": {
      "type": "SwinTransformer",
      "embed_dim": 96,
      "depths": [2, 2, 18, 2],
      "num_heads": [3, 6, 12, 24],
      "window_size": 7,
      "mlp_ratio": 4.0,
      "qkv_bias": true,
      "qk_scale": null,
      "drop_rate": 0.0,
      "attn_drop_rate": 0.0,
      "drop_path_rate": 0.3,
      "ape": false,
      "patch_norm": true,
      "out_indices": [0, 1, 2, 3],
      "use_checkpoint": false
    },
    "decode_head": {
      "type": "UPerHead",
      "in_channels": [96, 192, 384, 768],
      "in_index": [0, 1, 2, 3],
      "pool_scales": [1, 2, 3, 6],
      "channels": 512,
      "dropout_ratio": 0.1,
      "num_classes": 104,
      "norm_cfg": {
        "type": "SyncBN",
        "requires_grad": true
      },
      "align_corners": false,
      "loss_decode": {
        "type": "CrossEntropyLoss",
        "use_sigmoid": false,
        "loss_weight": 1.0
      }
    },
    "auxiliary_head": {
      "type": "FCNHead",
      "in_channels": 384,
      "in_index": 2,
      "channels": 256,
      "num_convs": 1,
      "concat_input": false,
      "dropout_ratio": 0.1,
      "num_classes": 104,
      "norm_cfg": {
        "type": "SyncBN",
        "requires_grad": true
      },
      "align_corners": false,
      "loss_decode": {
        "type": "CrossEntropyLoss",
        "use_sigmoid": false,
        "loss_weight": 0.4
      }
    },
    "train_cfg": {},
    "test_cfg": {
      "mode": "whole"
    }
  },
  "dataset_type": "CustomDataset",
  "data_root": "./data/FoodSeg103/Images/",
  "img_norm_cfg": {
    "mean": [123.675, 116.28, 103.53],
    "std": [58.395, 57.12, 57.375],
    "to_rgb": true
  },
  "crop_size": [512, 1024],
  "train_pipeline": [
    {"type": "LoadImageFromFile"},
    {"type": "LoadAnnotations"},
    {
      "type": "Resize",
      "img_scale": [2048, 1024],
      "ratio_range": [0.5, 2.0]
    },
    {
      "type": "RandomCrop",
      "crop_size": [512, 1024],
      "cat_max_ratio": 0.75
    },
    {"type": "RandomFlip", "prob": 0.5},
    {"type": "PhotoMetricDistortion"},
    {
      "type": "Normalize",
      "mean": [123.675, 116.28, 103.53],
      "std": [58.395, 57.12, 57.375],
      "to_rgb": true
    },
    {
      "type": "Pad",
      "size": [512, 1024],
      "pad_val": 0,
      "seg_pad_val": 255
    },
    {"type": "DefaultFormatBundle"},
    {"type": "Collect", "keys": ["img", "gt_semantic_seg"]}
  ],
  "test_pipeline": [
    {"type": "LoadImageFromFile"},
    {
      "type": "MultiScaleFlipAug",
      "img_scale": [2048, 1024],
      "flip": false,
      "transforms": [
        {"type": "Resize", "keep_ratio": true},
        {"type": "RandomFlip"},
        {
          "type": "Normalize",
          "mean": [123.675, 116.28, 103.53],
          "std": [58.395, 57.12, 57.375],
          "to_rgb": true
        },
        {"type": "ImageToTensor", "keys": ["img"]},
        {"type": "Collect", "keys": ["img"]}
      ]
    }
  ],
  "data": {
    "samples_per_gpu": 2,
    "workers_per_gpu": 2,
    "train": {
      "type": "CustomDataset",
      "data_root": "./data/FoodSeg103/Images/",
      "img_dir": "img_dir/train",
      "ann_dir": "ann_dir/train",
      "pipeline": [
        {"type": "LoadImageFromFile"},
        {"type": "LoadAnnotations"},
        {
          "type": "Resize",
          "img_scale": [2048, 1024],
          "ratio_range": [0.5, 2.0]
        },
        {
          "type": "RandomCrop",
          "crop_size": [512, 1024],
          "cat_max_ratio": 0.75
        },
        {"type": "RandomFlip", "prob": 0.5},
        {"type": "PhotoMetricDistortion"},
        {
          "type": "Normalize",
          "mean": [123.675, 116.28, 103.53],
          "std": [58.395, 57.12, 57.375],
          "to_rgb": true
        },
        {
          "type": "Pad",
          "size": [512, 1024],
          "pad_val": 0,
          "seg_pad_val": 255
        },
        {"type": "DefaultFormatBundle"},
        {"type": "Collect", "keys": ["img", "gt_semantic_seg"]}
      ]
    },
    "val": {
      "type": "CustomDataset",
      "data_root": "./data/FoodSeg103/Images/",
      "img_dir": "img_dir/test",
      "ann_dir": "ann_dir/test",
      "pipeline": [
        {"type": "LoadImageFromFile"},
        {
          "type": "MultiScaleFlipAug",
          "img_scale": [2048, 1024],
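Aside from the small top-level "model_type"/"model_config" stanza, the file follows the MMSegmentation config convention: an EncoderDecoder with a Swin-Small backbone (embed_dim 96, depths [2, 2, 18, 2]) and a UPerHead decode head predicting the 104 FoodSeg103 classes, plus an FCNHead auxiliary loss on the stage-2 features. Below is a minimal sketch of how such a config is typically consumed for inference, assuming an MMSegmentation 0.x install with mmcv-full; the checkpoint filename and test image path are hypothetical placeholders, not files shipped with this commit.

# Minimal inference sketch, assuming mmsegmentation 0.x with mmcv-full installed.
# mmcv's Config.fromfile accepts .json configs, so the file above loads directly.
from mmcv.cnn.utils import revert_sync_batchnorm
from mmseg.apis import init_segmentor, inference_segmentor

config_file = "config.json"                       # the file added in this commit
checkpoint = "upernet_swin_small_foodseg103.pth"  # hypothetical checkpoint name

# Build the EncoderDecoder (SwinTransformer backbone + UPerHead) and load weights.
model = init_segmentor(config_file, checkpoint, device="cuda:0")

# The config uses SyncBN, which requires a distributed process group;
# for single-GPU inference it is usually reverted to plain BatchNorm.
model = revert_sync_batchnorm(model)

# Whole-image inference, matching test_cfg.mode == "whole".
seg_map = inference_segmentor(model, "demo_food.jpg")  # hypothetical test image

Note that the Normalize means and stds ([123.675, 116.28, 103.53] and [58.395, 57.12, 57.375]) are the standard ImageNet channel statistics scaled to the 0-255 pixel range, so the Swin backbone sees inputs distributed like its ImageNet pretraining data.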