# swinb-debug.py: mmsegmentation training config (forked from open-mmlab/mmsegmentation)
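
# model settings: UPerNet (UPerHead + auxiliary FCNHead) on a Swin-B backbone
# (384x384 pre-training resolution, window size 12), 9 classes.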
norm_cfg = dict(type='SyncBN', requires_grad=True)
backbone_norm_cfg = dict(type='LN', requires_grad=True)
num_classes = 9
model = dict(
    type='EncoderDecoder',
    pretrained=None,
    backbone=dict(
        type='SwinTransformer',
        pretrain_img_size=384,
        embed_dims=128,
        patch_size=4,
        window_size=12,
        mlp_ratio=4,
        depths=[2, 2, 18, 2],
        num_heads=[4, 8, 16, 32],
        strides=(4, 2, 2, 2),
        out_indices=(0, 1, 2, 3),
        qkv_bias=True,
        qk_scale=None,
        patch_norm=True,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.3,
        use_abs_pos_embed=False,
        act_cfg=dict(type='GELU'),
        norm_cfg=backbone_norm_cfg),
    decode_head=dict(
        type='UPerHead',
        in_channels=[128, 256, 512, 1024],
        in_index=[0, 1, 2, 3],
        pool_scales=(1, 2, 3, 6),
        channels=512,
        dropout_ratio=0.1,
        num_classes=num_classes,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=512,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=num_classes,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
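
# dataset settings: custom 'Region' dataset; data_root points at pre-split
# train/val/test folders of (presumably) breast-tissue patches.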
dataset_type = 'Region'
data_root = 'data/breast/split_L1_10xRegion9918'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
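
# training pipeline: random 512x512 crops (no single class may fill more than
# 75% of a crop), random horizontal flips and photometric distortion; label 0
# is treated as ignore via reduce_zero_label.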
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
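
# test pipeline: single scale (512x512), no flip augmentation at inference.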
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(512, 512),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
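
# data loaders: 2 images per GPU with 4 worker processes; each split keeps its
# images and annotation maps in the same folder under data_root.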
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='train',
        ann_dir='train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='val',
        ann_dir='val',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='test',
        ann_dir='test',
        pipeline=test_pipeline))
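
# runtime settings: plain-text logging every 50 iterations, NCCL backend for
# distributed training, no checkpoint loaded or run resumed.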
log_config = dict(
    interval=50, hooks=[dict(type='TextLoggerHook', by_epoch=False)])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
cudnn_benchmark = True
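
# optimizer: AdamW with weight decay disabled for norm layers, the absolute
# position embedding and the relative position bias tables, and a 10x learning
# rate multiplier on parameters whose names contain 'head'.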
optimizer = dict(
    type='AdamW',
    lr=3e-05,
    betas=(0.9, 0.999),
    weight_decay=0.01,
    paramwise_cfg=dict(
        custom_keys=dict(
            absolute_pos_embed=dict(decay_mult=0.0),
            relative_position_bias_table=dict(decay_mult=0.0),
            norm=dict(decay_mult=0.0),
            head=dict(lr_mult=10.0))))
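
# mixed-precision training via Fp16OptimizerHook with a static loss scale of 512.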
optimizer_config = dict(type='Fp16OptimizerHook', loss_scale=512.0)
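
# schedule: polynomial LR decay (power 1.0) to min_lr=0, stepped per iteration,
# with 1500 iterations of linear warmup.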
lr_config = dict(
    policy='poly',
    warmup='linear',
    warmup_iters=1500,
    warmup_ratio=1e-06,
    power=1.0,
    min_lr=0.0,
    by_epoch=False)
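
# train for 10k iterations; save a checkpoint and evaluate mIoU every 1000 iterations.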
runner = dict(type='IterBasedRunner', max_iters=10000)
checkpoint_config = dict(by_epoch=False, interval=1000)
evaluation = dict(interval=1000, metric='mIoU', pre_eval=True)
work_dir = 'data/breast/split_L1_10xRegion9918/save/one_swin_base_384'
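
# Usage sketch (assumes the standard mmsegmentation repo layout; the config
# path below is a placeholder for wherever this file is saved):
#   single GPU:   python tools/train.py path/to/swinb-debug.py
#   distributed:  bash tools/dist_train.sh path/to/swinb-debug.py <NUM_GPUS>
# SyncBN in the heads and the 'nccl' dist backend assume a distributed launch;
# for single-GPU debugging, switching SyncBN to BN is a common workaround.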