@@ -11,7 +11,7 @@
 import re
 from collections import OrderedDict
 from mmdet.models.registry import BACKBONES
-from convs.operators.tsmconv import TSMConv
+from alignshift.operators.tsmconv import TSMConv
 import torch.utils.checkpoint as cp
 from mmdet.models.utils import build_conv_layer, build_norm_layer
 # mybn = nn.BatchNorm3d
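The import now pulls TSMConv from the alignshift package instead of convs. TSMConv is the temporal-shift convolution used throughout this backbone; judging by the n_fold argument, it presumably follows the Temporal Shift Module recipe of shifting 1/n_fold of the channels one step along the CT-slice axis in each direction before a 2D convolution, mixing information across neighbouring slices at zero extra FLOPs. A minimal sketch of that shift (illustrative only; the real operator lives in alignshift/operators/tsmconv.py and may differ):

```python
import torch

def temporal_shift(x, n_fold=8):
    """Shift 1/n_fold of the channels one slice forward and 1/n_fold one
    slice backward along the depth axis, in the spirit of the Temporal
    Shift Module; the real TSMConv implementation may differ."""
    # x: (N, C, D, H, W), D = CT slices
    fold = x.size(1) // n_fold
    out = torch.zeros_like(x)
    out[:, :fold, :-1] = x[:, :fold, 1:]                   # shift toward earlier slices
    out[:, fold:2 * fold, 1:] = x[:, fold:2 * fold, :-1]   # shift toward later slices
    out[:, 2 * fold:] = x[:, 2 * fold:]                    # remaining channels untouched
    return out

print(temporal_shift(torch.randn(1, 16, 3, 8, 8)).shape)  # torch.Size([1, 16, 3, 8, 8])
```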
@@ -29,12 +29,12 @@ class _DenseLayer(nn.Sequential):
     def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, n_fold, memory_efficient=False):
         super(_DenseLayer, self).__init__()
         self.add_module('norm1', build_norm_layer(norm_cfg, num_input_features, postfix=1)[1]),
-        self.add_module('relu1', nn.ReLU(inplace=True)),
+        self.add_module('relu1', nn.ReLU(inplace=False)),
         self.add_module('conv1', TSMConv(num_input_features, bn_size *
                                          growth_rate, kernel_size=1, stride=1,
                                          bias=False, n_fold=n_fold)),
         self.add_module('norm2', build_norm_layer(norm_cfg, bn_size * growth_rate, postfix=1)[1]),
-        self.add_module('relu2', nn.ReLU(inplace=True)),
+        self.add_module('relu2', nn.ReLU(inplace=False)),
         self.add_module('conv2', TSMConv(bn_size * growth_rate, growth_rate,
                                          kernel_size=3, stride=1, padding=1,
                                          bias=False, n_fold=n_fold)),
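Both activations in _DenseLayer switch to inplace=False. An in-place ReLU overwrites its input buffer, which is hazardous here: dense connectivity reuses feature maps across layers, and memory_efficient mode recomputes them via torch.utils.checkpoint (imported as cp above), so a clobbered tensor can surface as wrong values or a "modified by an inplace operation" RuntimeError during backward. A toy demonstration of the aliasing hazard (not repo code):

```python
import torch
import torch.nn as nn

# inplace=True rewrites its input buffer, so any other consumer still holding
# that tensor (a dense connection, or a checkpointed segment recomputed during
# backward) silently sees rectified values instead of the original features.
x = torch.tensor([-1.0, 2.0, -3.0, 4.0])
original = x.clone()          # what a reader of the shared buffer expects
shared_view = x               # e.g. a feature map reused by a later layer
_ = nn.ReLU(inplace=True)(x)
print(torch.equal(shared_view, original))  # False: negatives were zeroed in place
```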
@@ -83,7 +83,7 @@ def __init__(self, num_input_features, num_output_features):
         self.add_module('relu', nn.ReLU(inplace=True))
         self.add_module('conv', TSMConv(num_input_features, num_output_features,
                                         kernel_size=1, stride=1, bias=False, tsm=False))
-        self.add_module('pool', nn.AvgPool3d(kernel_size=[1, 2, 2], stride=[1, 2, 2]))
+        self.add_module('pool', nn.AvgPool3d(kernel_size=[1, 2, 2], stride=[1, 2, 2]))  # , padding=[0,1,1]
 
 class _Reduction_z(nn.Sequential):
     def __init__(self, input_features, input_slice):
@@ -98,8 +98,7 @@ def __init__(self,
                  n_cts=3,
                  fpn_finest_layer=1,
                  memory_efficient=True,
-                 n_fold=8,
-                 syncbn=True):
+                 n_fold=8,):
         super().__init__()
         self.depth = 121
         self.feature_upsample = True
@@ -126,16 +125,17 @@ def __init__(self,
         # Each denseblock
         num_features = num_init_features
         for i, num_layers in enumerate(block_config):
-            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, memory_efficient=memory_efficient,
-                                bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate, n_fold=n_fold)
+            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
+                                bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate,
+                                n_fold=self.n_fold, memory_efficient=memory_efficient)
             self.add_module('denseblock%d' % (i + 1), block)
             num_features = num_features + num_layers * growth_rate
+            reductionz = _Reduction_z(num_features, self.n_cts)
+            self.add_module('reductionz%d' % (i + 1), reductionz)
             if i != len(block_config) - 1:
                 trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
                 self.add_module('transition%d' % (i + 1), trans)
                 num_features = num_features // 2
-                reductionz = _Reduction_z(num_features, self.n_cts)
-                self.add_module('reductionz%d' % (i + 1), reductionz)
 
         # Final batch norm
         # self.add_module('norm5', nn.BatchNorm2d(num_features))
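The _Reduction_z modules are now built inside the block loop, before the transition halves the channel count, so reductionz%d sees num_features exactly as it leaves denseblock%d (the rewritten forward below applies them to the pre-transition features accordingly). The module body is not part of this diff; from its signature _Reduction_z(input_features, input_slice) and the redc*.squeeze(2) calls in forward, it presumably collapses the n_cts slice dimension down to a single slice. A hypothetical stand-in with that interface:

```python
import torch
import torch.nn as nn

# Hypothetical stand-in with the same interface as _Reduction_z (its body is
# not shown in this diff): collapse the n_cts-slice depth axis to 1 so that
# forward() can squeeze(2) it away and hand 2D maps to the FPN laterals.
class ReductionZSketch(nn.Sequential):
    def __init__(self, input_features, input_slice):
        super().__init__()
        # depth-only convolution: (N, C, D, H, W) -> (N, C, 1, H, W)
        self.add_module('conv_z', nn.Conv3d(input_features, input_features,
                                            kernel_size=(input_slice, 1, 1),
                                            bias=False))

redc = ReductionZSketch(256, 3)(torch.randn(2, 256, 3, 32, 32))
print(redc.squeeze(2).shape)  # torch.Size([2, 256, 32, 32])
```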
@@ -159,38 +159,40 @@ def __init__(self,
                 nn.init.kaiming_uniform_(layer.weight, a=1)
                 nn.init.constant_(layer.bias, 0)
         self.init_weights()
-        if syncbn:
-            self = nn.SyncBatchNorm.convert_sync_batchnorm(self)
+        # if syncbn:
+        #     self = nn.SyncBatchNorm.convert_sync_batchnorm(self)
 
     def forward(self, x):
         x = self.conv0(x)
         x = self.norm0(x)
-        relu0 = self.relu0(x)
-        pool0 = self.pool0(relu0)
+        x = self.relu0(x)
+        x = self.pool0(x)
 
-        db1 = self.denseblock1(pool0)
-        ts1 = self.transition1(db1)
+        x = self.denseblock1(x)
+        redc1 = self.reductionz1(x)
+        x = self.transition1(x)
 
-        db2 = self.denseblock2(ts1)
-        ts2 = self.transition2(db2)
+        x = self.denseblock2(x)
+        redc2 = self.reductionz2(x)
+        x = self.transition2(x)
 
-        db3 = self.denseblock3(ts2)
 
+
+        x = self.denseblock3(x)
+        redc3 = self.reductionz3(x)
         # truncated since here since we find it works better in DeepLesion
         # ts3 = self.transition3(db3)
         # db4 = self.denseblock4(ts3)
 
-        if self.feature_upsample:
-            ftmaps = [relu0[:,:,self.mid_ct,...], db1[:,:,self.mid_ct,...], db2[:,:,self.mid_ct,...], db3[:,:,self.mid_ct,...]]
-            x = self.lateral4(ftmaps[-1])
-            for p in range(3, self.fpn_finest_layer - 1, -1):
-                x = F.interpolate(x, scale_factor=2, mode="nearest")
-                y = ftmaps[p - 1]
-                lateral = getattr(self, 'lateral%d' % p)(y)
-                x += lateral
-            return [x]
-        else:
-            return [db3]
+        # if self.feature_upsample:
+        ftmaps = [None, redc1.squeeze(2), redc2.squeeze(2), redc3.squeeze(2)]
+        x = self.lateral4(ftmaps[-1])
+        for p in range(3, self.fpn_finest_layer - 1, -1):
+            x = F.interpolate(x, scale_factor=2, mode="nearest")
+            y = ftmaps[p - 1]
+            lateral = getattr(self, 'lateral%d' % p)(y)
+            x += lateral
+        return [x]
 
     def init_weights(self, pretrained=True):
         pattern = re.compile(
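The rewritten forward keeps a single running tensor x, taps a z-reduced feature map after each dense block, and makes the FPN-style top-down fusion unconditional. Note that ftmaps[0] is now a None placeholder, so fpn_finest_layer presumably stays at 2 or above in practice; at p == 1 the loop would otherwise dereference it. A shape walk-through with dummy tensors, assuming DenseNet-121 block widths (256/512/1024 after blocks 1-3) and a 512x512 slice input; the lateral convs below are stand-ins for self.lateral2..lateral4:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

# Dummy walk-through of the new top-down path (hypothetical channel counts
# matching DenseNet-121 widths; laterals stand in for self.lateral2..4).
fpn_finest_layer = 2  # ftmaps[0] is None, so stop before p == 1
laterals = {4: nn.Conv2d(1024, 256, 1),
            3: nn.Conv2d(512, 256, 1),
            2: nn.Conv2d(256, 256, 1)}
ftmaps = [None,
          torch.randn(1, 256, 128, 128),   # redc1.squeeze(2), stride 4
          torch.randn(1, 512, 64, 64),     # redc2.squeeze(2), stride 8
          torch.randn(1, 1024, 32, 32)]    # redc3.squeeze(2), stride 16
x = laterals[4](ftmaps[-1])                # coarsest level, 32x32
for p in range(3, fpn_finest_layer - 1, -1):
    x = F.interpolate(x, scale_factor=2, mode="nearest")  # upsample 2x
    x = x + laterals[p](ftmaps[p - 1])                    # add lateral
print(x.shape)  # torch.Size([1, 256, 128, 128])
```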