[Enhance] Refactor inverted residual (open-mmlab#164)
* [Enhance] Unified InvertedResidual in MobileNetV2 and FastSCNN

* [Enhance] Unified InvertedResidual in MobileNetV2 and FastSCNN
xvjiarui authored Sep 27, 2020
1 parent 924571e commit f86c24d
Showing 8 changed files with 50 additions and 189 deletions.
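
In short, the two per-backbone copies of InvertedResidual are replaced by a single shared implementation under mmseg/models/utils. A minimal sketch of the resulting import (paths taken from the diffs below; the snippet itself is illustrative, not part of the commit):

```python
# Before: each backbone carried its own copy of the block
#   mmseg/models/backbones/mobilenet_v2.py   -> class InvertedResidual(...)
#   mmseg/utils/inverted_residual_module.py  -> class InvertedResidual(...)  (used by FastSCNN)
#
# After: one shared block, imported by both backbones
from mmseg.models.utils import InvertedResidual  # mmseg/models/utils/inverted_residual.py
```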
2 changes: 1 addition & 1 deletion configs/fastscnn/README.md
@@ -15,4 +15,4 @@
### Cityscapes
| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | download |
|------------|-----------|-----------|--------:|----------|----------------|------:|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| Fast-SCNN | Fast-SCNN | 512x1024 | 80000 | 8.4 | 63.61 | 69.06 | - | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmsegmentation/v0.5/fast_scnn/fast_scnn_4x8_80k_lr0.12_cityscapes-cae6c46a.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmsegmentation/v0.5/fast_scnn/fast_scnn_4x8_80k_lr0.12_cityscapes-20200807_165744.log.json) |
| Fast-SCNN | Fast-SCNN | 512x1024 | 80000 | 8.4 | 63.61 | 69.06 | - | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fast_scnn/fast_scnn_4x8_80k_lr0.12_cityscapes-f5096c79.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fast_scnn/fast_scnn_4x8_80k_lr0.12_cityscapes-20200807_165744.log.json) |
70 changes: 0 additions & 70 deletions configs/fastscnn/fast_scnn_4x8_80k_lr0.12_pascal.py

This file was deleted.

2 changes: 1 addition & 1 deletion mmseg/models/backbones/fast_scnn.py
@@ -6,8 +6,8 @@

from mmseg.models.decode_heads.psp_head import PPM
from mmseg.ops import resize
from mmseg.utils import InvertedResidual
from ..builder import BACKBONES
from ..utils.inverted_residual import InvertedResidual


class LearningToDownsample(nn.Module):
92 changes: 1 addition & 91 deletions mmseg/models/backbones/mobilenet_v2.py
@@ -1,102 +1,12 @@
import logging

import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule, constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm

from ..builder import BACKBONES
from ..utils import make_divisible


class InvertedResidual(nn.Module):
"""InvertedResidual block for MobileNetV2.
Args:
in_channels (int): The input channels of the InvertedResidual block.
out_channels (int): The output channels of the InvertedResidual block.
stride (int): Stride of the middle (first) 3x3 convolution.
expand_ratio (int): Adjusts number of channels of the hidden layer
in InvertedResidual by this amount.
dilation (int): Dilation rate of depthwise conv. Default: 1
conv_cfg (dict): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU6').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor
"""

def __init__(self,
in_channels,
out_channels,
stride,
expand_ratio,
dilation=1,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU6'),
with_cp=False):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2], f'stride must in [1, 2]. ' \
f'But received {stride}.'
self.with_cp = with_cp
self.use_res_connect = self.stride == 1 and in_channels == out_channels
hidden_dim = int(round(in_channels * expand_ratio))

layers = []
if expand_ratio != 1:
layers.append(
ConvModule(
in_channels=in_channels,
out_channels=hidden_dim,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
layers.extend([
ConvModule(
in_channels=hidden_dim,
out_channels=hidden_dim,
kernel_size=3,
stride=stride,
padding=dilation,
dilation=dilation,
groups=hidden_dim,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
ConvModule(
in_channels=hidden_dim,
out_channels=out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
])
self.conv = nn.Sequential(*layers)

def forward(self, x):

def _inner_forward(x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)

if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)

return out
from ..utils import InvertedResidual, make_divisible


@BACKBONES.register_module()
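
With the duplicated class deleted, MobileNetV2 now builds its stages from the shared block. A rough sketch of constructing a dilated, stride-1 stage with it (the channel numbers and dilation are hypothetical, chosen only to illustrate the signature shown in the removed code above, which the shared block keeps):

```python
import torch.nn as nn

from mmseg.models.utils import InvertedResidual

# Hypothetical dilated stage, e.g. for a dilated MobileNetV2 variant.
stage = nn.Sequential(
    InvertedResidual(
        in_channels=64,
        out_channels=96,
        stride=1,
        expand_ratio=6,
        dilation=2,
        norm_cfg=dict(type='BN'),
        act_cfg=dict(type='ReLU6')),
    InvertedResidual(
        in_channels=96,
        out_channels=96,
        stride=1,
        expand_ratio=6,
        dilation=2))
```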
5 changes: 4 additions & 1 deletion mmseg/models/utils/__init__.py
@@ -1,5 +1,8 @@
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .res_layer import ResLayer
from .self_attention_block import SelfAttentionBlock

__all__ = ['ResLayer', 'SelfAttentionBlock', 'make_divisible']
__all__ = [
'ResLayer', 'SelfAttentionBlock', 'make_divisible', 'InvertedResidual'
]
mmseg/utils/inverted_residual_module.py → mmseg/models/utils/inverted_residual.py
@@ -1,22 +1,29 @@
from mmcv.cnn import ConvModule, build_norm_layer
from torch import nn
from mmcv.cnn import ConvModule
from torch import nn as nn
from torch.utils import checkpoint as cp


class InvertedResidual(nn.Module):
"""Inverted residual module.
"""InvertedResidual block for MobileNetV2.
Args:
in_channels (int): The input channels of the InvertedResidual block.
out_channels (int): The output channels of the InvertedResidual block.
stride (int): Stride of the middle (first) 3x3 convolution.
expand_ratio (int): adjusts number of channels of the hidden layer
expand_ratio (int): Adjusts number of channels of the hidden layer
in InvertedResidual by this amount.
dilation (int): Dilation rate of depthwise conv. Default: 1
conv_cfg (dict): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU6').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor
"""

def __init__(self,
@@ -27,47 +34,59 @@ def __init__(self,
dilation=1,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU6')):
act_cfg=dict(type='ReLU6'),
with_cp=False):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]

assert stride in [1, 2], f'stride must in [1, 2]. ' \
f'But received {stride}.'
self.with_cp = with_cp
self.use_res_connect = self.stride == 1 and in_channels == out_channels
hidden_dim = int(round(in_channels * expand_ratio))
self.use_res_connect = self.stride == 1 \
and in_channels == out_channels

layers = []
if expand_ratio != 1:
# pw
layers.append(
ConvModule(
in_channels,
hidden_dim,
in_channels=in_channels,
out_channels=hidden_dim,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
layers.extend([
# dw
ConvModule(
hidden_dim,
hidden_dim,
in_channels=hidden_dim,
out_channels=hidden_dim,
kernel_size=3,
padding=dilation,
stride=stride,
padding=dilation,
dilation=dilation,
groups=hidden_dim,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
# pw-linear
nn.Conv2d(hidden_dim, out_channels, 1, 1, 0, bias=False),
build_norm_layer(norm_cfg, out_channels)[1],
ConvModule(
in_channels=hidden_dim,
out_channels=out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
])
self.conv = nn.Sequential(*layers)

def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)

def _inner_forward(x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)

if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
return self.conv(x)
out = _inner_forward(x)

return out
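
For reference, a small sketch of how the unified block behaves after this change: the identity shortcut applies only when stride is 1 and input/output channels match, and with_cp recomputes the inner forward during backward via torch.utils.checkpoint to save memory. The shapes and values below are illustrative, not from the commit:

```python
import torch

from mmseg.models.utils import InvertedResidual

# stride=1 with matching channels -> residual (identity) shortcut is added
block = InvertedResidual(in_channels=16, out_channels=16, stride=1, expand_ratio=6)
x = torch.randn(2, 16, 64, 64)
assert block(x).shape == (2, 16, 64, 64)

# with_cp=True trades compute for memory: the inner forward is
# re-executed during backward through torch.utils.checkpoint
block_cp = InvertedResidual(
    in_channels=16, out_channels=16, stride=1, expand_ratio=6, with_cp=True)
x = torch.randn(2, 16, 64, 64, requires_grad=True)
block_cp(x).sum().backward()
```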
3 changes: 1 addition & 2 deletions mmseg/utils/__init__.py
@@ -1,5 +1,4 @@
from .collect_env import collect_env
from .inverted_residual_module import InvertedResidual
from .logger import get_root_logger

__all__ = ['get_root_logger', 'collect_env', 'InvertedResidual']
__all__ = ['get_root_logger', 'collect_env']
2 changes: 1 addition & 1 deletion tests/test_utils/test_inverted_residual_module.py
@@ -1,7 +1,7 @@
import pytest
import torch

from mmseg.utils import InvertedResidual
from mmseg.models.utils import InvertedResidual


def test_inv_residual():
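
The existing test now exercises the class from its new location. A comparable sketch of what such a check could look like (assumed shapes and values, not the repository's actual test body):

```python
import pytest
import torch

from mmseg.models.utils import InvertedResidual


def test_inverted_residual_sketch():
    # stride=2 halves the spatial resolution and changes the channel count
    block = InvertedResidual(
        in_channels=32, out_channels=64, stride=2, expand_ratio=6)
    x = torch.randn(4, 32, 56, 56)
    assert block(x).shape == (4, 64, 28, 28)

    # strides other than 1 or 2 are rejected by the assert in __init__
    with pytest.raises(AssertionError):
        InvertedResidual(in_channels=32, out_channels=64, stride=3, expand_ratio=6)
```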
