ICNet:
ICNet for Real-Time Semantic Segmentation on High-Resolution Images (2018)
PDF: https://arxiv.org/pdf/1704.08545.pdf PyTorch: https://github.com/shanglianlm0525/PyTorch-Networks
1 概述
ICNet是一个基于PSPNet的实时语义分割网络,设计目的是减少PSPNet推断耗时的同时保持较高的检测精度。ICNet可以在1024 × 2048分辨率下保持30fps运行。
2 ICNet
ICNet综合低分辨率图像的处理速度和高分辨率图像的推断质量,提出图像级联框架逐步细化分割预测。
2-1 Cascade Feature Fusion
PyTorch代码:
class CascadeFeatureFusion(nn.Module):
    """Cascade Feature Fusion (CFF) unit from ICNet.

    Upsamples the low-resolution feature map to the spatial size of the
    high-resolution one, fuses the two branches by element-wise sum + ReLU,
    and additionally produces an auxiliary classification map from the
    upsampled low-resolution branch (used for cascade label guidance
    during training).

    Args:
        low_channels:  channels of the low-resolution input.
        high_channels: channels of the high-resolution input.
        out_channels:  channels of the fused output.
        num_classes:   number of classes for the auxiliary classifier.
    """

    def __init__(self, low_channels, high_channels, out_channels, num_classes):
        super(CascadeFeatureFusion, self).__init__()
        # Dilated 3x3 conv enlarges the receptive field of the upsampled
        # low-res features without shrinking their spatial size.
        self.conv_low = Conv3x3BNReLU(low_channels, out_channels, 1, dilation=2)
        self.conv_high = Conv3x3BNReLU(high_channels, out_channels, 1, dilation=1)
        self.relu = nn.ReLU(inplace=True)
        # Auxiliary classifier for the cascade label guidance loss.
        self.conv_low_cls = nn.Conv2d(out_channels, num_classes, 1, bias=False)

    def forward(self, x_low, x_high):
        """Return (fused feature map, auxiliary class logits)."""
        x_low = F.interpolate(x_low, size=x_high.size()[2:], mode='bilinear', align_corners=True)
        x_low = self.conv_low(x_low)
        x_high = self.conv_high(x_high)
        out = self.relu(x_low + x_high)
        x_low_cls = self.conv_low_cls(x_low)
        # BUG FIX: the original `return out,` returned a 1-tuple and dropped
        # the auxiliary logits, while callers unpack two values.
        return out, x_low_cls
2-2 Cascade Label Guidance
为了加强学习过程,我们采用级联标签引导策略。它使用1/16、1/8和1/4的真值标签(ground truth)分别指导低、中、高分辨率分支的学习。在测试阶段,简单地放弃低、中分辨率的引导操作,只保留高分辨率分支。这种级联标签引导策略非常有效:它在降低推理成本的同时,不会降低最终结果的准确性。
2-3 Network Architecture
具体改进如下:
分支 | 过程 | 耗时 |
低分辨率 | 在中分辨率的1/16输出的基础上,再缩放到1/32.经过卷积后,然后使用几个dilated convolution扩展接受野但不缩小尺寸,最终以原图的1/32大小输出feature map。 | 虽然层数较多,但是分辨率低,速度快,且与分支二共享一部分权重 |
中分辨率 | 以原图的1/2的分辨率作为输入,经过卷积后以1/8缩放,得到原图的1/16大小feature map,再将低分辨率分支的输出feature map通过CFF(cascade feature fusion)单元相融合得到最终输出。值得注意的是:低分辨率和中分辨率的卷积参数是共享的。 | 有17个卷积层,与分支一共享一部分权重,与分支一一起一共耗时6ms |
高分辨率 | 原图输入,经过卷积后以1/8缩放,得到原图的1/8大小的feature map,再将中分辨率处理后的输出通过CFF单元融合 | 有3个卷积层,虽然分辨率高,但因为层数少,耗时为9ms |
PyTorch代码:
class Backbone(nn.Module):
    """ResNet-50 feature extractor exposing the four stage outputs c1..c4."""

    def __init__(self, pyramids=[1, 2, 3, 6]):
        # NOTE(review): `pyramids` is never used here; kept only so the
        # existing call signature stays unchanged.
        super(Backbone, self).__init__()
        self.pretrained = torchvision.models.resnet50(pretrained=True)

    def forward(self, x):
        # Stem: conv1 -> bn1 -> relu -> maxpool, then the four residual stages.
        net = self.pretrained
        stem = net.maxpool(net.relu(net.bn1(net.conv1(x))))
        c1 = net.layer1(stem)
        c2 = net.layer2(c1)
        c3 = net.layer3(c2)
        c4 = net.layer4(c3)
        return c1, c2, c3, c4
class PyramidPoolingModule(nn.Module):
    """Pyramid pooling (PSPNet-style, summed variant).

    For each bin size, average-pools the input to a bin x bin grid,
    bilinearly upsamples it back to the input resolution, and adds it to
    the running sum (which starts from the input itself).
    """

    def __init__(self, pyramids=[1, 2, 3, 6]):
        super(PyramidPoolingModule, self).__init__()
        self.pyramids = pyramids

    def forward(self, x):
        out_size = x.shape[2:]
        # Pool to each pyramid bin size, then restore the original resolution.
        pooled = [
            F.interpolate(
                F.adaptive_avg_pool2d(x, output_size=bins),
                size=out_size,
                mode='bilinear',
                align_corners=True,
            )
            for bins in self.pyramids
        ]
        out = x
        for branch in pooled:
            out = out + branch
        return out
class ICNet(nn.Module):
    """ICNet: three-branch image cascade network for real-time segmentation.

    Branches:
      sub1 (high res): full image through three stride-2 convs -> 1/8 map.
      sub2 (mid res):  half image through the shared backbone -> stage-2 map.
      sub4 (low res):  quarter image through the shared backbone + PPM.
    The branches are fused low-to-high with CascadeFeatureFusion units.

    forward() returns [final 1x prediction, 1/4, 1/8, 1/16 auxiliary logits].
    """

    def __init__(self, num_classes):
        super(ICNet, self).__init__()
        # High-resolution branch: three stride-2 convs downsample to 1/8.
        self.conv_sub1 = nn.Sequential(
            Conv3x3BNReLU(3, 32, 2),
            Conv3x3BNReLU(32, 32, 2),
            Conv3x3BNReLU(32, 64, 2)
        )
        # Backbone weights are shared between the 1/2 and 1/4 branches.
        self.backbone = Backbone()
        self.ppm = PyramidPoolingModule()
        self.cff_12 = CascadeFeatureFusion(128, 64, 128, num_classes)
        self.cff_24 = CascadeFeatureFusion(2048, 512, 128, num_classes)
        self.conv_cls = nn.Conv2d(128, num_classes, 1, bias=False)

    def forward(self, x):
        # sub 1: high-resolution branch on the full image.
        x_sub1 = self.conv_sub1(x)
        # sub 2: medium branch; take the backbone's stage-2 features.
        x_sub2 = F.interpolate(x, scale_factor=0.5, mode='bilinear')
        _, x_sub2, _, _ = self.backbone(x_sub2)
        # sub 4: low branch; take the backbone's stage-4 features, then PPM.
        x_sub4 = F.interpolate(x, scale_factor=0.25, mode='bilinear')
        _, _, _, x_sub4 = self.backbone(x_sub4)
        x_sub4 = self.ppm(x_sub4)

        outs = []
        # Fuse low -> mid, then (fused) -> high; each CFF also yields
        # auxiliary logits for cascade label guidance.
        x_cff_24, x_24_cls = self.cff_24(x_sub4, x_sub2)
        outs.append(x_24_cls)
        x_cff_12, x_12_cls = self.cff_12(x_cff_24, x_sub1)
        outs.append(x_12_cls)
        up_x2 = F.interpolate(x_cff_12, scale_factor=2, mode='bilinear')
        up_x2 = self.conv_cls(up_x2)
        outs.append(up_x2)
        up_x8 = F.interpolate(up_x2, scale_factor=4, mode='bilinear')
        outs.append(up_x8)
        # Reorder to full-resolution first: 1 -> 1/4 -> 1/8 -> 1/16.
        outs.reverse()
        # BUG FIX: the original bare `return` returned None, discarding
        # every prediction that was just computed.
        return outs
3 Experimental