• Building an SSD Object Detection Framework from Scratch


    Reference: Single Shot Multibox Detection (SSD)

    The code for this article lives on GitHub: https://github.com/XinetAI/CVX/blob/master/app/gluoncvx/ssd.py

    For training the SSD, see: https://github.com/XinetAI/CVX/blob/master/目标检测/训练SSD.ipynb

    Li Mu's tutorial explains SSD very well, but most of its code is written in a functional style. In this article I wrap the basic components of SSD into classes, so that an SSD can be assembled like building blocks: you can swap the base network for any convolutional module you like, and use the remaining components just like ordinary blocks such as nn.Dense!


    First, load the required packages:

    %matplotlib inline
    import d2lzh as d2l
    from mxnet import autograd, contrib, gluon, image, init, nd
    from mxnet.gluon import loss as gloss, nn
    import time
    

    Basic Components

    Single shot multibox detection (SSD) [1] consists mainly of a base network block followed by several multi-scale feature blocks in series. The base network block extracts features from the raw image, so a commonly used deep convolutional neural network is usually chosen for it. Broadly, an SSD can be decomposed into four kinds of components: the base network, the class prediction layer, the bounding box prediction layer, and the height-and-width halving block.

    The class prediction layer and the bounding box prediction layer are designed in the same spirit as replacing fully connected layers with global average pooling: predictions are produced as channels of a convolution over the feature map.

    Class Prediction Layer

    Let's implement the class prediction layer:

    class ClassPredictor(nn.Block):
        def __init__(self, num_anchors, num_classes, **kwargs):
            super().__init__(**kwargs)
            self.num_classes = num_classes  # number of object classes
            self.num_anchors = num_anchors  # number of anchors per spatial location
            # class prediction layer: one output channel per anchor per class (plus background)
            self.cls_predictor = nn.Conv2D(
                self.num_anchors * (self.num_classes + 1), kernel_size=3, padding=1)
    
        def forward(self, Y):
            cls_preds = self.cls_predictor(Y)
            return cls_preds
    

    ClassPredictor produces, at the same spatial resolution as the input feature map, num_anchors * (num_classes + 1) class prediction channels (the extra 1 is the background class).

    Let's simulate two feature maps of different scales to test it; with 5 anchors and 10 classes, each output has 5 * (10 + 1) = 55 channels:

    Y = nd.zeros((2, 8, 20, 20))    # a batch of 8 feature maps of size 20 x 20
    cls = ClassPredictor(5, 10)     # instantiate
    cls.initialize()                # initialize the parameters
    cls_preds = cls(Y)              # class prediction maps
    
    Y1 = nd.zeros((2, 16, 10, 10))  # a batch of 16 feature maps of size 10 x 10
    cls1 = ClassPredictor(5, 10)    # instantiate
    cls1.initialize()               # initialize the parameters
    cls_preds1 = cls1(Y1)           # class prediction maps at the second scale
    
    cls_preds.shape, cls_preds1.shape
    
    ((2, 55, 20, 20), (2, 55, 10, 10))
    

    We need to reshape these predictions into a consistent format (channel dimension last, then flattened) and concatenate them across scales so that subsequent computation is simpler.

    def flatten_pred(pred):  # move the channel dimension to the end, then flatten
        return pred.transpose((0, 2, 3, 1)).flatten()
    
    def concat_preds(preds): # concatenate the class predictions from different scales
        return nd.concat(*[flatten_pred(p) for p in preds], dim=1)
    
    concat_preds([cls_preds, cls_preds1]).shape  # concatenate the class maps from both scales
    
    (2, 27500)
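
    The 27500 columns are just the two flattened prediction maps laid side by side; a quick check of the arithmetic:

    # 55 class scores at each of 20*20 and at each of 10*10 spatial locations
    print(20 * 20 * 55 + 10 * 10 * 55)  # 22000 + 5500 = 27500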
    

    Bounding Box Prediction Layer

    This is again implemented as a class:

    class BBoxPredictor(nn.Block):
        def __init__(self, num_anchors, **kwargs):
            super().__init__(**kwargs)
            self.num_anchors = num_anchors
            # bounding box prediction layer: 4 offsets per anchor
            self.bbox_predictor = nn.Conv2D(
                self.num_anchors * 4, kernel_size=3, padding=1)
    
        def forward(self, Y):
            bbox_preds = self.bbox_predictor(Y)
            return bbox_preds
    

    Test:

    Y = nd.zeros((2, 8, 20, 20))  # a batch of 8 feature maps of size 20 x 20
    bbox = BBoxPredictor(10)      # instantiate with 10 anchors
    bbox.initialize()             # initialize the parameters
    bbox_preds = bbox(Y)          # bounding box prediction maps
    bbox_preds.shape
    
    (2, 40, 20, 20)
    

    BBoxPredictor produces, at the same spatial resolution as the input feature map, num_anchors * 4 channels of bounding box offsets; here 10 anchors * 4 = 40 channels.

    Height-and-Width Halving Block

    class DownSampleBlock(nn.Block):
        def __init__(self, num_channels, **kwargs):
            '''
            Block that halves the height and width: two 3x3 Conv-BatchNorm-ReLU
            layers followed by a 2x2 max pool.
            '''
            super().__init__(**kwargs)
            self.block = nn.Sequential()
            with self.block.name_scope():
                for _ in range(2):
                    self.block.add(nn.Conv2D(num_channels, kernel_size=3, padding=1),
                                   nn.BatchNorm(in_channels=num_channels),
                                   nn.Activation('relu'))
                self.block.add(nn.MaxPool2D(2))
    
        def forward(self, X):
            return self.block(X)
    

    Test: a 20 x 20 input comes out as 10 x 10, with the channel count set to 10:

    Y = nd.zeros((2, 8, 20, 20))  # a batch of 8 feature maps of size 20 x 20
    down_sample = DownSampleBlock(10)
    down_sample.initialize()
    down_sample(Y).shape
    
    (2, 10, 10, 10)
    

    Base Network

    For brevity, we design only a simple base network here:

    class BaseNet(nn.Block):
        def __init__(self, **kwargs):
            '''
            Base network: three halving blocks that downsample the input by a factor of 8
            '''
            super().__init__(**kwargs)
            self.block = nn.Sequential()
            with self.block.name_scope():
                for num_filters in [16, 32, 64]:
                    self.block.add(DownSampleBlock(num_filters))
    
        def forward(self, X):
            return self.block(X)
    

    Test:

    Y = nd.zeros((2, 8, 512, 512))  # a batch of 8 feature maps of size 512 x 512; three halvings give 512 / 8 = 64
    base_net = BaseNet()
    base_net.initialize()
    base_net(Y).shape
    
    (2, 64, 64, 64)
    
    
    

    Anchor Generation

    AnchorY wraps an arbitrary block: it applies the block to its input and generates anchor boxes on the resulting feature map with MultiBoxPrior, returning both the feature map and the anchors:

    class AnchorY(nn.Block):
        def __init__(self, block, size, ratio, **kwargs):
            super().__init__(**kwargs)
            self.block = block
            self._size = size
            self._ratio = ratio
    
        def forward(self, X):
            Y = self.block(X)
            anchors = contrib.ndarray.MultiBoxPrior(
                Y, sizes=self._size, ratios=self._ratio)
            return Y, anchors
    

    Test:

    block = BaseNet()
    anchor_gen = AnchorY(block, .4, .7)
    anchor_gen.initialize()
    X = nd.zeros((2, 8, 256, 256))
    Y, anchors = anchor_gen(X)
    Y.shape, anchors.shape
    
    ((2, 64, 32, 32), (1, 1024, 4))
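
    The BaseNet reduces the 256 x 256 input to 32 x 32, and MultiBoxPrior lays out len(sizes) + len(ratios) - 1 anchors per spatial location (a single one here, since one size and one ratio were given), which accounts for the 1024 anchors:

    h, w = Y.shape[2], Y.shape[3]         # 32 x 32 feature map
    anchors_per_location = 1 + 1 - 1      # len(sizes) + len(ratios) - 1
    print(h * w * anchors_per_location)   # 1024, matching anchors.shape[1]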
    

    Assembling the SSD

    class TinySSD(nn.Block):
        def __init__(self, sizes, ratios, num_classes, **kwargs):
            super().__init__(**kwargs)
            sizes, ratios, self.num_classes = sizes, ratios, num_classes
            self.num_anchors = len(sizes[0]) + len(ratios[0]) - 1
            for i in range(5):
                # equivalent to the assignment self.blk_i = self.block(i)
                setattr(self, 'blk_%d' % i, self.block(i))
                setattr(self, 'cls_%d' % i, ClassPredictor(self.num_anchors,
                                                           self.num_classes))
                setattr(self, 'bbox_%d' % i, BBoxPredictor(self.num_anchors))
                setattr(self, 'anchor_%d' % i, AnchorY(
                    getattr(self, 'blk_%d' % i), sizes[i], ratios[i]))
    
        def block(self, i):
            if i == 0:
                blk = BaseNet()
            elif i == 4:
                blk = nn.GlobalMaxPool2D()
            else:
                blk = DownSampleBlock(128)
            return blk
    
        def forward(self, X):
            anchors, cls_preds, bbox_preds = [None] * 5, [None] * 5, [None] * 5
            for i in range(5):
                # getattr(self, 'anchor_%d' % i) accesses self.anchor_i, which applies self.blk_i and generates its anchors
                Y, anchors[i] = getattr(self, 'anchor_%d' % i)(X)
                cls_preds[i] = getattr(self, 'cls_%d' % i)(Y)
                bbox_preds[i] = getattr(self, 'bbox_%d' % i)(Y)
                X = Y
            # the 0 in reshape keeps the batch size dimension unchanged
            cls_preds = concat_preds(cls_preds).reshape(
                (0, -1, self.num_classes + 1))
            return nd.concat(*anchors, dim=1), cls_preds, concat_preds(bbox_preds)
    

    Test code:

    sizes = [[0.2, 0.272], [0.37, 0.447], [0.54, 0.619], [0.71, 0.79],
             [0.88, 0.961]]
    ratios = [[1, 2, 0.5]] * 5
    num_classes = 1
    
    X = nd.zeros((32, 3, 256, 256))
    net = TinySSD(sizes, ratios, num_classes)
    net.initialize()
    anchors, cls_preds, bbox_preds = net(X)
    
    print('output anchors:', anchors.shape)
    print('output class preds:', cls_preds.shape)
    print('output bbox preds:', bbox_preds.shape)
    
    output anchors: (1, 5444, 4)
    output class preds: (32, 5444, 2)
    output bbox preds: (32, 21776)
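
    Where do these numbers come from? A quick check, assuming the five scales yield feature maps of spatial size 32, 16, 8, 4 and 1 for a 256 x 256 input (the BaseNet halves three times, then two more halving blocks, then global pooling):

    num_anchors = len(sizes[0]) + len(ratios[0]) - 1   # 4 anchors per location
    fmap_sizes = [32, 16, 8, 4, 1]                     # spatial sizes of the five scales
    total = sum(s * s for s in fmap_sizes) * num_anchors
    print(total)                      # 5444 anchors
    print(total * (num_classes + 1))  # 10888 class scores per image, reshaped to (batch, 5444, 2)
    print(total * 4)                  # 21776 bounding box offsets per image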
    

    Network structure:

    net
    
    TinySSD(
      (blk_0): BaseNet(
        (block): Sequential(
          (0): DownSampleBlock(
            (block): Sequential(
              (0): Conv2D(3 -> 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
              (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=16)
              (2): Activation(relu)
              (3): Conv2D(16 -> 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
              (4): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=16)
              (5): Activation(relu)
              (6): MaxPool2D(size=(2, 2), stride=(2, 2), padding=(0, 0), ceil_mode=False)
            )
          )
          (1): DownSampleBlock(
            (block): Sequential(
              (0): Conv2D(16 -> 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
              (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=32)
              (2): Activation(relu)
              (3): Conv2D(32 -> 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
              (4): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=32)
              (5): Activation(relu)
              (6): MaxPool2D(size=(2, 2), stride=(2, 2), padding=(0, 0), ceil_mode=False)
            )
          )
          (2): DownSampleBlock(
            (block): Sequential(
              (0): Conv2D(32 -> 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
              (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=64)
              (2): Activation(relu)
              (3): Conv2D(64 -> 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
              (4): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=64)
              (5): Activation(relu)
              (6): MaxPool2D(size=(2, 2), stride=(2, 2), padding=(0, 0), ceil_mode=False)
            )
          )
        )
      )
      (cls_0): ClassPredictor(
        (cls_predictor): Conv2D(64 -> 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (bbox_0): BBoxPredictor(
        (bbox_predictor): Conv2D(64 -> 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (anchor_0): AnchorY(
        (block): BaseNet(
          (block): Sequential(
            (0): DownSampleBlock(
              (block): Sequential(
                (0): Conv2D(3 -> 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=16)
                (2): Activation(relu)
                (3): Conv2D(16 -> 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (4): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=16)
                (5): Activation(relu)
                (6): MaxPool2D(size=(2, 2), stride=(2, 2), padding=(0, 0), ceil_mode=False)
              )
            )
            (1): DownSampleBlock(
              (block): Sequential(
                (0): Conv2D(16 -> 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=32)
                (2): Activation(relu)
                (3): Conv2D(32 -> 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (4): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=32)
                (5): Activation(relu)
                (6): MaxPool2D(size=(2, 2), stride=(2, 2), padding=(0, 0), ceil_mode=False)
              )
            )
            (2): DownSampleBlock(
              (block): Sequential(
                (0): Conv2D(32 -> 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=64)
                (2): Activation(relu)
                (3): Conv2D(64 -> 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
                (4): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=64)
                (5): Activation(relu)
                (6): MaxPool2D(size=(2, 2), stride=(2, 2), padding=(0, 0), ceil_mode=False)
              )
            )
          )
        )
      )
      (blk_1): DownSampleBlock(
        (block): Sequential(
          (0): Conv2D(64 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128)
          (2): Activation(relu)
          (3): Conv2D(128 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          (4): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128)
          (5): Activation(relu)
          (6): MaxPool2D(size=(2, 2), stride=(2, 2), padding=(0, 0), ceil_mode=False)
        )
      )
      (cls_1): ClassPredictor(
        (cls_predictor): Conv2D(128 -> 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (bbox_1): BBoxPredictor(
        (bbox_predictor): Conv2D(128 -> 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (anchor_1): AnchorY(
        (block): DownSampleBlock(
          (block): Sequential(
            (0): Conv2D(64 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128)
            (2): Activation(relu)
            (3): Conv2D(128 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            (4): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128)
            (5): Activation(relu)
            (6): MaxPool2D(size=(2, 2), stride=(2, 2), padding=(0, 0), ceil_mode=False)
          )
        )
      )
      (blk_2): DownSampleBlock(
        (block): Sequential(
          (0): Conv2D(128 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128)
          (2): Activation(relu)
          (3): Conv2D(128 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          (4): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128)
          (5): Activation(relu)
          (6): MaxPool2D(size=(2, 2), stride=(2, 2), padding=(0, 0), ceil_mode=False)
        )
      )
      (cls_2): ClassPredictor(
        (cls_predictor): Conv2D(128 -> 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (bbox_2): BBoxPredictor(
        (bbox_predictor): Conv2D(128 -> 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (anchor_2): AnchorY(
        (block): DownSampleBlock(
          (block): Sequential(
            (0): Conv2D(128 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128)
            (2): Activation(relu)
            (3): Conv2D(128 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            (4): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128)
            (5): Activation(relu)
            (6): MaxPool2D(size=(2, 2), stride=(2, 2), padding=(0, 0), ceil_mode=False)
          )
        )
      )
      (blk_3): DownSampleBlock(
        (block): Sequential(
          (0): Conv2D(128 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128)
          (2): Activation(relu)
          (3): Conv2D(128 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          (4): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128)
          (5): Activation(relu)
          (6): MaxPool2D(size=(2, 2), stride=(2, 2), padding=(0, 0), ceil_mode=False)
        )
      )
      (cls_3): ClassPredictor(
        (cls_predictor): Conv2D(128 -> 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (bbox_3): BBoxPredictor(
        (bbox_predictor): Conv2D(128 -> 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (anchor_3): AnchorY(
        (block): DownSampleBlock(
          (block): Sequential(
            (0): Conv2D(128 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128)
            (2): Activation(relu)
            (3): Conv2D(128 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            (4): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128)
            (5): Activation(relu)
            (6): MaxPool2D(size=(2, 2), stride=(2, 2), padding=(0, 0), ceil_mode=False)
          )
        )
      )
      (blk_4): GlobalMaxPool2D(size=(1, 1), stride=(1, 1), padding=(0, 0), ceil_mode=True)
      (cls_4): ClassPredictor(
        (cls_predictor): Conv2D(128 -> 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (bbox_4): BBoxPredictor(
        (bbox_predictor): Conv2D(128 -> 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (anchor_4): AnchorY(
        (block): GlobalMaxPool2D(size=(1, 1), stride=(1, 1), padding=(0, 0), ceil_mode=True)
      )
    )
    

    You can now use this network for object detection.
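
    To illustrate how the three outputs plug into training (the full loop is in the training notebook linked above), here is a minimal sketch of a single loss computation, following the d2lzh SSD chapter; the one-box label tensor Y below is a made-up placeholder whose only purpose is to exercise the shapes:

    cls_loss = gloss.SoftmaxCrossEntropyLoss()
    bbox_loss = gloss.L1Loss()
    
    def calc_loss(cls_preds, cls_labels, bbox_preds, bbox_labels, bbox_masks):
        cls = cls_loss(cls_preds, cls_labels)
        bbox = bbox_loss(bbox_preds * bbox_masks, bbox_labels * bbox_masks)
        return cls + bbox
    
    X = nd.zeros((32, 3, 256, 256))  # a batch of images
    # placeholder ground truth: one box per image, format (class, xmin, ymin, xmax, ymax)
    Y = nd.tile(nd.array([0, 0.1, 0.1, 0.6, 0.6]), (32, 1)).reshape((32, 1, 5))
    
    with autograd.record():
        anchors, cls_preds, bbox_preds = net(X)
        # assign a class label and an offset target to every anchor
        bbox_labels, bbox_masks, cls_labels = contrib.ndarray.MultiBoxTarget(
            anchors, Y, cls_preds.transpose((0, 2, 1)))
        l = calc_loss(cls_preds, cls_labels, bbox_preds, bbox_labels, bbox_masks)
    l.backward()

    For inference, contrib.ndarray.MultiBoxDetection can combine the softmaxed class scores, the predicted offsets and the anchors into final detections, as in the d2lzh chapter.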

    
    

    1. Liu, W., Anguelov, D., Erhan, D., Szegedy, C., Reed, S., Fu, C.-Y., & Berg, A. C. (2016). SSD: Single shot multibox detector. In European Conference on Computer Vision (pp. 21–37). Springer, Cham.
