Common Neural Network Architectures and Their PyTorch Implementations (Part 2)


    The ResNet family

    For a basic introduction to ResNet, see the following blog post; it will not be repeated here:

    https://www.cnblogs.com/henuliulei/p/13564468.html

    The architecture of resnet34 is shown below.

    resnet50, resnet101, and deeper variants differ from resnet34 in that they use a deeper bottleneck structure:

    The authors went on to explore deeper networks. To keep the training time manageable, they replaced the original building block (the residual learning unit) with the bottleneck structure shown above: the 1x1 convolutions at the two ends reduce and then restore the channel dimension, so only the middle 3x3 convolution operates on the narrowed "bottleneck" representation.
    The two block types have similar time complexity. At these larger input dimensions, the parameters added by projection shortcuts are no longer negligible, so identity shortcuts (with zero padding for dimension matching) are used instead.
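    As a quick sanity check on the "similar time complexity" claim, here is a rough weight-only parameter count for the two blocks compared in the paper (a 64-d basic block versus a 256-d bottleneck block); biases and BatchNorm parameters are ignored:

    # Weight counts for the two block types (the paper's Fig. 5 comparison)
    basic = 3*3*64*64 + 3*3*64*64                       # two 3x3 convs on 64 channels
    bottleneck = 1*1*256*64 + 3*3*64*64 + 1*1*64*256    # 1x1 down, 3x3, 1x1 up
    print(basic, bottleneck)                            # 73728 vs 69632 -> comparable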
    Replacing ResNet's original residual blocks this way, and stacking more of them, increases the network depth and yields ResNet-50, ResNet-101, and ResNet-152. Because the degradation problem has been solved, performance keeps improving as the depth grows.
    Finally, the authors tried a 1202-layer network on CIFAR-10. Its training error was close to that of a much shallower 110-layer network, but its test error was about 1.5% higher; they attribute this to overfitting from using such a deep network.
    The architectures of the commonly used models are tabulated below; ResNet-50 has about 25 million parameters.
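    That figure is easy to verify directly (a quick sketch, assuming torchvision is installed):

    import torchvision.models as models

    # Count every learnable parameter of the stock ResNet-50 definition
    resnet50 = models.resnet50()
    n_params = sum(p.numel() for p in resnet50.parameters())
    print(f"{n_params:,}")  # about 25.6 million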

    The resnet34 model in PyTorch:

    #coding:utf8
    from .BasicModule import BasicModule  # project-specific nn.Module subclass (adds save/load helpers)
    from torch import nn
    from torch.nn import functional as F

    class ResidualBlock(nn.Module):
        '''
        Sub-module: Residual Block
        '''
        def __init__(self, inchannel, outchannel, stride=1, shortcut=None):
            super(ResidualBlock, self).__init__()
            # Main (left) branch: two 3x3 convolutions
            self.left = nn.Sequential(
                    nn.Conv2d(inchannel, outchannel, 3, stride, 1, bias=False),
                    nn.BatchNorm2d(outchannel),
                    nn.ReLU(inplace=True),
                    nn.Conv2d(outchannel, outchannel, 3, 1, 1, bias=False),
                    nn.BatchNorm2d(outchannel))
            # Shortcut (right) branch: identity if None, otherwise a projection
            self.right = shortcut

        def forward(self, x):
            out = self.left(x)
            residual = x if self.right is None else self.right(x)
            out += residual
            return F.relu(out)

    class ResNet34(BasicModule):
        '''
        Main module: ResNet34
        ResNet34 consists of several layers, each made of several residual blocks.
        The residual block is implemented as the sub-module above; layers are
        assembled by _make_layer.
        '''
        def __init__(self, num_classes=2):
            super(ResNet34, self).__init__()
            self.model_name = 'resnet34'

            # Stem: initial image transformation
            self.pre = nn.Sequential(
                    nn.Conv2d(3, 64, 7, 2, 3, bias=False),
                    nn.BatchNorm2d(64),
                    nn.ReLU(inplace=True),
                    nn.MaxPool2d(3, 2, 1))

            # Repeated layers with 3, 4, 6, and 3 residual blocks respectively
            self.layer1 = self._make_layer(64, 128, 3)
            self.layer2 = self._make_layer(128, 256, 4, stride=2)
            self.layer3 = self._make_layer(256, 512, 6, stride=2)
            self.layer4 = self._make_layer(512, 512, 3, stride=2)

            # Fully connected classification head
            self.fc = nn.Linear(512, num_classes)

        def _make_layer(self, inchannel, outchannel, block_num, stride=1):
            '''
            Build a layer consisting of several residual blocks.
            '''
            # 1x1 projection shortcut so the first block can change
            # the channel count and resolution
            shortcut = nn.Sequential(
                    nn.Conv2d(inchannel, outchannel, 1, stride, bias=False),
                    nn.BatchNorm2d(outchannel))

            layers = []
            layers.append(ResidualBlock(inchannel, outchannel, stride, shortcut))

            # The remaining blocks keep the shape, so identity shortcuts suffice
            for _ in range(1, block_num):
                layers.append(ResidualBlock(outchannel, outchannel))
            return nn.Sequential(*layers)

        def forward(self, x):
            x = self.pre(x)

            x = self.layer1(x)
            x = self.layer2(x)
            x = self.layer3(x)
            x = self.layer4(x)

            x = F.avg_pool2d(x, 7)
            x = x.view(x.size(0), -1)
            return self.fc(x)
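    A minimal smoke test for the model above (a sketch: it assumes the ResNet34 class is importable, and that BasicModule can be swapped for nn.Module when running standalone):

    import torch

    model = ResNet34(num_classes=2)
    x = torch.randn(1, 3, 224, 224)    # one 224x224 RGB image
    out = model(x)
    print(out.shape)                   # expected: torch.Size([1, 2])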

    The resnet50 model:

    # coding:utf8
    from .BasicModule import BasicModule  # project-specific nn.Module subclass (adds save/load helpers)
    from torch import nn
    from torch.nn import functional as F


    class ResidualBlock(nn.Module):
        '''
        Sub-module: bottleneck Residual Block
        '''
        def __init__(self, inchannel, outchannel, stride=1, shortcut=None):
            super(ResidualBlock, self).__init__()
            self.left = nn.Sequential(
                # 1x1 conv reduces the channel count to a quarter
                nn.Conv2d(inchannel, outchannel // 4, 1, 1, bias=False),
                nn.BatchNorm2d(outchannel // 4),
                nn.ReLU(inplace=True),
                # 3x3 conv works on the narrowed channels; stride applied here
                nn.Conv2d(outchannel // 4, outchannel // 4, 3, stride, 1, bias=False),
                nn.BatchNorm2d(outchannel // 4),
                nn.ReLU(inplace=True),
                # 1x1 conv restores the channel count
                nn.Conv2d(outchannel // 4, outchannel, 1, 1, bias=False),
                nn.BatchNorm2d(outchannel))
            # Shortcut (right) branch: identity if None, otherwise a projection
            self.right = shortcut

        def forward(self, x):
            out = self.left(x)
            residual = x if self.right is None else self.right(x)
            out += residual
            return F.relu(out)


    class ResNet50(BasicModule):
        '''
        Main module: ResNet50
        ResNet50 consists of several layers, each made of several residual blocks.
        The bottleneck block is implemented as the sub-module above; layers are
        assembled by _make_layer.
        '''
        def __init__(self, num_classes=2):
            super(ResNet50, self).__init__()
            self.model_name = 'resnet50'

            # Stem: initial image transformation
            self.pre = nn.Sequential(
                nn.Conv2d(3, 64, 7, 2, 3, bias=False),
                nn.BatchNorm2d(64),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(3, 2, 1))

            # Repeated layers with 3, 4, 6, and 3 residual blocks respectively
            self.layer1 = self._make_layer(64, 256, 3)
            self.layer2 = self._make_layer(256, 512, 4, stride=2)
            self.layer3 = self._make_layer(512, 1024, 6, stride=2)
            self.layer4 = self._make_layer(1024, 2048, 3, stride=2)

            # Fully connected classification head
            self.fc = nn.Linear(2048, num_classes)

        def _make_layer(self, inchannel, outchannel, block_num, stride=1):
            '''
            Build a layer consisting of several residual blocks.
            '''
            # 1x1 projection shortcut so the first block can change
            # the channel count and resolution
            shortcut = nn.Sequential(
                nn.Conv2d(inchannel, outchannel, 1, stride, bias=False),
                nn.BatchNorm2d(outchannel))

            layers = []
            layers.append(ResidualBlock(inchannel, outchannel, stride, shortcut))

            # The remaining blocks keep the shape, so identity shortcuts suffice
            for _ in range(1, block_num):
                layers.append(ResidualBlock(outchannel, outchannel))
            return nn.Sequential(*layers)

        def forward(self, x):
            x = self.pre(x)

            x = self.layer1(x)
            x = self.layer2(x)
            x = self.layer3(x)
            x = self.layer4(x)

            x = F.avg_pool2d(x, 7)
            x = x.view(x.size(0), -1)
            return self.fc(x)
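    The same kind of smoke test, tracing the intermediate shapes stage by stage (again a sketch assuming the ResNet50 class above is available, with BasicModule replaceable by nn.Module):

    import torch

    model = ResNet50(num_classes=2)
    x = torch.randn(1, 3, 224, 224)
    x = model.pre(x);    print(x.shape)   # [1, 64, 56, 56]
    x = model.layer1(x); print(x.shape)   # [1, 256, 56, 56]
    x = model.layer2(x); print(x.shape)   # [1, 512, 28, 28]
    x = model.layer3(x); print(x.shape)   # [1, 1024, 14, 14]
    x = model.layer4(x); print(x.shape)   # [1, 2048, 7, 7]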

    MobileNet v1
