resnet mindspore


    PyTorch implementation of ResNet-50 (the torchvision.models.resnet source):

    import torch.nn as nn
    import math
    import torch.utils.model_zoo as model_zoo
    
    
    __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
               'resnet152']
    
    
    model_urls = {
        'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
        'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
        'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
        'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
        'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    }
    
    
    def conv3x3(in_planes, out_planes, stride=1):
        """3x3 convolution with padding"""
        return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                         padding=1, bias=False)
    
    
    class BasicBlock(nn.Module):
        expansion = 1
    
        def __init__(self, inplanes, planes, stride=1, downsample=None):
            super(BasicBlock, self).__init__()
            self.conv1 = conv3x3(inplanes, planes, stride)
            self.bn1 = nn.BatchNorm2d(planes)
            self.relu = nn.ReLU(inplace=True)
            self.conv2 = conv3x3(planes, planes)
            self.bn2 = nn.BatchNorm2d(planes)
            self.downsample = downsample
            self.stride = stride
    
        def forward(self, x):
            residual = x
    
            out = self.conv1(x)
            out = self.bn1(out)
            out = self.relu(out)
    
            out = self.conv2(out)
            out = self.bn2(out)
    
            if self.downsample is not None:
                residual = self.downsample(x)
    
            out += residual
            out = self.relu(out)
    
            return out
    
    
    class Bottleneck(nn.Module):
        expansion = 4
    
        def __init__(self, inplanes, planes, stride=1, downsample=None):
            super(Bottleneck, self).__init__()
            self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
            self.bn1 = nn.BatchNorm2d(planes)
            self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                                   padding=1, bias=False)
            self.bn2 = nn.BatchNorm2d(planes)
            self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
            self.bn3 = nn.BatchNorm2d(planes * 4)
            self.relu = nn.ReLU(inplace=True)
            self.downsample = downsample
            self.stride = stride
    
        def forward(self, x):
            residual = x
    
            out = self.conv1(x)
            out = self.bn1(out)
            out = self.relu(out)
    
            out = self.conv2(out)
            out = self.bn2(out)
            out = self.relu(out)
    
            out = self.conv3(out)
            out = self.bn3(out)
    
            if self.downsample is not None:
                residual = self.downsample(x)
    
            out += residual
            out = self.relu(out)
    
            return out
    
    
    class ResNet(nn.Module):
    
        def __init__(self, block, layers, num_classes=1000):
            self.inplanes = 64
            super(ResNet, self).__init__()
            self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                                   bias=False)
            self.bn1 = nn.BatchNorm2d(64)
            self.relu = nn.ReLU(inplace=True)
            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            self.layer1 = self._make_layer(block, 64, layers[0])
            self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
            self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
            self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
            self.avgpool = nn.AvgPool2d(7, stride=1)
            self.fc = nn.Linear(512 * block.expansion, num_classes)
    
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                    m.weight.data.normal_(0, math.sqrt(2. / n))
                elif isinstance(m, nn.BatchNorm2d):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()
    
        def _make_layer(self, block, planes, blocks, stride=1):
            downsample = None
            if stride != 1 or self.inplanes != planes * block.expansion:
                downsample = nn.Sequential(
                    nn.Conv2d(self.inplanes, planes * block.expansion,
                              kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm2d(planes * block.expansion),
                )
    
            layers = []
            layers.append(block(self.inplanes, planes, stride, downsample))
            self.inplanes = planes * block.expansion
            for i in range(1, blocks):
                layers.append(block(self.inplanes, planes))
    
            return nn.Sequential(*layers)
    
        def forward(self, x):
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu(x)
            x = self.maxpool(x)
    
            x = self.layer1(x)
            x = self.layer2(x)
            x = self.layer3(x)
            x = self.layer4(x)
    
            x = self.avgpool(x)
            x = x.view(x.size(0), -1)
            x = self.fc(x)
    
            return x
    
    
    def resnet18(pretrained=False, **kwargs):
        """Constructs a ResNet-18 model.
    
        Args:
            pretrained (bool): If True, returns a model pre-trained on ImageNet
        """
        model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
        if pretrained:
            model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
        return model
    
    
    
    def resnet34(pretrained=False, **kwargs):
        """Constructs a ResNet-34 model.
    
        Args:
            pretrained (bool): If True, returns a model pre-trained on ImageNet
        """
        model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
        if pretrained:
            model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
        return model
    
    
    
    def resnet50(pretrained=False, **kwargs):
        """Constructs a ResNet-50 model.
    
        Args:
            pretrained (bool): If True, returns a model pre-trained on ImageNet
        """
        model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
        if pretrained:
            model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
        return model
    
    
    
    def resnet101(pretrained=False, **kwargs):
        """Constructs a ResNet-101 model.
    
        Args:
            pretrained (bool): If True, returns a model pre-trained on ImageNet
        """
        model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
        if pretrained:
            model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
        return model
    
    
    
    def resnet152(pretrained=False, **kwargs):
        """Constructs a ResNet-152 model.
    
        Args:
            pretrained (bool): If True, returns a model pre-trained on ImageNet
        """
        model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
        if pretrained:
            model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
        return model
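
    A quick sanity check of the constructors above (a sketch, not part of the torchvision source): an untrained ResNet-50 maps an ImageNet-sized input to 1000 logits.

    import torch

    # Build an untrained ResNet-50 and run one forward pass.
    model = resnet50(pretrained=False)
    model.eval()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 224, 224))
    print(out.shape)  # torch.Size([1, 1000]) -- one logit per ImageNet class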

    One thing worth recording: when ResNet-50 is reused in other projects, its last two layers are usually dropped, because those layers were implemented to match the ImageNet dataset (the sketch below shows the usual way to drop them).
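
    For example, one common pattern (a sketch, assuming the resnet50 constructor defined above) is to keep every child module except the final avgpool and fc, turning the network into a feature extractor:

    import torch
    import torch.nn as nn

    # Drop the last two children (avgpool, fc) and keep the convolutional backbone.
    backbone = nn.Sequential(*list(resnet50(pretrained=False).children())[:-2])
    feats = backbone(torch.randn(2, 3, 224, 224))
    print(feats.shape)  # torch.Size([2, 2048, 7, 7]) -- raw feature maps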

     x = self.avgpool(x)

    reduces the H and W of x to 1×1, so [B, C, H, W] becomes [B, C, 1, 1].

    x = x.view(x.size(0), -1)

    flattens the 4-D tensor into a 2-D one.

    x = self.fc(x)  (where self.fc = nn.Linear(512 * block.expansion, num_classes))

    nn.Linear is a fully connected layer, and its input must be 2-D.
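
    Putting the three lines together, a minimal shape trace (a sketch, assuming the resnet50 constructor above, a standard 224×224 input, and the Bottleneck expansion of 4):

    import torch

    model = resnet50(pretrained=False)
    x = torch.randn(2, 3, 224, 224)                                # [B, C, H, W] = [2, 3, 224, 224]
    x = model.maxpool(model.relu(model.bn1(model.conv1(x))))       # [2, 64, 56, 56]
    x = model.layer4(model.layer3(model.layer2(model.layer1(x))))  # [2, 2048, 7, 7]
    x = model.avgpool(x)                                           # [2, 2048, 1, 1] -- H and W pooled to 1x1
    x = x.view(x.size(0), -1)                                      # [2, 2048]       -- 4-D flattened to 2-D
    x = model.fc(x)                                                # [2, 1000]       -- nn.Linear needs a 2-D input
    print(x.shape)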

    To keep from forgetting them, here is a record of the endless stream of bugs I ran into.

    1 A bug that seemed completely unrelated:

    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

    With this line present, running the code raised the error `unsupport convert [key:kernal_size, value:(1,4)] to PyData` (reported against the HorizontalMaxPool2d class), yet when I pulled the network out and ran it on its own there was no error at all. After failing to find the cause, I deleted the line (it did not seem to be doing anything yet), and the error disappeared. A guess at what might be going on is sketched below.
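
    The HorizontalMaxPool2d class itself is not shown in the post, so the following is only a guess at the pattern involved: a horizontal max pool typically collapses the width dimension with a kernel size computed from the input shape at run time, and in GRAPH_MODE such a dynamically built tuple is the kind of value the compiler may fail to convert. A reduction-based variant (hypothetical sketch only) avoids the tuple kernel_size entirely:

    import mindspore.nn as nn
    import mindspore.ops as ops

    class HorizontalMaxPool2dNoTuple(nn.Cell):
        """Hypothetical rewrite: take the max over the width axis instead of
        constructing a pooling op with a (1, W) tuple kernel_size, so nothing
        shape-dependent is passed to an operator constructor in construct()."""
        def __init__(self):
            super(HorizontalMaxPool2dNoTuple, self).__init__()
            self.reduce_max = ops.ReduceMax(keep_dims=True)

        def construct(self, x):
            # x: [B, C, H, W] -> [B, C, H, 1]
            return self.reduce_max(x, 3)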

    2 Python's main function is optional. With

    if __name__ == '__main__':
        main()

    in place, a print statement outside main (at module level) is still executed when the file runs, whereas a print statement inside another function that is never called is not executed. A minimal example follows.
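
    A small file demonstrating this behavior (hypothetical demo.py):

    def main():
        print("inside main")          # runs because main() is called below

    def never_called():
        print("inside never_called")  # defined but never invoked, so never printed

    print("module level")             # executes as soon as the file is run

    if __name__ == '__main__':
        main()

    # python demo.py prints:
    #   module level
    #   inside main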

    3 A dtype bug with nn.SoftmaxCrossEntropyWithLogits

    Code:

    import mindspore
    import mindspore.nn as nn
    import numpy as np
    from mindspore import Tensor
    
    loss = nn.SoftmaxCrossEntropyWithLogits()
    np.random.seed(0)
    logits = Tensor(np.random.randint(0, 9, [1, 10]), mindspore.float32)
    labels_np = np.ones([1,]).astype(np.int32)
    labels = Tensor(labels_np, mindspore.float32)  # float32 labels here trigger the error; int32 does not
    output = loss(logits, labels)
    print(output)

    When the labels dtype is float32, the call fails with an error, but when the labels dtype is int32 there is no error.

    mindspore.nn.SoftmaxCrossEntropyWithLogits(sparse=False)  # sparse defaults to False
    Inputs:
      logits (Tensor) - Tensor of shape (N, C). Data type must be float16 or float32.
      labels (Tensor) - Tensor of shape (N,). If sparse is True, the type of labels is int32 or int64.
        If sparse is False, the type of labels is the same as the type of logits.
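
    Given that signature, a sketch of the sparse variant, which is presumably what integer class labels are meant for (class indices rather than one-hot vectors):

    import mindspore
    import mindspore.nn as nn
    import numpy as np
    from mindspore import Tensor

    # With sparse=True, labels are class indices of shape (N,) and dtype int32/int64.
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    logits = Tensor(np.random.randint(0, 9, [1, 10]), mindspore.float32)
    labels = Tensor(np.ones([1], dtype=np.int32))
    print(loss(logits, labels))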
     
     
     