• Optimizing the HybridSN Hyperspectral Image Classification Network


    1. HybridSN

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class HybridSN(nn.Module):
        def __init__(self, class_num=16):
            super(HybridSN, self).__init__()
            # 3D convolutions over (spectral, height, width)
            self.conv1 = nn.Conv3d(1, 8, (7, 3, 3), stride=1, padding=0)
            self.conv2 = nn.Conv3d(8, 16, (5, 3, 3), stride=1, padding=0)
            self.conv3 = nn.Conv3d(16, 32, (3, 3, 3), stride=1, padding=0)
            # after the 3D convs, the 32 channels x 18 spectral slices are merged into 576 2D channels
            self.conv4 = nn.Conv2d(576, 64, kernel_size=3, stride=1, padding=0)
            self.bn1 = nn.BatchNorm2d(64)
            self.fc1 = nn.Linear(18496, 256)   # 64 * 17 * 17 = 18496
            self.dropout1 = nn.Dropout(p=0.4)
            self.fc2 = nn.Linear(256, 128)
            self.dropout2 = nn.Dropout(p=0.4)
            self.fc3 = nn.Linear(128, class_num)
    
        def forward(self, x):
            out = self.conv1(x)          # (batch, 8, 24, 23, 23)
            out = self.conv2(out)        # (batch, 16, 20, 21, 21)
            out = self.conv3(out)        # (batch, 32, 18, 19, 19)
            # merge the channel and spectral dimensions for the 2D convolution
            out = out.reshape(out.shape[0], 576, 19, 19)
            out = self.conv4(out)        # (batch, 64, 17, 17)
            out = self.bn1(out)
            out = F.relu(out)
            out = out.view(-1, 64 * 17 * 17)
            out = self.fc1(out)
            out = F.relu(out)
            out = self.dropout1(out)
            out = self.fc2(out)
            out = F.relu(out)
            out = self.dropout2(out)
            out = self.fc3(out)
            return out
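
    As a quick sanity check, here is a minimal sketch (not part of the original post) that pushes a dummy batch through the network. The 25×25 spatial patch with 30 spectral bands is not stated explicitly above, but it is the only input size consistent with the hard-coded 576 × 19 × 19 reshape and the 18496-unit first fully connected layer.

    # Sanity check: a dummy forward pass with the patch size implied by the hard-coded shapes.
    net = HybridSN(class_num=16)
    dummy = torch.randn(4, 1, 30, 25, 25)   # (batch, 1, bands, height, width)
    logits = net(dummy)
    print(logits.shape)                     # torch.Size([4, 16])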

    2. Adding SENet (squeeze-and-excitation)

    class SELayer(nn.Module):
        def __init__(self, channel, r=16):
            super(SELayer, self).__init__()
            # squeeze: global average pooling reduces each channel to a single value
            self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
            # excitation: bottleneck MLP with reduction ratio r, ending in a sigmoid gate
            self.fc = nn.Sequential(
                nn.Linear(channel, channel // r),
                nn.ReLU(),
                nn.Linear(channel // r, channel),
                nn.Sigmoid()
            )
    
        def forward(self, x):
            b, c, _, _ = x.size()
            y = self.avg_pool(x).view(b, c)      # (batch, channel)
            y = self.fc(y).view(b, c, 1, 1)      # per-channel weights in (0, 1)
            return x * y.expand_as(x)            # rescale each channel of x

    class HybridSN(nn.Module):
        def __init__(self, num_classes=16):
            # ... same layers as in section 1, plus an SE block for the 64-channel 2D feature map ...
            self.senet = SELayer(64)

        def forward(self, x):
            # ... same as in section 1 up to the 2D convolution ...
            out = self.conv3_2d(out)   # the 2D convolution (conv4 in section 1)
            out = self.senet(out)      # channel-wise recalibration of the 64 feature maps
            # ... the remaining BN / ReLU / fully connected layers are unchanged ...
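
    A quick way to confirm the insertion is safe: SELayer only rescales channels and keeps the spatial shape, so nothing after it (bn1, ReLU, the fully connected stack) needs to change. A minimal sketch:

    # SELayer(64) leaves the (batch, 64, 17, 17) feature map shape untouched,
    # so it can be dropped in right after the 2D convolution.
    se = SELayer(64)
    feat = torch.randn(4, 64, 17, 17)   # shape of the 2D-conv output in this network
    print(se(feat).shape)               # torch.Size([4, 64, 17, 17])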

    3. Why do the test results differ from run to run?

    In PyTorch, a network has two modes: train and eval.

    In training mode (model.train()), BatchNorm and Dropout are active: Dropout randomly zeroes activations, and BatchNorm normalizes with the statistics of the current batch while updating its running statistics.

    In evaluation mode (model.eval()), Dropout is disabled and BatchNorm uses its stored running statistics.

    Because the two modes behave differently, a trained model that is tested while still in training mode keeps sampling a new Dropout mask on every forward pass, so the same test data can give a different result on every run. Calling model.eval() before testing makes the outputs deterministic.
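
    A minimal sketch of the effect, using the first version of the network above (the random input is only for illustration):

    # In train mode Dropout re-samples its mask on every call, so repeated forward
    # passes on the same input differ; in eval mode they match exactly.
    model = HybridSN(class_num=16)
    x = torch.randn(2, 1, 30, 25, 25)

    model.train()
    print(torch.allclose(model(x), model(x)))       # usually False: Dropout is active

    model.eval()
    with torch.no_grad():
        print(torch.allclose(model(x), model(x)))   # True: Dropout off, BatchNorm uses running stats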

  • Original post: https://www.cnblogs.com/yuzhenfu/p/13509743.html