• pytorch (16): Multi-class classification with a multi-layer perceptron


    1. Basic code

    The code below trains a three-layer MLP (784-200-200-10) on MNIST with manually created weight tensors, plain SGD, and CrossEntropyLoss.

    import torch
    import torch.optim as optim
    from torch.nn import functional as F
    import torch.nn as nn
    import torchvision
    
    # Weight tensors are laid out as [ch_out, ch_in], biases as [ch_out].
    w1, b1 = torch.randn(200, 784, requires_grad=True), torch.zeros(200, requires_grad=True)
    w2, b2 = torch.randn(200, 200, requires_grad=True), torch.zeros(200, requires_grad=True)
    w3, b3 = torch.randn(10, 200, requires_grad=True), torch.zeros(10, requires_grad=True)
    
    torch.nn.init.kaiming_normal_(w1)
    torch.nn.init.kaiming_normal_(w2)
    torch.nn.init.kaiming_normal_(w3)
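    # Note: plain randn weights have std 1, so the first 784-input layer would
    # produce pre-activations with std around sqrt(784) and training would stall;
    # kaiming_normal_ (above) rescales each weight tensor in place for ReLU nets.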
    
    def forward(x):
        x = x @ w1.t() + b1
        x = F.relu(x)
        x = x @ w2.t() + b2
        x = F.relu(x)
        x = x @ w3.t() + b3   # raw logits: no activation here, since
                              # CrossEntropyLoss applies log-softmax itself
        return x
    
    learning_rate = 0.01
    epochs = 1
    batch_size = 64
    
    train_loader = torch.utils.data.DataLoader(
        torchvision.datasets.MNIST('datasets/mnist_data', train=True, download=True,
                                   transform=torchvision.transforms.Compose([
                                       torchvision.transforms.ToTensor(),                      # convert to a float tensor in [0, 1]
                                       torchvision.transforms.Normalize((0.1307,), (0.3081,))  # normalize with the MNIST mean/std
                                   ])),
        batch_size=batch_size, shuffle=True)
    
    test_loader = torch.utils.data.DataLoader(
        torchvision.datasets.MNIST('datasets/mnist_data/', train=False, download=True,
                                   transform=torchvision.transforms.Compose([
                                       torchvision.transforms.ToTensor(),
                                       torchvision.transforms.Normalize((0.1307,), (0.3081,))
                                   ])),
        batch_size=batch_size, shuffle=False)
    
    optimizer = optim.SGD([w1, b1, w2, b2, w3, b3], lr=learning_rate)
    criteon = nn.CrossEntropyLoss()
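    # nn.CrossEntropyLoss expects raw logits of shape [N, 10] and integer class
    # targets of shape [N]; it applies log-softmax and the NLL loss internally.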
    
    for epoch in range(epochs):
        for batch_idx, (data, target) in enumerate(train_loader):
            data = data.view(-1, 28*28)   # flatten [N, 1, 28, 28] -> [N, 784]
            logits = forward(data)
            loss = criteon(logits, target)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if batch_idx % 100 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.item()))
    
    
        test_loss = 0
        correct = 0
        with torch.no_grad():             # no gradients needed during evaluation
            for data, target in test_loader:
                data = data.view(-1, 28*28)
                logits = forward(data)
                test_loss += criteon(logits, target).item()

                pred = logits.argmax(dim=1)           # predicted class per sample
                correct += pred.eq(target).sum().item()

        test_loss /= len(test_loader.dataset)
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, len(test_loader.dataset),
            100. * correct / len(test_loader.dataset)))
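
    For comparison, the same 784-200-200-10 network can be written with nn.Sequential, letting nn.Linear own the weights and the forward pass. This is a minimal sketch reusing the imports, learning_rate, and training loop above; it is not part of the original post. Note that nn.Linear also stores its weight as [ch_out, ch_in], which is why the manual version multiplies by w.t().

    # Sketch: equivalent MLP built from nn.Linear modules (same layer sizes as above)
    model = nn.Sequential(
        nn.Linear(784, 200), nn.ReLU(),
        nn.Linear(200, 200), nn.ReLU(),
        nn.Linear(200, 10),               # raw logits for CrossEntropyLoss
    )
    optimizer = optim.SGD(model.parameters(), lr=learning_rate)

    # In the training loop above, the only change is to call the module:
    #     logits = model(data.view(-1, 28*28))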

    2. Screenshot

    (The screenshot from the original post is not reproduced here.)

  • Original article: https://www.cnblogs.com/zhangxianrong/p/14026475.html