• Deep Learning: Classifying CIFAR-10 with a CNN



    import torchvision as tv
    import torchvision.transforms as transforms
    from torchvision.transforms import ToPILImage
    show = ToPILImage()  # tensor -> PIL Image, handy for visualizing samples
    import torch as t
    import torch.nn as nn
    import torch.nn.functional as F
    
    
    transform = transforms.Compose([
        transforms.ToTensor(),                                   # HWC uint8 image -> CHW float tensor in [0, 1]
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),  # map each channel to [-1, 1]
    ])
    
    # Download and load the CIFAR-10 data
    trainset = tv.datasets.CIFAR10(root=".", train=True, download=True, transform=transform)
    trainloader = t.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)
    testset = tv.datasets.CIFAR10('.', train=False, download=True, transform=transform)
    testloader = t.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
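
    # --- Added sketch (not in the original post): peek at one training batch. ---
    # `show` defined above converts a tensor to a PIL image; make_grid tiles the
    # 4 images into one, and (images + 1) / 2 undoes the Normalize transform.
    from torchvision.utils import make_grid
    dataiter = iter(trainloader)
    images, labels = next(dataiter)
    print(' '.join(classes[labels[j]] for j in range(4)))  # ground-truth labels
    show(make_grid((images + 1) / 2)).resize((400, 100))   # displays in a notebook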
    
    # Define the network
    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.conv1 = nn.Conv2d(3, 6, 5)        # 3 input channels -> 6 feature maps, 5x5 kernel
            self.conv2 = nn.Conv2d(6, 16, 5)       # 6 -> 16 feature maps, 5x5 kernel
            self.fc1 = nn.Linear(16 * 5 * 5, 120)  # feature maps are 16 x 5 x 5 after the second pooling
            self.fc2 = nn.Linear(120, 84)
            self.fc3 = nn.Linear(84, 10)           # one score per CIFAR-10 class
            
        def forward(self, x):
            x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))  # 32x32 -> 28x28 -> 14x14
            x = F.max_pool2d(F.relu(self.conv2(x)), 2)       # 14x14 -> 10x10 -> 5x5
            x = x.view(x.size()[0], -1)                      # flatten to (batch, 16*5*5)
            x = F.relu(self.fc1(x))
            x = F.relu(self.fc2(x))
            x = self.fc3(x)
            return x
    
    net = Net()
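
    # --- Added sanity check (not in the original post). ---
    # A dummy batch of four 3x32x32 images should yield a (4, 10) score tensor,
    # one score per CIFAR-10 class.
    dummy = t.randn(4, 3, 32, 32)
    print(net(dummy).shape)  # expected: torch.Size([4, 10])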
    
    
    # Loss function and optimizer
    from torch import optim
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    
    for epoch in range(2):
        running_loss = 0.0
        for i,data in enumerate(trainloader, 0):
            inputs, labels = data
            
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            
            optimizer.step()
            
            # accumulate the loss and report the average every 2000 mini-batches
            running_loss += loss.item()
            if i % 2000 == 1999:
                print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
                running_loss = 0.0
    print('Finished Training')
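
    # --- Added sketch (not in the original post): persist the trained weights. ---
    # The filename is arbitrary; a fresh Net() can restore them later via
    # net.load_state_dict(t.load('cifar10_cnn.pth')).
    t.save(net.state_dict(), 'cifar10_cnn.pth')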
    
    # Evaluate on the test set
    correct = 0
    total = 0
    with t.no_grad():  # gradients are not needed for evaluation
        for data in testloader:
            images, labels = data
            outputs = net(images)
            _, predicted = t.max(outputs, 1)  # index of the highest score = predicted class
            # print(outputs, _, predicted)    # uncomment to inspect raw scores and predictions
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    print('Accuracy on the 10000 test images: %d %%' % (100 * correct / total))
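
The single accuracy figure hides how the model does on individual categories. A possible follow-up, sketched here and not part of the original post, is to tally hits per class with the same testloader and trained net:

    # Per-class accuracy (added sketch)
    class_correct = [0] * 10
    class_total = [0] * 10
    with t.no_grad():
        for images, labels in testloader:
            _, predicted = t.max(net(images), 1)
            for label, pred in zip(labels, predicted):
                class_total[label] += 1
                class_correct[label] += int(label == pred)

    for i in range(10):
        print('%5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i]))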
    
• Original post: https://www.cnblogs.com/htoooth/p/8668245.html