• A quick look at PyTorch's forward


    import torch.nn as nn
    import torch.nn.functional as F
    import torch.optim as optim
    from torch.autograd import Variable
    import torch
    
    class Net(nn.Module):  # a custom network must subclass nn.Module
        def __init__(self):
            super(Net, self).__init__()
            # two convolutional layers, self.conv1 and self.conv2; note that these layers do not include an activation function
            self.conv1 = nn.Conv2d(1, 6, 5)  # 1 input image channel, 6 output channels, 5x5 square convolution kernel
            self.conv2 = nn.Conv2d(6, 16, 5)
            # three fully connected layers
            self.fc1 = nn.Linear(16 * 5 * 5, 120)  # an affine operation: y = Wx + b
            self.fc2 = nn.Linear(120, 84)
            self.fc3 = nn.Linear(84, 10)
    
        def forward(self, x):  # note: the input to a 2D conv layer has shape batchsize * channel * height * width
            x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))  # Max pooling over a (2, 2) window
            x = F.max_pool2d(F.relu(self.conv2(x)), 2)  # If the size is a square you can only specify a single number
            x = x.view(-1, self.num_flat_features(x))
            x = F.relu(self.fc1(x))
            x = F.relu(self.fc2(x))
            x = self.fc3(x)
    
            print(x)
            print('y=--------')
            return x
    
        def num_flat_features(self, x):
            size = x.size()[1:]  # all dimensions except the batch dimension
            num_features = 1
            for s in size:
                num_features *= s
            return num_features
    
    
    net = Net()
    # create your optimizer
    optimizer = optim.SGD(net.parameters(), lr = 0.01)
    num_iterations = 20
    input = Variable(torch.randn(2, 1, 32, 32))
    print('input=',input)
    #target = Variable(torch.Tensor([5],dtype=torch.long))
    target = Variable(torch.LongTensor([5,7]))
    criterion = nn.CrossEntropyLoss()
    # in your training loop:
    for i in range(num_iterations):
        optimizer.zero_grad() # zero the gradient buffers; without this, gradients accumulate across iterations
    
        output = net(input) # this is where the graph is built dynamically; you could pass other arguments to change the network's structure
        loss = criterion(output, target)
        loss.backward() # computes the gradients, i.e. fills Variable.grad
        optimizer.step() # does the update, i.e. Variable.data -= learning_rate * Variable.grad

    The listing above is a complete example.
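
    A small side note: the example uses the older Variable API, which in current PyTorch is a deprecated no-op (plain tensors track gradients themselves), and the manual num_flat_features helper is usually replaced by torch.flatten. A minimal, standalone sketch of that flattening step as it would typically be written today (this snippet is illustrative, not from the original post):

    import torch
    x = torch.randn(2, 16, 5, 5)        # same shape as the tensor reaching the flatten step
    print(torch.flatten(x, 1).shape)    # torch.Size([2, 400]); keeps dim 0 (batch), flattens the rest
    print(x.view(x.size(0), -1).shape)  # equivalent view-based form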

    __init__ only specifies each conv layer's number of input channels, number of output channels, and kernel size.
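
    For reference, here is why fc1's input size is 16 * 5 * 5: starting from a 32x32 image, each 5x5 convolution (stride 1, no padding) shrinks the spatial size by 4, and each 2x2 max pool halves it. A small sketch of that bookkeeping (the helper below is illustrative, not part of the original code):

    def conv_out(n, k):                  # output size of a stride-1, no-padding convolution
        return n - k + 1

    h = conv_out(32, 5) // 2             # conv1 (5x5) then 2x2 max pool: 32 -> 28 -> 14
    h = conv_out(h, 5) // 2              # conv2 (5x5) then 2x2 max pool: 14 -> 10 -> 5
    print(16 * h * h)                    # 400, i.e. 16 * 5 * 5, the input size fc1 expects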

    It is forward that actually applies these layers to the data; how forward gets invoked is sketched below.
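
    In the training loop, forward is never called by name: net(input) goes through nn.Module.__call__, which runs any registered hooks and then dispatches to forward. A minimal sketch, reusing the net and input defined in the example above:

    out1 = net(input)           # the usual call: nn.Module.__call__ runs hooks, then forward
    out2 = net.forward(input)   # calls forward directly, bypassing hooks; prefer net(input)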

    input = Variable(torch.randn(2, 1, 32, 32))    # batchsize, channel, height, width
    target = Variable(torch.LongTensor([5,7]))     # two samples: the target class of the first is 5 and of the second is 7 (arbitrary choices; this is not a 5x7 matrix)
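
    A quick sanity check of the shapes may help: the network returns logits of shape (2, 10), and nn.CrossEntropyLoss expects exactly that together with a (2,)-shaped vector of class indices. A short sketch, assuming the imports and the Net class from the code above:

    net = Net()
    x = torch.randn(2, 1, 32, 32)            # 2 samples, 1 channel, 32x32
    logits = net(x)                          # shape (2, 10): one score per class, per sample
    t = torch.LongTensor([5, 7])             # one class index per sample
    loss = nn.CrossEntropyLoss()(logits, t)
    print(logits.shape, t.shape, loss.item())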
  • Original post: https://www.cnblogs.com/ziytong/p/10677771.html