PyTorch Notes 1


    PyTorch basics

    import torch
    x = torch.tensor([2, 3, 4], dtype=torch.float)  # create a tensor with values [2., 3., 4.] and dtype float
    
    # create a tensor that requires gradients
    x2 = torch.tensor([2,3,4], dtype=torch.float, requires_grad=True)
    x.size()   # torch.Size([3])
    a = torch.tensor(1.); b = torch.tensor(2.)
    a.add_(b)  # every operation ending in _ modifies the calling tensor in place,
    # e.g. after a.add_(b) the value of a is 3.; operations without _ leave the caller
    # unchanged and only return the result
    torch.cuda.is_available()  # check whether a CUDA-capable GPU is available
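
    A minimal sketch of using that check to pick a device (the names device and x_gpu are illustrative):

    # move the tensor to the GPU if one is available, otherwise stay on the CPU
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    x_gpu = x.to(device)  # .to() returns a copy of x on the chosen device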
    
    import torch
    x = torch.tensor([1,1,1,1,1], dtype=torch.float, requires_grad=True)
    y = x * 2
    grads = torch.FloatTensor([1,2,3,4,5])
    y.backward(grads)  # if y were a scalar, y.backward() alone would suffice and x.grad would hold the gradient
    x.grad             # since y is not a scalar, a gradient tensor must be passed to backward()
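
    For contrast, a minimal sketch of the scalar case (x3 is a fresh, illustrative tensor so the gradients above do not interfere):

    x3 = torch.tensor([1., 2., 3.], requires_grad=True)
    z = (x3 * 2).sum()   # z is a scalar
    z.backward()         # no gradient argument needed for a scalar output
    x3.grad              # tensor([2., 2., 2.])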

    Neural networks

    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    
    class Net(nn.Module):
    
        def __init__(self):
            super(Net, self).__init__()
            # 1 input image channel, 6 output channels, 3x3 square convolution
            # kernel
            self.conv1 = nn.Conv2d(1, 6, 3)
            self.conv2 = nn.Conv2d(6, 16, 3)
            # an affine operation: y = Wx + b
            self.fc1 = nn.Linear(16 * 6 * 6, 120)  # 6*6 from image dimension
            self.fc2 = nn.Linear(120, 84)
            self.fc3 = nn.Linear(84, 10)
    
        def forward(self, x):
            # Max pooling over a (2, 2) window
            x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
            # If the size is a square you can only specify a single number
            x = F.max_pool2d(F.relu(self.conv2(x)), 2)
            x = x.view(-1, self.num_flat_features(x))
            x = F.relu(self.fc1(x))
            x = F.relu(self.fc2(x))
            x = self.fc3(x)
            return x
    
        def num_flat_features(self, x):
            size = x.size()[1:]  # all dimensions except the batch dimension
            num_features = 1
            for s in size:
                num_features *= s
            return num_features
    
    net = Net()
    print(net)  # print the network structure
    
    params = list(net.parameters())
    print(len(params))
    print(params[0].size())  # conv1's .weight
    # try a random 32x32 input
    input = torch.randn(1, 1, 32, 32)  # standard-normal tensor of shape (1, 1, 32, 32)
    out = net(input)
    print(out)
    # zero the gradient buffers of all parameters and backprop with random gradients
    net.zero_grad()
    out.backward(torch.randn(1, 10))
    # loss function
    output = net(input)
    target = torch.randn(10)  # a dummy target, for example
    target = target.view(1, -1)  # make it the same shape as output
    criterion = nn.MSELoss()
    
    loss = criterion(output, target)
    print(loss)
    print(loss.grad_fn)  # MSELoss
    print(loss.grad_fn.next_functions[0][0])  # Linear
    print(loss.grad_fn.next_functions[0][0].next_functions[0][0])  # ReLU
    # backpropagation
    net.zero_grad()     # zeroes the gradient buffers of all parameters
    
    print('conv1.bias.grad before backward')
    print(net.conv1.bias.grad)
    
    loss.backward()
    
    print('conv1.bias.grad after backward')
    print(net.conv1.bias.grad)
    # update the weights
    import torch.optim as optim
    
    # create your optimizer
    optimizer = optim.SGD(net.parameters(), lr=0.01)
    
    # in your training loop:
    optimizer.zero_grad()   # zero the gradient buffers
    output = net(input)
    loss = criterion(output, target)
    loss.backward()
    optimizer.step()    # Does the update
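
    As a rough sketch of what optimizer.step() does for plain SGD without momentum (illustrative only; in practice you simply call step()):

    learning_rate = 0.01
    with torch.no_grad():
        for param in net.parameters():
            param -= learning_rate * param.grad  # manual equivalent of the vanilla SGD update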

    General structure

    import torch.nn as nn
    import torch.nn.functional as F
    import torch.optim as optim
    
    class Net(nn.Module):  # must inherit from nn.Module
        def __init__(self):
            super(Net, self).__init__()
            # create two convolution layers, self.conv1 and self.conv2; note that these layers contain no activation functions
            self.conv1 = nn.Conv2d(1, 6, 5) # 1 input image channel, 6 output channels, 5x5 square convolution kernel
            self.conv2 = nn.Conv2d(6, 16, 5)
            # three fully connected layers
            self.fc1   = nn.Linear(16*5*5, 120) # an affine operation: y = Wx + b
            self.fc2   = nn.Linear(120, 84)
            self.fc3   = nn.Linear(84, 10)
    
        def forward(self, x): # note: a 2D convolution layer expects input of shape batchsize * channel * height * width
            x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2)) # Max pooling over a (2, 2) window
            x = F.max_pool2d(F.relu(self.conv2(x)), 2) # If the size is a square you can only specify a single number
            x = x.view(-1, self.num_flat_features(x))
            x = F.relu(self.fc1(x))
            x = F.relu(self.fc2(x))
            x = self.fc3(x)
            return x
        
        def num_flat_features(self, x):
            size = x.size()[1:] # all dimensions except the batch dimension
            num_features = 1
            for s in size:
                num_features *= s
            return num_features
    
    net = Net()
    
    # create your optimizer
    optimizer = optim.SGD(net.parameters(), lr = 0.01)
    
    # in your training loop:
    for i in range(num_iterations):
        optimizer.zero_grad() # zero the gradient buffers; if you skip this, gradients accumulate across iterations
    
        output = net(input) # this is where the dynamic graph shows up; you can also pass other arguments to change the network structure
    
        loss = criterion(output, target)
        loss.backward() # compute gradients, i.e. fill in .grad for each parameter
        optimizer.step() # does the update, i.e. parameter.data -= learning_rate * parameter.grad
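
    The loop above assumes input, target, criterion and num_iterations already exist. A minimal, hypothetical way to define them for this 1-channel 32x32 network (dummy data only, and placed before the loop in practice):

    import torch

    criterion = nn.MSELoss()
    input = torch.randn(1, 1, 32, 32)  # a dummy batch of one 32x32 grayscale image
    target = torch.randn(1, 10)        # a dummy target matching the 10-dimensional output
    num_iterations = 10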