• torch


    1. Build a tensor directly from data

    x = torch.tensor([5.5,3])

    2. Build a tensor from an existing tensor. These methods reuse properties of the original tensor (such as dtype and device) unless new values are supplied.

    x = x.new_ones(5,3,dtype=torch.double)

    torch.randn_like(x,dtype=torch.float)
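
    A minimal sketch of the point above (the tensor names here are illustrative): randn_like copies the shape, and by default the dtype and device, of its input unless they are overridden.

    x = torch.zeros(5, 3, dtype=torch.long)
    y = torch.randn_like(x, dtype=torch.float)  # same shape as x, dtype overridden
    print(y.shape, y.dtype)                     # torch.Size([5, 3]) torch.float32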

    3. Get the shape of a tensor

    x.shape  # an attribute, not a method

    x.size()

    4. Tensor operations

    x = torch.rand(5,3)     y = torch.rand(5,3)

    x+y    torch.add(x,y)

    result = torch.empty(5,3)     torch.add(x, y, out=result)  # write the result into an existing tensor

    y.add_(x)  # in-place: adds x to y and stores the result in y

    5. All NumPy-style indexing works on tensors

    x[:,1:]

    6. Resizing (use reshape in NumPy; use view in torch)

    x = torch.randn(4,4)   y = x.view(16)  z = x.view(-1,8)
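
    A minimal sketch expanding on the line above: view shares storage with the original tensor and needs a compatible element count, and -1 lets torch infer that dimension; for non-contiguous tensors, reshape is the more forgiving alternative.

    x = torch.randn(4, 4)
    z = x.view(-1, 8)                 # -1 is inferred, so z has shape (2, 8)
    print(z.size())                   # torch.Size([2, 8])
    xt = x.t()                        # a transposed view is non-contiguous
    print(xt.reshape(16).size())      # reshape copies if it must; xt.view(16) would raise here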

    7. For a tensor with a single element, the .item() method converts the value into a Python number

    x = torch.randn(1)     x.data   x.grad    x.item()    z.transpose(1,0)

    8. Converting between NumPy arrays and tensors

    a = torch.ones(5)    b = a.numpy()  # a and b share the same memory

    a = np.ones(5)       b = torch.from_numpy(a)  # a and b share the same memory
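
    A minimal sketch demonstrating the shared memory on CPU tensors: an in-place change on one side is visible on the other.

    import numpy as np
    import torch

    a = torch.ones(5)
    b = a.numpy()
    a.add_(1)                # in-place add on the tensor
    print(b)                 # [2. 2. 2. 2. 2.] -- the NumPy array changed too

    c = np.ones(5)
    d = torch.from_numpy(c)
    np.add(c, 1, out=c)      # in-place add on the array
    print(d)                 # tensor([2., 2., 2., 2., 2.], dtype=torch.float64)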

    9. CUDA tensors

    if torch.cuda.is_available():
        device = torch.device("cuda")
        y = torch.ones_like(x, device=device)
        x = x.to(device)

    y.cpu().data.numpy()    y.to("cpu").data.numpy()    model = model.cuda()
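
    A common device-agnostic pattern, shown as a minimal sketch (not part of the original notes): pick the device once, then move both tensors and the model to it.

    import torch

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    x = torch.randn(5, 3).to(device)
    y = torch.ones_like(x)          # created on the same device as x
    z = (x + y).cpu().numpy()       # move back to CPU before converting to NumPy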

    10. A two-layer neural network implemented with NumPy

    import numpy as np

    N, D_in, H, D_out = 64, 1000, 100, 10

    x = np.random.randn(N, D_in)
    y = np.random.randn(N, D_out)
    w1 = np.random.randn(D_in, H)
    w2 = np.random.randn(H, D_out)
    learning_rate = 1e-6
    for t in range(500):
        # Forward pass
        h = x.dot(w1)               # (N, H)
        h_relu = np.maximum(h, 0)
        y_pred = h_relu.dot(w2)

        # Compute and print loss
        loss = np.square(y_pred - y).sum()
        print(t, loss)

        # Backward pass: gradients of the loss with respect to w1 and w2
        grad_y_pred = 2.0 * (y_pred - y)
        grad_w2 = h_relu.T.dot(grad_y_pred)
        grad_h_relu = grad_y_pred.dot(w2.T)
        grad_h = grad_h_relu.copy()
        grad_h[h < 0] = 0
        grad_w1 = x.T.dot(grad_h)

        # Gradient descent update
        w1 -= learning_rate * grad_w1
        w2 -= learning_rate * grad_w2
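
    For reference, the backward pass above is just the chain rule applied to loss = sum((y_pred - y)^2) with y_pred = relu(x·w1)·w2:

    d(loss)/d(y_pred) = 2 * (y_pred - y)
    d(loss)/d(w2)     = h_relu^T · d(loss)/d(y_pred)
    d(loss)/d(h_relu) = d(loss)/d(y_pred) · w2^T
    d(loss)/d(h)      = d(loss)/d(h_relu), zeroed where h < 0   (ReLU derivative)
    d(loss)/d(w1)     = x^T · d(loss)/d(h)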

    11. A two-layer neural network implemented with torch tensors

      

    import torch
    
    
    dtype = torch.float
    device = torch.device("cpu")
    # device = torch.device("cuda:0") # Uncomment this to run on GPU
    
    # N is batch size; D_in is input dimension;
    # H is hidden dimension; D_out is output dimension.
    N, D_in, H, D_out = 64, 1000, 100, 10
    
    # Create random input and output data
    x = torch.randn(N, D_in, device=device, dtype=dtype)
    y = torch.randn(N, D_out, device=device, dtype=dtype)
    
    # Randomly initialize weights
    w1 = torch.randn(D_in, H, device=device, dtype=dtype)
    w2 = torch.randn(H, D_out, device=device, dtype=dtype)
    
    learning_rate = 1e-6
    for t in range(500):
        # Forward pass: compute predicted y
        h = x.mm(w1)
        h_relu = h.clamp(min=0)
        y_pred = h_relu.mm(w2)
    
        # Compute and print loss
        loss = (y_pred - y).pow(2).sum().item()
        print(t, loss)
    
        # Backprop to compute gradients of w1 and w2 with respect to loss
        grad_y_pred = 2.0 * (y_pred - y)
        grad_w2 = h_relu.t().mm(grad_y_pred)
        grad_h_relu = grad_y_pred.mm(w2.t())
        grad_h = grad_h_relu.clone()
        grad_h[h < 0] = 0
        grad_w1 = x.t().mm(grad_h)
    
        # Update weights using gradient descent
        w1 -= learning_rate * grad_w1
        w2 -= learning_rate * grad_w2

    autograd

    import torch
    
    dtype = torch.float
    device = torch.device("cpu")
    # device = torch.device("cuda:0") # Uncomment this to run on GPU
    
    # N is batch size; D_in is input dimension;
    # H is hidden dimension; D_out is output dimension.
    N, D_in, H, D_out = 64, 1000, 100, 10
    
    # Create random Tensors to hold inputs and outputs.
    # Setting requires_grad=False indicates that we do not need to compute gradients for them during the backward pass.
    x = torch.randn(N, D_in, device=device, dtype=dtype)
    y = torch.randn(N, D_out, device=device, dtype=dtype)
    
    # Create random Tensors for the weights.
    # Setting requires_grad=True indicates that we want gradients computed for these Tensors during the backward pass.
    w1 = torch.randn(D_in, H, device=device, dtype=dtype, requires_grad=True)
    w2 = torch.randn(H, D_out, device=device, dtype=dtype, requires_grad=True)
    
    learning_rate = 1e-6
    for t in range(500):
        # Forward pass: compute predicted y using operations on Tensors; this is exactly
        # the same as an ordinary forward pass, but we do not need to keep references to
        # intermediate values since we are not implementing the backward pass by hand.
        y_pred = x.mm(w1).clamp(min=0).mm(w2)
    
        # Compute the loss from the forward pass.
        # loss is a Tensor holding a single element;
        # loss.item() returns that value as a Python scalar.
        loss = (y_pred - y).pow(2).sum()
        print(t, loss.item())
    
        # PyTorch's autograd handles the backward pass for us. For every Tensor with requires_grad=True,
        # backward() automatically computes the gradient of the loss with respect to it.
        # After this call, w1.grad and w2.grad hold the gradients of the loss with respect to w1 and w2.
        loss.backward()
    
        # Manually update the weights using gradient descent (automatic methods come later).
        # Wrap the updates in torch.no_grad(): w1 and w2 have requires_grad=True,
        # but autograd should not track the update step itself.
        # An alternative is to operate on weight.data and weight.grad.data, which does not affect grad tracking:
        # tensor.data gives us a tensor that shares the same storage as the original
        # but does not record history in the computation graph.
        with torch.no_grad():
            w1 -= learning_rate * w1.grad
            w2 -= learning_rate * w2.grad
    
            # Manually zero the gradients after updating weights
            w1.grad.zero_()
            w2.grad.zero_()
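
    As a side note on the alternative mentioned in the comments above, the weight update can equivalently be written against .data so that autograd does not track it; a minimal sketch of how that would look inside the loop (torch.no_grad() remains the generally preferred style):

    # Alternative update inside the training loop, bypassing autograd via .data
    w1.data -= learning_rate * w1.grad.data
    w2.data -= learning_rate * w2.grad.data
    w1.grad.data.zero_()
    w2.grad.data.zero_()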

    optim

    import torch
    
    # N is batch size; D_in is input dimension;
    # H is hidden dimension; D_out is output dimension.
    N, D_in, H, D_out = 64, 1000, 100, 10
    
    # Create random Tensors to hold inputs and outputs
    x = torch.randn(N, D_in)
    y = torch.randn(N, D_out)
    
    # Use the nn package to define our model and loss function.
    model = torch.nn.Sequential(
        torch.nn.Linear(D_in, H),
        torch.nn.ReLU(),
        torch.nn.Linear(H, D_out),
    )
    loss_fn = torch.nn.MSELoss(reduction='sum')
    
    # Use the optim package to define an Optimizer that will update the weights of
    # the model for us. Here we will use Adam; the optim package contains many other
    # optimization algorithms. The first argument to the Adam constructor tells the
    # optimizer which Tensors it should update.
    learning_rate = 1e-4
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    for t in range(500):
        # Forward pass: compute predicted y by passing x to the model.
        y_pred = model(x)
    
        # Compute and print loss.
        loss = loss_fn(y_pred, y)
        print(t, loss.item())
    
        # Before the backward pass, use the optimizer object to zero all of the
        # gradients for the variables it will update (which are the learnable
        # weights of the model). This is because by default, gradients are
        # accumulated in buffers (i.e., not overwritten) whenever .backward()
        # is called. Check out the docs of torch.autograd.backward for more details.
        optimizer.zero_grad()
    
        # Backward pass: compute gradient of the loss with respect to model
        # parameters
        loss.backward()
    
        # Calling the step function on an Optimizer makes an update to its
        # parameters
        optimizer.step()
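
    The optim package makes it easy to swap in a different optimizer without touching the training loop; a minimal sketch (the SGD and scheduler hyperparameters here are illustrative, not from the original notes):

    # Swap Adam for SGD with momentum; the rest of the loop is unchanged.
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)
    # Optionally halve the learning rate every 100 iterations.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
    for t in range(500):
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()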

    Custom nn Modules

    import torch

    class TwoLayerNet(torch.nn.Module):
        def __init__(self, D_in, H, D_out):
            """
            In the constructor we instantiate two nn.Linear modules and assign them as
            member variables.
            """
            super(TwoLayerNet, self).__init__()
            self.linear1 = torch.nn.Linear(D_in, H)
            self.linear2 = torch.nn.Linear(H, D_out)

        def forward(self, x):
            """
            In the forward function we accept a Tensor of input data and we must return
            a Tensor of output data. We can use Modules defined in the constructor as
            well as arbitrary operators on Tensors.
            """
            h_relu = self.linear1(x).clamp(min=0)
            y_pred = self.linear2(h_relu)
            return y_pred


    # N is batch size; D_in is input dimension;
    # H is hidden dimension; D_out is output dimension.
    N, D_in, H, D_out = 64, 1000, 100, 10

    # Create random Tensors to hold inputs and outputs
    x = torch.randn(N, D_in)
    y = torch.randn(N, D_out)

    # Construct our model by instantiating the class defined above
    model = TwoLayerNet(D_in, H, D_out)

    # Construct our loss function and an Optimizer. The call to model.parameters()
    # in the SGD constructor will contain the learnable parameters of the two
    # nn.Linear modules which are members of the model.
    criterion = torch.nn.MSELoss(reduction='sum')
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
    for t in range(500):
        # Forward pass: Compute predicted y by passing x to the model
        y_pred = model(x)

        # Compute and print loss
        loss = criterion(y_pred, y)
        print(t, loss.item())

        # Zero gradients, perform a backward pass, and update the weights.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
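
    After training, the parameters registered by the two nn.Linear members can be inspected and saved through the state dict; a minimal sketch (the file name is illustrative):

    # List the learnable parameters of the custom module.
    for name, param in model.named_parameters():
        print(name, tuple(param.shape))   # e.g. linear1.weight (100, 1000), linear1.bias (100,)

    # Save the weights and load them into a fresh instance.
    torch.save(model.state_dict(), "two_layer_net.pt")
    model2 = TwoLayerNet(D_in, H, D_out)
    model2.load_state_dict(torch.load("two_layer_net.pt"))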

    
    