• Model-Building Exercise 2: Implementing nn.Module, optim, two_layer, and dynamic_net


    Implementing nn.Module with Variable

        import torch
        from torch.autograd import Variable

        # N is the batch size; D_in, H, D_out are the input, hidden, and
        # output dimensions.
        N, D_in, H, D_out = 64, 1000, 100, 10

        x = Variable(torch.randn(N, D_in))
        y = Variable(torch.randn(N, D_out), requires_grad=False)

        # Use the nn package to define the model as a sequence of layers.
        model = torch.nn.Sequential(
            torch.nn.Linear(D_in, H),
            torch.nn.ReLU(),
            torch.nn.Linear(H, D_out),
        )

        # Sum the squared errors instead of averaging them (size_average=False
        # is spelled reduction='sum' on PyTorch 0.4+).
        loss_fn = torch.nn.MSELoss(size_average=False)

        learning_rate = 1e-4
        for t in range(2):
            # Forward pass: compute predicted y by passing x to the model.
            y_pred = model(x)
            loss = loss_fn(y_pred, y)

            # Zero the gradients before running the backward pass.
            model.zero_grad()

            # Backward pass: compute the gradient of the loss with respect to
            # all the learnable parameters of the model. Internally, the
            # parameters of each Module are stored in Variables with
            # requires_grad=True, so this call computes gradients for all
            # learnable parameters in the model.
            loss.backward()

            # Update the weights with gradient descent. Each parameter is a
            # Variable, so .data and .grad.data access the raw tensors.
            for param in model.parameters():
                param.data -= learning_rate * param.grad.data
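
    For comparison, on PyTorch 0.4 and later Variable and Tensor are merged, so the same loop can be written with plain tensors; a minimal sketch under that assumption:

        import torch

        N, D_in, H, D_out = 64, 1000, 100, 10
        x = torch.randn(N, D_in)
        y = torch.randn(N, D_out)

        model = torch.nn.Sequential(
            torch.nn.Linear(D_in, H),
            torch.nn.ReLU(),
            torch.nn.Linear(H, D_out),
        )
        # reduction='sum' is the modern spelling of size_average=False.
        loss_fn = torch.nn.MSELoss(reduction='sum')

        learning_rate = 1e-4
        for t in range(2):
            y_pred = model(x)
            loss = loss_fn(y_pred, y)
            model.zero_grad()
            loss.backward()
            # Disable autograd tracking while updating the weights in place.
            with torch.no_grad():
                for param in model.parameters():
                    param -= learning_rate * param.grad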

    Implementing optim

        import torch
        from torch.autograd import Variable

        N, D_in, H, D_out = 64, 1000, 100, 10
        x = Variable(torch.randn(N, D_in))
        y = Variable(torch.randn(N, D_out), requires_grad=False)

        model = torch.nn.Sequential(
            torch.nn.Linear(D_in, H),
            torch.nn.ReLU(),
            torch.nn.Linear(H, D_out),
        )
        loss_fn = torch.nn.MSELoss(size_average=False)

        learning_rate = 1e-4
        # Use the optim package to define an Optimizer that will update the
        # weights of the model for us.
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
        for t in range(500):
            # Forward pass: compute predicted y by passing x to the model.
            y_pred = model(x)
            loss = loss_fn(y_pred, y)

            # Before the backward pass, use the optimizer object to zero all
            # of the gradients for the variables it will update (the learnable
            # weights of the model).
            optimizer.zero_grad()

            # Backward pass: compute the gradient of the loss with respect to
            # the model parameters.
            loss.backward()

            # Calling step() on an Optimizer applies one update to the
            # parameters.
            optimizer.step()
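
    Adam is just one of the update rules shipped in the optim package; any of them can be swapped in without touching the rest of the loop. A quick sketch (learning rates are illustrative, not tuned):

        # Plain stochastic gradient descent, optionally with momentum.
        optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)

        # RMSprop, an adaptive per-parameter method.
        optimizer = torch.optim.RMSprop(model.parameters(), lr=1e-4)

        # Adam with its default decay rates written out explicitly.
        optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, betas=(0.9, 0.999))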

    Implementing the two_layer model

        import torch
        from torch.autograd import Variable

        class TwoLayerNet(torch.nn.Module):
            def __init__(self, D_in, H, D_out):
                # Instantiate two nn.Linear modules and assign them as member
                # variables so nn.Module registers their parameters.
                super(TwoLayerNet, self).__init__()
                self.linear1 = torch.nn.Linear(D_in, H)
                self.linear2 = torch.nn.Linear(H, D_out)

            def forward(self, x):
                # clamp(min=0) applies the ReLU nonlinearity.
                h_relu = self.linear1(x).clamp(min=0)
                y_pred = self.linear2(h_relu)
                return y_pred

        N, D_in, H, D_out = 64, 1000, 100, 10
        x = Variable(torch.randn(N, D_in))
        y = Variable(torch.randn(N, D_out), requires_grad=False)

        model = TwoLayerNet(D_in, H, D_out)
        criterion = torch.nn.MSELoss(size_average=False)
        optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
        for t in range(2):
            y_pred = model(x)
            loss = criterion(y_pred, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
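
    Because linear1 and linear2 are assigned as attributes in __init__, nn.Module registers their weights and biases automatically. A brief sketch of inspecting and checkpointing them (the file name is illustrative):

        # List the parameters that the optimizer above is updating.
        for name, param in model.named_parameters():
            print(name, param.size())
        # linear1.weight, linear1.bias, linear2.weight, linear2.bias

        # Save and restore the weights via the module's state_dict.
        torch.save(model.state_dict(), 'two_layer.pt')  # illustrative path
        model2 = TwoLayerNet(D_in, H, D_out)
        model2.load_state_dict(torch.load('two_layer.pt'))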

    Implementing dynamic_net

        import random
        import torch
        from torch.autograd import Variable

        class DynamicNet(torch.nn.Module):
            def __init__(self, D_in, H, D_out):
                super(DynamicNet, self).__init__()
                self.input_linear = torch.nn.Linear(D_in, H)
                self.middle_linear = torch.nn.Linear(H, H)
                self.output_linear = torch.nn.Linear(H, D_out)

            def forward(self, x):
                # Reuse middle_linear a random number of times (0 to 3).
                # Because the graph is rebuilt on every forward pass, ordinary
                # Python control flow can change the network's depth, and
                # applying the same module repeatedly shares its weights.
                h_relu = self.input_linear(x).clamp(min=0)
                for _ in range(random.randint(0, 3)):
                    h_relu = self.middle_linear(h_relu).clamp(min=0)
                y_pred = self.output_linear(h_relu)
                return y_pred

        N, D_in, H, D_out = 64, 1000, 100, 10
        x = Variable(torch.randn(N, D_in))
        y = Variable(torch.randn(N, D_out), requires_grad=False)
        model = DynamicNet(D_in, H, D_out)

        criterion = torch.nn.MSELoss(size_average=False)
        optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)
        for t in range(2):
            y_pred = model(x)
            loss = criterion(y_pred, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
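
    Since middle_linear is applied anywhere from zero to three times with the same weights, the graph changes on every forward pass while the parameter count stays fixed. A quick check (the expected total follows from the three layer shapes above):

        # The reused middle_linear contributes its weights only once.
        n_params = sum(p.numel() for p in model.parameters())
        print(n_params)  # (1000+1)*100 + (100+1)*100 + (100+1)*10 = 111210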
• Original post: https://www.cnblogs.com/Joyce-song94/p/7477404.html