• PyTorch: comparing different optimizers


    Code:

    # compare several optimization methods side by side
    import torch
    import torch.utils.data as Data
    import torch.nn.functional as F
    import matplotlib.pyplot as plt
    
    # hyperparameters
    LR = 0.01
    BATCH_SIZE = 32
    EPOCH = 12
    
    if __name__ == '__main__':
        # data: a noisy quadratic, y = x^2 + noise
        x = torch.unsqueeze(torch.linspace(-1, 1, 1000), dim=1)   # shape (1000, 1)
        y = x.pow(2) + 0.1 * torch.randn(x.size())                # Gaussian noise, std 0.1
        # wrap the data in a DataLoader for shuffled mini-batches
        # (see the DataLoader sketch after this listing)
        torch_dataset = Data.TensorDataset(x,y)
        loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)
        # define a small two-layer network
        class Net(torch.nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.hidden = torch.nn.Linear(1, 20)    # hidden layer
                self.predict = torch.nn.Linear(20, 1)   # output layer
    
            def forward(self, x):
                x = F.relu(self.hidden(x))
                x = self.predict(x)
                return x
    
        # four identical nets, one per optimizer, so the comparison is fair
        net_SGD = Net()
        net_Momentum = Net()
        net_RMSprop = Net()
        net_Adam = Net()
        nets = [net_SGD, net_Momentum, net_RMSprop, net_Adam]   # collect them in a list
    
        # one optimizer per net; momentum, alpha and betas are the
        # optimizer-specific hyperparameters
        opt_SGD = torch.optim.SGD(net_SGD.parameters(), lr=LR)
        opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.8)
        opt_RMSprop = torch.optim.RMSprop(net_RMSprop.parameters(), lr=LR, alpha=0.9)
        opt_Adam = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))
        optimizers = [opt_SGD, opt_Momentum, opt_RMSprop, opt_Adam]
    
        loss_func = torch.nn.MSELoss()
        losses_history = [[], [], [], []]   # one loss history per optimizer
    
        for epoch in range(EPOCH):
            print('epoch', epoch)
            for step, (batch_x, batch_y) in enumerate(loader):
                # zip pairs corresponding elements of its iterables into tuples,
                # so each step trains all four nets on the same mini-batch
                # (see the zip/aliasing sketch after this listing)
                for net, opt, l_his in zip(nets, optimizers, losses_history):
                    output = net(batch_x)
                    loss = loss_func(output, batch_y)
                    opt.zero_grad()    # clear gradients from the previous step
                    loss.backward()    # backpropagate
                    opt.step()         # apply this optimizer's update rule
                    # l_his is a reference to one of the inner lists of losses_history,
                    # so appending here records the loss into losses_history itself
                    # (see the aliasing sketch below)
                    l_his.append(loss.item())
    
        labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
        for i, l_his in enumerate(losses_history):
            plt.plot(l_his, label=labels[i])   # one loss curve per optimizer
        plt.legend(loc='best')                 # add a legend
        plt.xlabel('Steps')
        plt.ylabel('Loss')
        plt.ylim(0, 0.2)                       # clamp the y-axis so the curves stay readable
        plt.show()
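
    The mini-batching above is the standard TensorDataset/DataLoader pattern:
    the loader shuffles the 1000 samples each epoch and yields them in chunks
    of BATCH_SIZE. A minimal standalone sketch (the variable names here are
    illustrative, not from the original post):

    import torch
    import torch.utils.data as Data

    x = torch.linspace(-1, 1, 1000).unsqueeze(1)   # shape (1000, 1)
    loader = Data.DataLoader(Data.TensorDataset(x, x.pow(2)),
                             batch_size=32, shuffle=True)

    for step, (bx, by) in enumerate(loader):
        print(step, bx.shape)   # torch.Size([32, 1]) for every full batch;
                                # the last batch holds the remaining 1000 % 32 = 8 samples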
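
    To answer the question left in the loss-recording comment: each l_his
    yielded by zip is a reference to one of the inner lists of losses_history,
    so l_his.append(...) mutates losses_history in place. A minimal sketch of
    that aliasing behavior (the names are illustrative):

    names = ['SGD', 'Adam']
    histories = [[], []]                 # like losses_history: one inner list each

    for name, h in zip(names, histories):
        h.append(0.5)                    # h aliases an inner list of histories,
        h.append(0.1)                    # so these appends mutate histories itself

    print(histories)                     # prints [[0.5, 0.1], [0.5, 0.1]]
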
  • Original post: https://www.cnblogs.com/loyolh/p/12299900.html