• Manually implementing L2 regularization in a multi-class classification experiment


    1 Import the required packages

    import torch  
    import numpy as np  
    import random  
    from IPython import display  
    from matplotlib import pyplot as plt  
    import torchvision  
    import torchvision.transforms as transforms   

    2 Download the MNIST dataset

    mnist_train = torchvision.datasets.MNIST(root='../Datasets/MNIST', train=True, download=True, transform=transforms.ToTensor())  
    mnist_test = torchvision.datasets.MNIST(root='../Datasets/MNIST', train=False,download=True, transform=transforms.ToTensor())  
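
    As a quick sanity check (optional), you can confirm the download worked by inspecting the dataset sizes and the shape of one sample:

    print(len(mnist_train), len(mnist_test))   # 60000 10000
    img, label = mnist_train[0]
    print(img.shape, label)                    # torch.Size([1, 28, 28]) and an integer class label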

    3 Load the data

    batch_size = 256 
    train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True,num_workers=0)  
    test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False,num_workers=0)  
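
    To verify that the loaders produce batches of the expected shape, a minimal check could be:

    X, y = next(iter(train_iter))
    print(X.shape, y.shape)   # torch.Size([256, 1, 28, 28]) torch.Size([256])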

    4 Initialize the parameters and define the hidden-layer activation function

    num_inputs, num_hiddens, num_outputs = 784, 256, 10
    def init_param():
        # weights drawn from N(0, 0.01); W1: (num_hiddens, num_inputs), W2: (num_outputs, num_hiddens)
        W1 = torch.tensor(np.random.normal(0, 0.01, (num_hiddens, num_inputs)), dtype=torch.float32)
        b1 = torch.zeros(1, dtype=torch.float32)   # biases start at zero (broadcast over the layer)
        W2 = torch.tensor(np.random.normal(0, 0.01, (num_outputs, num_hiddens)), dtype=torch.float32)
        b2 = torch.zeros(1, dtype=torch.float32)
        params = [W1, b1, W2, b2]
        for param in params:
            param.requires_grad_(requires_grad=True)   # track gradients for all parameters
        return W1, b1, W2, b2
    def relu(x):
        # element-wise max(x, 0)
        return torch.max(input=x, other=torch.tensor(0.0))
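
    A one-line sanity check for the activation (negative inputs should be clamped to zero):

    print(relu(torch.tensor([-1.0, 0.0, 2.0])))   # tensor([0., 0., 2.])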

    5 Define the model

    def net(X):  
        X = X.view((-1,num_inputs))  
        H = relu(torch.matmul(X,W1.t())+b1)  
        return torch.matmul(H,W2.t())+b2  
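
    Before training, a dummy forward pass can confirm that the network produces one logit per class. This sketch assumes the helpers defined above and creates a fresh set of parameters:

    W1, b1, W2, b2 = init_param()
    dummy = torch.randn(4, 1, 28, 28)   # a fake batch of 4 "images"
    print(net(dummy).shape)             # torch.Size([4, 10])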

    6 Define the cross-entropy loss function and the optimizer

    loss = torch.nn.CrossEntropyLoss()  
    def SGD(params, lr):
        # plain mini-batch SGD: update each parameter in place
        for param in params:
            param.data -= lr * param.grad
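
    The hand-written SGD above is kept for reference; the training loop in step 9 uses torch.optim.SGD instead. If you did want to drive it manually, one training step would look roughly like the sketch below (the learning rate 0.1 is only an example value), and the gradients have to be cleared by hand between batches:

    # one manual training step with the hand-written SGD (sketch)
    W1, b1, W2, b2 = init_param()
    params = [W1, b1, W2, b2]
    X, y = next(iter(train_iter))     # one mini-batch
    l = loss(net(X), y)
    l.backward()
    SGD(params, 0.1)                  # in-place update: param.data -= lr * param.grad
    for param in params:
        param.grad.data.zero_()       # clear gradients before the next batch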

    7 Define the L2 penalty

    def l2_penalty(w):
        return (w**2).sum()/2
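
    With this penalty, the objective minimized in step 8 becomes the cross-entropy loss plus lambda * (||W1||^2 / 2 + ||W2||^2 / 2); only the weight matrices are penalized, not the biases. A quick check that l2_penalty is indeed half the squared Frobenius norm:

    w = torch.randn(3, 4)
    print(torch.allclose(l2_penalty(w), torch.norm(w) ** 2 / 2))   # True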

    8 Define the training function

    def train(net, train_iter, test_iter, loss, num_epochs, batch_size, lr=None, optimizer=None, mylambda=0):
        train_ls, test_ls = [], []
        for epoch in range(num_epochs):
            ls, count = 0, 0
            for X, y in train_iter:
                X = X.reshape(-1, num_inputs)
                # cross-entropy loss plus the L2 penalty on the (global) weight matrices used by net
                l = loss(net(X), y) + mylambda * l2_penalty(W1) + mylambda * l2_penalty(W2)
                optimizer.zero_grad()
                l.backward()
                optimizer.step()
                ls += l.item()
                count += y.shape[0]
            train_ls.append(ls)
            # evaluate on the test set; no_grad avoids building a computation graph
            ls, count = 0, 0
            with torch.no_grad():
                for X, y in test_iter:
                    X = X.reshape(-1, num_inputs)
                    l = loss(net(X), y) + mylambda * l2_penalty(W1) + mylambda * l2_penalty(W2)
                    ls += l.item()
                    count += y.shape[0]
            test_ls.append(ls)
            if (epoch + 1) % 5 == 0:
                print('epoch: %d, train loss: %f, test loss: %f' % (epoch + 1, train_ls[-1], test_ls[-1]))
        return train_ls, test_ls
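
    The penalty regularizes because the gradient of lambda * ||W||^2 / 2 with respect to W is lambda * W, so every optimizer step also shrinks the weights toward zero (the classic "weight decay" effect). A minimal check of that gradient:

    # d/dW [ 0.1 * l2_penalty(W) ] should equal 0.1 * W
    W = torch.randn(2, 3, requires_grad=True)
    (0.1 * l2_penalty(W)).backward()
    print(torch.allclose(W.grad, 0.1 * W.detach()))   # True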

    9 Train the model

    lr = 0.01  
    num_epochs = 20 
    
    Lamda = [0,0.1,0.2,0.3,0.4,0.5]
    Train_ls, Test_ls = [], []
    for lamda in Lamda:
        print("current lambda is %f"%lamda)
        W1,b1,W2,b2 = init_param()
        loss = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.SGD([W1, b1, W2, b2], lr=lr)
        train_ls, test_ls = train(net,train_iter,test_iter,loss,num_epochs,batch_size,lr,optimizer,lamda)   
        Train_ls.append(train_ls)
        Test_ls.append(test_ls)
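
    For comparison, a similar effect can be obtained without the explicit penalty terms by using the weight_decay argument of torch.optim.SGD, which adds lambda * w to each parameter's gradient, exactly the gradient of the lambda * ||w||^2 / 2 penalty used here. A sketch (keeping the biases decay-free via parameter groups):

    W1, b1, W2, b2 = init_param()
    optimizer = torch.optim.SGD([
        {'params': [W1, W2], 'weight_decay': 0.1},   # L2 decay on the weight matrices
        {'params': [b1, b2], 'weight_decay': 0.0},   # no decay on the biases
    ], lr=lr)
    train_ls, test_ls = train(net, train_iter, test_iter, loss, num_epochs, batch_size, lr, optimizer, mylambda=0)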

    10 Plot the loss curves for the training and test sets

    x = np.arange(1, len(Train_ls[0]) + 1)   # epoch index 1..num_epochs
    plt.figure(figsize=(10, 8))
    for i in range(len(Lamda)):
        plt.plot(x, Train_ls[i], label=f'L2_Regularization:{Lamda[i]}', linewidth=1.5)
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend(loc=2, bbox_to_anchor=(1.1, 1.0), borderaxespad=0.)
    plt.title('train loss with L2_penalty')
    plt.show()
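
    The same pattern can be reused for the test-set curves collected in Test_ls, for example:

    plt.figure(figsize=(10, 8))
    for i in range(len(Lamda)):
        plt.plot(x, Test_ls[i], label=f'L2_Regularization:{Lamda[i]}', linewidth=1.5)
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend(loc=2, bbox_to_anchor=(1.1, 1.0), borderaxespad=0.)
    plt.title('test loss with L2_penalty')
    plt.show()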
