TensorFlow2_200729 Series --- 3. Gradient Descent for a Simple Linear Regression Example

    I. Summary

    One-sentence summary:

    Gradient descent: gradient descent operates on the loss function, taking the gradient of the loss with respect to w and b; e.g. w = w - learningRate * (gradient of loss with respect to w)
    Animated plot: in the animation just update the y-axis data; if a text annotation needs updating, update that as well
    import numpy as np
    from matplotlib import pyplot as plt
    from matplotlib import animation
    from matplotlib import pylab
    %pylab
    
    fig,ax=plt.subplots()
    
    # draw the scatter plot
    x = data.iloc[:,0]
    y = data.iloc[:,-1]
    ax.scatter(x,y)
    
    # draw the fitted line
    
    line,=ax.plot(x,0*x+0)
    # keep a reference to text1 so the animation can update it
    text1 = ax.text(30,110,"w=0.0000,b=0.0000", fontdict={'size':16,'color':'r'})
    
    def animate(i): 
        text1.set_text('w=%.4f,b=%.4f' %(bw_list[i][1],bw_list[i][0]))
        line.set_ydata(bw_list[i][1]*x+bw_list[i][0])
        return line,
    
    def init(): 
        line.set_ydata(0*x+0)
        return line,
    
    ani=animation.FuncAnimation(fig=fig,func=animate,frames=999,init_func=init,interval=5,blit=False)
    ani.save('line_model.gif', writer='imagemagick', fps=30)
    plt.show()

    1. Why does gradient descent converge at an extremum?

    Because the slope (gradient) at an extremum is 0, so learning rate * gradient is also 0 and the parameter stops changing (see the sketch below).
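    A tiny sketch of this (my addition, using the toy function f(w) = (w - 3)**2 rather than this post's loss): the gradient 2(w - 3) shrinks as w nears the minimum at 3, so the updates die out on their own:

    w, lr = 0.0, 0.1
    for step in range(50):
        grad = 2 * (w - 3)   # derivative of f(w) = (w - 3)**2; goes to 0 near the minimum
        w = w - lr * grad    # the update shrinks together with the gradient
    print(w)  # ~3.0: at the minimum the gradient is 0, so w stops moving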

    2. Why multiply by a step size (learning rate) in gradient descent?

    Without a step size, the derivative can be too large in some cases and the update overshoots the minimum (see the sketch below).
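    A sketch of the failure mode (my addition, same toy function f(w) = (w - 3)**2 as above): using the raw gradient as the step bounces past the minimum forever, while a small learning rate converges:

    def descend(lr, steps=20):
        w = 0.0
        for _ in range(steps):
            w = w - lr * 2 * (w - 3)   # gradient of (w - 3)**2
        return w

    print(descend(lr=1.0))   # update becomes w -> 6 - w: oscillates between 0 and 6, never settles
    print(descend(lr=0.1))   # steadily approaches the minimum at 3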

    3. How do you update a text annotation in a matplotlib animation?

    text1 = ax.text(30,110,"w=0.0000,b=0.0000", fontdict={'size':16,'color':'r'})
    text1.set_text('w=%.4f,b=%.4f' %(bw_list[i][1],bw_list[i][0]))

    4. How do you draw an animated plot with matplotlib?

    Mainly an init function and an update function; update the y data in the update function: line.set_ydata(bw_list[i][1]*x+bw_list[i][0])

    5. The core gradient descent code for this problem (matching the formula derivation)?

    grad_b = 2(wx+b-y): b_gradient += (2/N) * ((w_current * x + b_current) - y)
    grad_w = 2(wx+b-y)*x: w_gradient += (2/N) * x * ((w_current * x + b_current) - y)
    (A vectorized NumPy sketch follows the loop version below.)
    def step_gradient(b_current, w_current, points, learningRate):
        b_gradient = 0
        w_gradient = 0
        N = float(len(points))
        for i in range(0, len(points)):
            x = points[i, 0]
            y = points[i, 1]
            # grad_b = 2(wx+b-y)
            b_gradient += (2/N) * ((w_current * x + b_current) - y)
            # grad_w = 2(wx+b-y)*x
            w_gradient += (2/N) * x * ((w_current * x + b_current) - y)
        # update b and w
        new_b = b_current - (learningRate * b_gradient)
        new_w = w_current - (learningRate * w_gradient)
        return [new_b, new_w]
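    These per-sample increments implement grad_b = (2/N)*Σ(wx+b-y) and grad_w = (2/N)*Σ(wx+b-y)*x for the average loss (1/N)*Σ(wx+b-y)². The same step can be written without the Python loop; a minimal NumPy sketch (my addition, assuming points is an (N, 2) array):

    import numpy as np

    def step_gradient_vectorized(b_current, w_current, points, learningRate):
        x, y = points[:, 0], points[:, 1]
        error = (w_current * x + b_current) - y             # per-sample residual wx+b-y
        b_gradient = (2 / len(points)) * error.sum()        # grad_b = (2/N)*sum(error)
        w_gradient = (2 / len(points)) * (x * error).sum()  # grad_w = (2/N)*sum(error*x)
        return [b_current - learningRate * b_gradient,
                w_current - learningRate * w_gradient]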

    6. The least-squares loss function used in this problem?

    totalError += (y - (w * x + b)) ** 2
    # y = wx + b
    def compute_error_for_line_given_points(b, w, points):
        totalError = 0
        for i in range(0, len(points)):
            x = points[i, 0]
            y = points[i, 1]
            # compute the mean-squared error
            totalError += (y - (w * x + b)) ** 2
        # average loss for each point
        return totalError / float(len(points))
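    The same average loss in one line of NumPy (my addition, a sketch assuming points is an (N, 2) array):

    import numpy as np

    def compute_error_vectorized(b, w, points):
        x, y = points[:, 0], points[:, 1]
        return np.mean((y - (w * x + b)) ** 2)   # mean squared error over all points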

    II. Gradient Descent for a Simple Linear Regression Example

    Video location in the course corresponding to this blog post:

    In [1]:
    import numpy as np
    
    
    # data = []
    # for i in range(100):
    # 	x = np.random.uniform(3., 12.)
    # 	# mean=0, std=0.1
    # 	eps = np.random.normal(0., 0.1)
    # 	y = 1.477 * x + 0.089 + eps
    # 	data.append([x, y])
    # data = np.array(data)
    # print(data.shape, data)
    
    # y = wx + b
    def compute_error_for_line_given_points(b, w, points):
        totalError = 0
        for i in range(0, len(points)):
            x = points[i, 0]
            y = points[i, 1]
            # compute the mean-squared error
            totalError += (y - (w * x + b)) ** 2
        # average loss for each point
        return totalError / float(len(points))
    
    
    
    def step_gradient(b_current, w_current, points, learningRate):
        b_gradient = 0
        w_gradient = 0
        N = float(len(points))
        for i in range(0, len(points)):
            x = points[i, 0]
            y = points[i, 1]
            # grad_b = 2(wx+b-y)
            b_gradient += (2/N) * ((w_current * x + b_current) - y)
            # grad_w = 2(wx+b-y)*x
            w_gradient += (2/N) * x * ((w_current * x + b_current) - y)
        # update b and w
        new_b = b_current - (learningRate * b_gradient)
        new_w = w_current - (learningRate * w_gradient)
        return [new_b, new_w]
    
    def gradient_descent_runner(points, starting_b, starting_w, learning_rate, num_iterations):
        b = starting_b
        w = starting_w
        # update for several times
        for i in range(num_iterations):
            b, w = step_gradient(b, w, np.array(points), learning_rate)
        return [b, w]
    
    
    def run():
        points = np.genfromtxt("data.csv", delimiter=",")
        learning_rate = 0.0001
        initial_b = 0 # initial y-intercept guess
        initial_w = 0 # initial slope guess
        num_iterations = 1000
        print("Starting gradient descent at b = {0}, w = {1}, error = {2}"
              .format(initial_b, initial_w,
                      compute_error_for_line_given_points(initial_b, initial_w, points))
              )
        print("Running...")
        [b, w] = gradient_descent_runner(points, initial_b, initial_w, learning_rate, num_iterations)
        print("After {0} iterations b = {1}, w = {2}, error = {3}".
              format(num_iterations, b, w,
                     compute_error_for_line_given_points(b, w, points))
              )
    
    if __name__ == '__main__':
        run()
    
    Starting gradient descent at b = 0, w = 0, error = 5565.107834483211
    Running...
    After 1000 iterations b = 0.08893651993741346, w = 1.4777440851894448, error = 112.61481011613473
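    If the course's data.csv isn't at hand, the commented-out generator at the top of this cell can be turned into a runnable sketch to produce a compatible file (note its x range of 3-12 differs from the course file shown below, so the fitted numbers will differ):

    import numpy as np

    # generate 100 noisy points around y = 1.477x + 0.089 and save them
    # in the two-column CSV format the script above expects
    data = []
    for i in range(100):
        x = np.random.uniform(3., 12.)
        eps = np.random.normal(0., 0.1)   # mean=0, std=0.1 noise
        data.append([x, 1.477 * x + 0.089 + eps])
    np.savetxt("data.csv", np.array(data), delimiter=",")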
    
    In [2]:
    import numpy as np
    import matplotlib.pyplot as plt
    import pandas as pd
    
    data = pd.read_csv('data.csv',header=None)
    data
    
    Out[2]:
                0          1
    0   32.502345  31.707006
    1   53.426804  68.777596
    2   61.530358  62.562382
    3   47.475640  71.546632
    4   59.813208  87.230925
    ..        ...        ...
    95  50.030174  81.536991
    96  49.239765  72.111832
    97  50.039576  85.232007
    98  48.149859  66.224958
    99  25.128485  53.454394

    100 rows × 2 columns

    In [3]:
    x = data.iloc[:,0]
    y = data.iloc[:,-1]
    plt.scatter(x,y)
    plt.show()
    
    In [4]:
    learning_rate = 0.0001
    initial_b = 0 # initial y-intercept guess
    initial_w = 0 # initial slope guess
    num_iterations = 1000
    
    In [6]:
    # compute the error
    # y = wx + b
    def compute_error_for_line_given_points(b, w, points):
        totalError = 0
        for i in range(0, len(points)):
            x = points.iloc[i,0]
            y = points.iloc[i,1]
            # compute the mean-squared error
            totalError += (y - (w * x + b)) ** 2
        # average loss for each point
        return totalError / float(len(points))
    
    In [8]:
    print("Starting gradient descent at b = {0}, w = {1}, error = {2}"
          .format(initial_b, initial_w,
                  compute_error_for_line_given_points(initial_b, initial_w, data))
          )
    print("Running...")
    
    Starting gradient descent at b = 0, w = 0, error = 5565.10783448321
    Running...
    
    In [15]:
    def step_gradient(b_current, w_current, points, learningRate):
        b_gradient = 0
        w_gradient = 0
        N = float(len(points))
        for i in range(0, len(points)):
            x = points.iloc[i, 0]
            y = points.iloc[i, 1]
            # grad_b = 2(wx+b-y)
            b_gradient += (2/N) * ((w_current * x + b_current) - y)
            # grad_w = 2(wx+b-y)*x
            w_gradient += (2/N) * x * ((w_current * x + b_current) - y)
        # update b and w
        new_b = b_current - (learningRate * b_gradient)
        new_w = w_current - (learningRate * w_gradient)
        # print("w为{},b为{}".format(new_b,new_w))
        return [new_b, new_w]
    
    bw_list=[]
    def gradient_descent_runner(points, starting_b, starting_w, learning_rate, num_iterations):
        b = starting_b
        w = starting_w
        # update for several times
        for i in range(num_iterations):
            b, w = step_gradient(b, w, points, learning_rate)
            bw_list.append((b,w))
        return [b, w]
    
    In [17]:
    [b, w] = gradient_descent_runner(data, initial_b, initial_w, learning_rate, num_iterations)
    print("After {0} iterations b = {1}, w = {2}, error = {3}".
          format(num_iterations, b, w,
                 compute_error_for_line_given_points(b, w, data))
          )
    # print(bw_list)
    
    After 1000 iterations b = 0.08893651993741353, w = 1.4777440851894448, error = 112.61481011613472
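    As a sanity check (my addition, assuming data.csv is available), NumPy's closed-form least-squares fit can be compared with the 1000-iteration result; with this small learning rate the intercept b in particular is still far from converged:

    import numpy as np

    points = np.genfromtxt("data.csv", delimiter=",")
    w_ls, b_ls = np.polyfit(points[:, 0], points[:, 1], deg=1)   # deg=1 returns [slope, intercept]
    print("closed-form least squares: w = {:.4f}, b = {:.4f}".format(w_ls, b_ls))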
    
    In [20]:
    print(bw_list[0][0],bw_list[0][1])
    print(bw_list[1][0],bw_list[1][1])
    
    0.014547010110737297 0.7370702973591052
    0.02187396295959641 1.1067954543515157
    
    In [46]:
    import numpy as np
    from matplotlib import pyplot as plt
    from matplotlib import animation
    from matplotlib import pylab
    %pylab
    
    fig,ax=plt.subplots()
    
    # draw the scatter plot
    x = data.iloc[:,0]
    y = data.iloc[:,-1]
    ax.scatter(x,y)
    
    # draw the fitted line
    
    line,=ax.plot(x,0*x+0)
    # keep a reference to text1 so the animation can update it
    text1 = ax.text(30,110,"w=0.0000,b=0.0000", fontdict={'size':16,'color':'r'})
    
    def animate(i): 
        text1.set_text('w=%.4f,b=%.4f' %(bw_list[i][1],bw_list[i][0]))
        line.set_ydata(bw_list[i][1]*x+bw_list[i][0])
        return line,
    
    def init(): 
        line.set_ydata(0*x+0)
        return line,
    
    ani=animation.FuncAnimation(fig=fig,func=animate,frames=999,init_func=init,interval=5,blit=False)
    ani.save('line_model.gif', writer='imagemagick', fps=30)
    plt.show()
    
    Using matplotlib backend: Qt5Agg
    Populating the interactive namespace from numpy and matplotlib
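    If ImageMagick isn't installed, matplotlib's built-in Pillow writer can save the GIF instead (an alternative to the writer used above):

    ani.save('line_model.gif', writer='pillow', fps=30)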
    
  • Original article: https://www.cnblogs.com/Renyi-Fan/p/13412551.html