• Multivariate linear regression (implemented with both numpy and tensorflow)


    ...All of my machine learning code so far has been copied from others' work; this is the first piece I've written entirely on my own (simple as it is).

    Keep it up~~~

    import numpy as np
    import tensorflow as tf
    import time

    # generate data: 10000 samples, n_dims features each
    n_dims = 100
    x = np.random.randn(10000, n_dims)
    w1 = np.random.rand(n_dims)  # ground-truth weights
    b1 = -0.32                   # ground-truth bias
    y = np.matmul(x, w1) + b1

    # init: numpy and tensorflow start from the same w, b
    w = np.random.randn(n_dims)
    b = np.random.randn(1)
    l_t = 0.00001  # learning rate
    epoch = 1000

    # one numpy gradient-descent step on the sum-of-squares loss
    def train(x, target, w, b):
        y = np.matmul(x, w) + b
        loss = np.sum((y - target) ** 2)
        z = x * (y - target)[:, np.newaxis]  # per-sample contribution to the gradient
        w_g = np.sum(z, axis=0)              # d(loss)/dw, up to a constant factor of 2
        b_g = np.sum(y - target)             # d(loss)/db, up to a constant factor of 2
        w -= l_t * w_g
        b -= l_t * b_g
        return loss, w, b


    # numpy training loop
    def nmp(x, y, w, b):
        t1 = time.time()
        for i in range(epoch):
            loss, w, b = train(x, y, w, b)
            if i % 100 == 0:
                print(i, loss)
        print('np_time:', time.time() - t1)
        return w, b


    # tensorflow (1.x) training loop
    def tfw(x, y, w, b):
        t1 = time.time()
        wf = tf.Variable(w, dtype='float')
        bf = tf.Variable(b, dtype='float')
        xf = tf.placeholder('float', [None, n_dims])
        yf = tf.placeholder('float', [None, 1])
        # NOTE: tf.matmul requires 2-D tensors, so w is passed in with shape (n_dims, 1)
        out = tf.add(tf.matmul(xf, wf), bf)
        loss = tf.reduce_sum(tf.pow(out - yf, 2))
        optimizer = tf.train.GradientDescentOptimizer(l_t).minimize(loss)
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init)
            y = y[:, np.newaxis]  # expand y to a 2-D matrix as well
            for i in range(epoch):
                _loss, _ = sess.run([loss, optimizer], feed_dict={xf: x, yf: y})
                if i % 100 == 0:
                    print(i, _loss)
            print("tf_time:", time.time() - t1)
            return sess.run([wf, bf])


    # test on one random point
    tmp = np.random.rand(n_dims)
    ans = np.sum(tmp * w1) + b1
    # numpy (copy w, b so both implementations really start from the same initial values)
    wn, bn = w.copy(), b.copy()
    wn, bn = nmp(x, y, wn, bn)
    print(ans - np.sum(tmp * wn) - bn)  # time: ~11 s, error: ~1e-25
    # tensorflow
    w = w[:, np.newaxis]
    wf, bf = tfw(x, y, w, b)
    print(ans - np.sum(tmp * wf.reshape(-1)) - bf)  # time: ~6 s, error: ~4e-9
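
    A note on the gradients (my addition, not from the original post): train() updates w with X^T(Xw + b - t), which is the gradient of the sum-of-squares loss divided by 2, while tf.train.GradientDescentOptimizer uses the exact gradient 2*X^T(Xw + b - t). With the same l_t the two loops therefore take steps that differ by a factor of two, so the two runs above are not strictly step-for-step comparable. The sketch below checks this numerically with a finite-difference gradient on a small standalone example; the helper names (loss_fn, hand_grad_w) are mine, not from the post.

    import numpy as np

    def loss_fn(x, t, w, b):
        # same sum-of-squares loss as in the post
        y = np.matmul(x, w) + b
        return np.sum((y - t) ** 2)

    def hand_grad_w(x, t, w, b):
        # the w-gradient used in train(): X^T (Xw + b - t), i.e. the true gradient / 2
        y = np.matmul(x, w) + b
        return np.sum(x * (y - t)[:, np.newaxis], axis=0)

    # small standalone example (independent of the data above)
    rng = np.random.RandomState(0)
    x = rng.randn(50, 5)
    t = rng.randn(50)
    w = rng.randn(5)
    b = rng.randn(1)

    # finite-difference estimate of d(loss)/dw[0]
    eps = 1e-6
    w_plus, w_minus = w.copy(), w.copy()
    w_plus[0] += eps
    w_minus[0] -= eps
    num_grad = (loss_fn(x, t, w_plus, b) - loss_fn(x, t, w_minus, b)) / (2 * eps)

    print(num_grad / hand_grad_w(x, t, w, b)[0])  # prints ~2.0: train() drops the factor of 2

    In practice this only means the numpy loop's effective learning rate is half the tensorflow one; both versions still converge on this problem.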
• Original post: https://www.cnblogs.com/xidian-mao/p/11303140.html