Neural Networks and Deep Learning (Xipeng Qiu), Programming Exercise 4: FNN, a Simple Neural Network (Jupyter Export, TensorFlow)


    GitHub - nndl/nndl-exercise-ans: Solutions for nndl/exercise

    Prepare the data

    import os
    import numpy as np
    import tensorflow as tf
    from tensorflow import keras
    from tensorflow.keras import layers, optimizers, datasets
    
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # or any {'0', '1', '2'}
    
    def mnist_dataset():
        (x, y), (x_test, y_test) = datasets.mnist.load_data()
        #normalize
        x = x/255.0
        x_test = x_test/255.0
        
        return (x, y), (x_test, y_test)
    
    A quick demonstration of zip, which is used later in train_one_step to pair each gradient with its variable:

    print(list(zip([1, 2, 3, 4], ['a', 'b', 'c', 'd'])))
    
    [(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')]
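
    As a quick sanity check (a minimal sketch added here, not part of the original notebook), the tuples returned by mnist_dataset() hold the full MNIST arrays:

    (x, y), (x_test, y_test) = mnist_dataset()
    print(x.shape, y.shape)            # (60000, 28, 28) (60000,)
    print(x_test.shape, y_test.shape)  # (10000, 28, 28) (10000,)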
    

    Build the model

    class myModel:
        def __init__(self):
            ####################
            '''Declare the model parameters'''
            ####################
            self.W1 = tf.Variable(shape=[28*28, 100], dtype=tf.float32,
                             initial_value=tf.random.uniform(shape=[28*28, 100],
                                                             minval=-0.1, maxval=0.1))
            self.b1 = tf.Variable(shape=[100], dtype=tf.float32, initial_value=tf.zeros(100))
            self.W2 = tf.Variable(shape=[100, 10], dtype=tf.float32,
                             initial_value=tf.random.uniform(shape=[100, 10],
                                                             minval=-0.1, maxval=0.1))
            self.b2 = tf.Variable(shape=[10], dtype=tf.float32, initial_value=tf.zeros(10))
            self.trainable_variables = [self.W1, self.W2, self.b1, self.b2]
        def __call__(self, x):
            ####################
            '''Forward pass: return the unnormalized logits'''
            ####################
            flat_x = tf.reshape(x, shape=[-1, 28*28])
            h1 = tf.tanh(tf.matmul(flat_x, self.W1) + self.b1)
            logits = tf.matmul(h1, self.W2) + self.b2
            return logits
            
    model = myModel()
    
    optimizer = optimizers.Adam()
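
    For comparison, the same architecture (784 inputs, a 100-unit tanh hidden layer, 10 output logits) could also be written with the already-imported keras.Sequential and layers API. The sketch below is only an equivalent illustration (the name keras_model is introduced here); it is not the model trained below:

    keras_model = keras.Sequential([
        layers.Flatten(input_shape=(28, 28)),   # flatten 28x28 images into 784-dim vectors
        layers.Dense(100, activation='tanh'),   # hidden layer, matching W1/b1
        layers.Dense(10)                        # output layer, returns unnormalized logits
    ])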
    

    Compute the loss

    @tf.function
    def compute_loss(logits, labels):
        return tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=labels))
    
    @tf.function
    def compute_accuracy(logits, labels):
        predictions = tf.argmax(logits, axis=1)
        return tf.reduce_mean(tf.cast(tf.equal(predictions, labels), tf.float32))
    
    @tf.function
    def train_one_step(model, optimizer, x, y):
        with tf.GradientTape() as tape:
            logits = model(x)
            loss = compute_loss(logits, y)
    
        # compute gradients and apply a plain gradient-descent update (learning rate 0.01);
        # note that the Adam optimizer passed in is not used by this manual update
        grads = tape.gradient(loss, model.trainable_variables)
        for g, v in zip(grads, model.trainable_variables):
            v.assign_sub(0.01*g)
    
        accuracy = compute_accuracy(logits, y)
    
        # loss and accuracy are scalar tensors
        return loss, accuracy
    
    @tf.function
    def test(model, x, y):
        logits = model(x)
        loss = compute_loss(logits, y)
        accuracy = compute_accuracy(logits, y)
        return loss, accuracy
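
    Note that train_one_step above updates the parameters with a hand-coded gradient-descent step and ignores the Adam optimizer it receives. If one wanted to use the passed-in optimizer instead, the step could look like the following sketch (train_one_step_adam is an illustrative alternative, not the code whose output is shown below):

    @tf.function
    def train_one_step_adam(model, optimizer, x, y):
        with tf.GradientTape() as tape:
            logits = model(x)
            loss = compute_loss(logits, y)
        grads = tape.gradient(loss, model.trainable_variables)
        # let the optimizer decide the update instead of a fixed 0.01 step
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        accuracy = compute_accuracy(logits, y)
        return loss, accuracy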
    

    Train the model

    train_data, test_data = mnist_dataset()
    for epoch in range(50):
        loss, accuracy = train_one_step(model, optimizer, 
                                        tf.constant(train_data[0], dtype=tf.float32), 
                                        tf.constant(train_data[1], dtype=tf.int64))
        print('epoch', epoch, ': loss', loss.numpy(), '; accuracy', accuracy.numpy())
    loss, accuracy = test(model, 
                          tf.constant(test_data[0], dtype=tf.float32), 
                          tf.constant(test_data[1], dtype=tf.int64))
    
    print('test loss', loss.numpy(), '; accuracy', accuracy.numpy())
    
    epoch 0 : loss 2.3099253 ; accuracy 0.13093333
    epoch 1 : loss 2.304885 ; accuracy 0.13463333
    epoch 2 : loss 2.2998776 ; accuracy 0.13906667
    epoch 3 : loss 2.2949016 ; accuracy 0.14308333
    epoch 4 : loss 2.2899568 ; accuracy 0.14741667
    epoch 5 : loss 2.2850416 ; accuracy 0.15208334
    epoch 6 : loss 2.2801554 ; accuracy 0.15626666
    epoch 7 : loss 2.2752976 ; accuracy 0.1611
    epoch 8 : loss 2.2704673 ; accuracy 0.16601667
    epoch 9 : loss 2.265663 ; accuracy 0.17066666
    epoch 10 : loss 2.2608845 ; accuracy 0.17548333
    epoch 11 : loss 2.256132 ; accuracy 0.18088333
    epoch 12 : loss 2.2514026 ; accuracy 0.18561667
    epoch 13 : loss 2.2466977 ; accuracy 0.19076666
    epoch 14 : loss 2.2420156 ; accuracy 0.19616666
    epoch 15 : loss 2.2373557 ; accuracy 0.20183334
    epoch 16 : loss 2.2327178 ; accuracy 0.2075
    epoch 17 : loss 2.228101 ; accuracy 0.21338333
    epoch 18 : loss 2.2235048 ; accuracy 0.21935
    epoch 19 : loss 2.2189286 ; accuracy 0.22548333
    epoch 20 : loss 2.214372 ; accuracy 0.23158333
    epoch 21 : loss 2.209834 ; accuracy 0.23836666
    epoch 22 : loss 2.205315 ; accuracy 0.24433333
    epoch 23 : loss 2.2008135 ; accuracy 0.25088334
    epoch 24 : loss 2.1963296 ; accuracy 0.2572
    epoch 25 : loss 2.1918633 ; accuracy 0.26376668
    epoch 26 : loss 2.187413 ; accuracy 0.27018332
    epoch 27 : loss 2.1829789 ; accuracy 0.27626666
    epoch 28 : loss 2.1785607 ; accuracy 0.28215
    epoch 29 : loss 2.1741576 ; accuracy 0.28843334
    epoch 30 : loss 2.1697698 ; accuracy 0.2942
    epoch 31 : loss 2.165396 ; accuracy 0.30048335
    epoch 32 : loss 2.1610367 ; accuracy 0.30646667
    epoch 33 : loss 2.1566913 ; accuracy 0.31136668
    epoch 34 : loss 2.152359 ; accuracy 0.31738332
    epoch 35 : loss 2.1480403 ; accuracy 0.32326666
    epoch 36 : loss 2.1437342 ; accuracy 0.32873333
    epoch 37 : loss 2.1394403 ; accuracy 0.33388335
    epoch 38 : loss 2.1351585 ; accuracy 0.33935
    epoch 39 : loss 2.1308892 ; accuracy 0.34543332
    epoch 40 : loss 2.1266308 ; accuracy 0.35115
    epoch 41 : loss 2.1223838 ; accuracy 0.35598335
    epoch 42 : loss 2.1181479 ; accuracy 0.36223334
    epoch 43 : loss 2.1139226 ; accuracy 0.36766666
    epoch 44 : loss 2.109708 ; accuracy 0.37271667
    epoch 45 : loss 2.1055036 ; accuracy 0.37828332
    epoch 46 : loss 2.101309 ; accuracy 0.38358334
    epoch 47 : loss 2.097124 ; accuracy 0.38878334
    epoch 48 : loss 2.092949 ; accuracy 0.39408332
    epoch 49 : loss 2.088783 ; accuracy 0.39916667
    test loss 2.0848043 ; accuracy 0.4049
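
    The loop above performs one full-batch gradient-descent step per "epoch" with a fixed learning rate of 0.01, which is why accuracy only reaches about 0.40 after 50 steps. A mini-batch loop built with tf.data would take many more update steps per pass over the data and converge much faster; the following is only a sketch, assuming train_one_step is kept unchanged (train_ds is a name introduced here):

    train_ds = tf.data.Dataset.from_tensor_slices(
        (train_data[0].astype('float32'), train_data[1].astype('int64'))
    ).shuffle(60000).batch(128)

    for epoch in range(5):
        for x_batch, y_batch in train_ds:
            loss, accuracy = train_one_step(model, optimizer, x_batch, y_batch)
        print('epoch', epoch, ': loss', loss.numpy(), '; accuracy', accuracy.numpy())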
    
    
    
Original article: https://www.cnblogs.com/hbuwyg/p/16342889.html