• The handwritten-digit problem in practice (layers)


Fashion-MNIST — the handwritten-digit recipe applied to the 10-class Fashion-MNIST clothing dataset

    import tensorflow as tf
    from tensorflow import keras
    from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics
    
    
    def preprocess(x, y):
        """数据处理函数"""
        x = tf.cast(x, dtype=tf.float32) / 255.
        y = tf.cast(y, dtype=tf.int32)
    
        return x, y
    
    
    # Load the data: 60,000 training and 10,000 test images
    (x, y), (x_test, y_test) = datasets.fashion_mnist.load_data()
    print(x.shape, y.shape)
    
    # Training pipeline: preprocess, shuffle, batch
    batch_size = 128
    db = tf.data.Dataset.from_tensor_slices((x, y))
    db = db.map(preprocess).shuffle(10000).batch(batch_size)
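    # shuffle(10000) draws from a 10,000-example buffer (one sixth of the
    # 60k training images), so each epoch sees a fresh batch order.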
    
    # Test pipeline: no shuffling needed for evaluation
    db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
    db_test = db_test.map(preprocess).batch(batch_size)
    
    # Peek at one training batch to check shapes
    db_iter = iter(db)
    sample = next(db_iter)
    print(f'batch: {sample[0].shape,sample[1].shape}')
    
    # Define the network: a stack of fully connected layers
    model = Sequential([
        layers.Dense(256, activation=tf.nn.relu),  # [b,784] --> [b,256]
        layers.Dense(128, activation=tf.nn.relu),  # [b,256] --> [b,128]
        layers.Dense(64, activation=tf.nn.relu),  # [b,128] --> [b,64]
        layers.Dense(32, activation=tf.nn.relu),  # [b,64] --> [b,32]
        layers.Dense(10),  # [b,32] --> [b,10]; raw logits, params: 32*10+10 = 330
    ])
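    # A Dense layer holds in_features*units + units parameters; e.g. the first
    # layer has 784*256 + 256 = 200,960, matching the summary printed below.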
    
    model.build(input_shape=[None, 28 * 28])  # create the weights so summary() can run before training
    model.summary()  # print output shapes and parameter counts per layer
    # Gradient-descent update rule: w = w - lr * grad
    optimizer = optimizers.Adam(learning_rate=1e-3)  # optimizer; speeds up convergence
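    # Note: Adam keeps per-parameter moment estimates, so the effective step
    # size adapts per weight rather than following the plain rule above.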
    
    
    def main():
        """主运行函数"""
        for epoch in range(10):
    
            for step, (x, y) in enumerate(db):
    
                # x:[b,28,28] --> [b,784]
                # y:[b]
                x = tf.reshape(x, [-1, 28 * 28])
    
                with tf.GradientTape() as tape:
                    # [b,784] --> [b,10]
                    logits = model(x)
                    y_onehot = tf.one_hot(y, depth=10)
                    # [b]
                    loss_mse = tf.reduce_mean(tf.losses.MSE(y_onehot, logits))
                    loss_ce = tf.reduce_mean(
                        tf.losses.categorical_crossentropy(y_onehot,
                                                           logits,
                                                           from_logits=True))
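                    # Only loss_ce drives the update below; loss_mse is
                    # computed purely for logging and comparison.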
    
                grads = tape.gradient(loss_ce, model.trainable_variables)
                optimizer.apply_gradients(zip(grads, model.trainable_variables))
    
                if step % 100 == 0:
                    print(epoch, step, f'loss: {float(loss_ce),float(loss_mse)}')
    
            # Evaluate on the held-out test set
            total_correct = 0
            total_num = 0
            for x, y in db_test:
                # x:[b,28,28] --> [b,784]
                # y:[b]
                x = tf.reshape(x, [-1, 28 * 28])
                # [b,10]
                logits = model(x)
                # logits --> prob [b,10]
                prob = tf.nn.softmax(logits, axis=1)
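                # softmax is monotonic, so argmax over the raw logits would
                # yield the same predictions; prob is computed for clarity.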
                # [b,10] --> [b], int32
                pred = tf.argmax(prob, axis=1)
                pred = tf.cast(pred, dtype=tf.int32)
                # pred:[b]
                # y:[b]
                # correct: [b], True: equal; False: not equal
                correct = tf.equal(pred, y)
                correct = tf.reduce_sum(tf.cast(correct, dtype=tf.int32))
    
                total_correct += int(correct)
                total_num += x.shape[0]
    
            acc = total_correct / total_num
            print(epoch, f'test acc: {acc}')
    
    
    if __name__ == '__main__':
        main()
    
    (60000, 28, 28) (60000,)
    batch: (TensorShape([128, 28, 28]), TensorShape([128]))
    Model: "sequential"
    _________________________________________________________________
    Layer (type)                 Output Shape              Param #   
    =================================================================
    dense (Dense)                multiple                  200960    
    _________________________________________________________________
    dense_1 (Dense)              multiple                  32896     
    _________________________________________________________________
    dense_2 (Dense)              multiple                  8256      
    _________________________________________________________________
    dense_3 (Dense)              multiple                  2080      
    _________________________________________________________________
    dense_4 (Dense)              multiple                  330       
    =================================================================
    Total params: 244,522
    Trainable params: 244,522
    Non-trainable params: 0
    _________________________________________________________________
    0 0 loss: (2.317634105682373, 0.240666925907135)
    0 100 loss: (0.5373745560646057, 22.973751068115234)
    0 200 loss: (0.43246397376060486, 23.848735809326172)
    0 300 loss: (0.46746426820755005, 25.622543334960938)
    0 400 loss: (0.35051512718200684, 25.582551956176758)
    0 test acc: 0.8411
    1 0 loss: (0.3928898572921753, 25.729997634887695)
    1 100 loss: (0.3873934745788574, 28.322147369384766)
    1 200 loss: (0.3231426477432251, 23.980030059814453)
    1 300 loss: (0.38196271657943726, 26.83534812927246)
    1 400 loss: (0.32163679599761963, 23.470108032226562)
    1 test acc: 0.8522
    2 0 loss: (0.37968552112579346, 24.739742279052734)
    2 100 loss: (0.3641986846923828, 32.74003601074219)
    2 200 loss: (0.28391164541244507, 27.59665870666504)
    2 300 loss: (0.35625365376472473, 32.84230422973633)
    2 400 loss: (0.2914122939109802, 28.2105712890625)
    2 test acc: 0.8643
    3 0 loss: (0.32174795866012573, 28.54513168334961)
    3 100 loss: (0.33406931161880493, 35.23341369628906)
    3 200 loss: (0.2978339195251465, 31.21166229248047)
    3 300 loss: (0.34828150272369385, 37.68536376953125)
    3 400 loss: (0.2958236336708069, 34.39887619018555)
    3 test acc: 0.8657
    4 0 loss: (0.2884419858455658, 28.723865509033203)
    4 100 loss: (0.30416643619537354, 42.262481689453125)
    4 200 loss: (0.2996847927570343, 35.41472244262695)
    4 300 loss: (0.312608003616333, 43.87290954589844)
    4 400 loss: (0.2881354093551636, 39.22924041748047)
    4 test acc: 0.8671
    5 0 loss: (0.27839434146881104, 35.540130615234375)
    5 100 loss: (0.2806701958179474, 48.4008903503418)
    5 200 loss: (0.2766285836696625, 42.87168884277344)
    5 300 loss: (0.2809426784515381, 52.62693786621094)
    5 400 loss: (0.26189112663269043, 47.885108947753906)
    5 test acc: 0.8735
    6 0 loss: (0.2579110264778137, 43.07761764526367)
    6 100 loss: (0.2582871615886688, 59.50879669189453)
    6 200 loss: (0.28057757019996643, 48.108917236328125)
    6 300 loss: (0.27566733956336975, 59.61842346191406)
    6 400 loss: (0.25613951683044434, 58.044837951660156)
    6 test acc: 0.8777
    7 0 loss: (0.2313823103904724, 51.310028076171875)
    7 100 loss: (0.2618938088417053, 68.77056884765625)
    7 200 loss: (0.2880491614341736, 57.71855163574219)
    7 300 loss: (0.2529357075691223, 68.95218658447266)
    7 400 loss: (0.22080641984939575, 67.93892669677734)
    7 test acc: 0.8817
    8 0 loss: (0.2174786627292633, 67.77418518066406)
    8 100 loss: (0.24183037877082825, 73.86817932128906)
    8 200 loss: (0.2777296304702759, 70.0999755859375)
    8 300 loss: (0.24576255679130554, 75.03533935546875)
    8 400 loss: (0.22443264722824097, 77.09339904785156)
    8 test acc: 0.8838
    9 0 loss: (0.1982460469007492, 67.42424011230469)
    9 100 loss: (0.18205790221691132, 84.63094329833984)
    9 200 loss: (0.2785370349884033, 78.5411376953125)
    9 300 loss: (0.2484734058380127, 94.95791625976562)
    9 400 loss: (0.20044317841529846, 85.65432739257812)
    9 test acc: 0.8833
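
    For comparison, the same network can also be trained with Keras's built-in compile/fit loop. Below is a minimal sketch mirroring the hyperparameters above (batch size 128, Adam at 1e-3, 10 epochs); it uses a sparse cross-entropy loss so the labels need no one-hot encoding:

    import tensorflow as tf
    from tensorflow.keras import datasets, layers, Sequential

    # Load and flatten the data up front instead of reshaping per batch.
    (x, y), (x_test, y_test) = datasets.fashion_mnist.load_data()
    x = x.reshape(-1, 28 * 28).astype('float32') / 255.
    x_test = x_test.reshape(-1, 28 * 28).astype('float32') / 255.

    model = Sequential([
        layers.Dense(256, activation='relu'),
        layers.Dense(128, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(32, activation='relu'),
        layers.Dense(10),  # raw logits
    ])
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
        # from_logits=True because the last layer applies no softmax
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'],
    )
    model.fit(x, y, batch_size=128, epochs=10,
              validation_data=(x_test, y_test))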
  • Original post: https://www.cnblogs.com/abdm-989/p/14123338.html