• AI and Deep Learning: How to Save and Load Models in TensorFlow 2.0?


    Import the data

    import os
    import tensorflow as tf
    from tensorflow import keras
    
    (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
    
    train_labels = train_labels[:1000]
    test_labels = test_labels[:1000]
    
    train_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0
    test_images = test_images[:1000].reshape(-1, 28 * 28) / 255.0
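
    After the slicing and reshaping above, each split should hold 1,000 flattened 28x28 images scaled into [0, 1]. A quick shape check (an illustrative addition, not part of the original post):

    print(train_images.shape, test_images.shape)   # expected: (1000, 784) (1000, 784)
    print(train_labels.shape, test_labels.shape)   # expected: (1000,) (1000,)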

    1. Define a model

    def create_model():
        model = keras.Sequential([
            keras.layers.Dense(128, activation='relu', input_shape=(784,)),
            keras.layers.Dropout(0.5),
            keras.layers.Dense(10, activation='softmax')
        ])
    
        model.compile(optimizer='adam',
                     loss=keras.losses.sparse_categorical_crossentropy,
                     metrics=['accuracy'])
        return model
    model = create_model()
    model.summary()
    Model: "sequential_2"
    _________________________________________________________________
    Layer (type)                 Output Shape              Param #   
    =================================================================
    dense_4 (Dense)              (None, 128)               100480    
    _________________________________________________________________
    dropout_2 (Dropout)          (None, 128)               0         
    _________________________________________________________________
    dense_5 (Dense)              (None, 10)                1290      
    =================================================================
    Total params: 101,770
    Trainable params: 101,770
    Non-trainable params: 0
    _________________________________________________________________
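
    Even before training, the softmax head should already return one probability per digit class. A minimal sanity check of the untrained model (not from the original post):

    preds = model.predict(train_images[:5])
    print(preds.shape)       # (5, 10)
    print(preds[0].sum())    # ~1.0, because the output layer is a softmax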

    2. The checkpoint callback

    check_path = '106save/model.ckpt'
    check_dir = os.path.dirname(check_path)
    
    cp_callback = tf.keras.callbacks.ModelCheckpoint(check_path, 
                                                     save_weights_only=True, verbose=1)
    model = create_model()
    model.fit(train_images, train_labels, epochs=10,
             validation_data=(test_images, test_labels),
             callbacks=[cp_callback])
    Train on 1000 samples, validate on 1000 samples
    Epoch 1/10
     544/1000 [===============>..............] - ETA: 0s - loss: 2.0658 - accuracy: 0.2831 
    ...
    Epoch 00010: saving model to 106save/model.ckpt
    1000/1000 [==============================] - 0s 128us/sample - loss: 0.2701 - accuracy: 0.9170 - val_loss: 0.4465 - val_accuracy: 0.8620
    <tensorflow.python.keras.callbacks.History at 0x7fbcd872fbe0>
    !ls {check_dir}
    checkpoint  model.ckpt.data-00000-of-00001  model.ckpt.index
    model = create_model()
    
    loss, acc = model.evaluate(test_images, test_labels)
    print("Untrained model, accuracy: {:5.2f}%".format(100*acc))
    1000/1000 [==============================] - 0s 69us/sample - loss: 2.4036 - accuracy: 0.0890
    Untrained model, accuracy:  8.90%
    model.load_weights(check_path)
    loss, acc = model.evaluate(test_images, test_labels)
    print("Restored model, accuracy: {:5.2f}%".format(100*acc))
    1000/1000 [==============================] - 0s 47us/sample - loss: 0.4465 - accuracy: 0.8620
    Restored model, accuracy: 86.20%
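
    ModelCheckpoint can also keep only the weights that score best on the validation data instead of overwriting the file every epoch. A hedged sketch of that variant (save_best_only and monitor are standard Keras options; the '106save_best' path is hypothetical, not used in the original post):

    best_path = '106save_best/model.ckpt'   # hypothetical directory
    best_callback = tf.keras.callbacks.ModelCheckpoint(best_path,
                                                       save_weights_only=True,
                                                       save_best_only=True,   # only overwrite when val_loss improves
                                                       monitor='val_loss',
                                                       verbose=1)
    model = create_model()
    model.fit(train_images, train_labels, epochs=10,
              validation_data=(test_images, test_labels),
              callbacks=[best_callback])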

    3. Configure the checkpoint callback

    check_path = '106save02/cp-{epoch:04d}.ckpt'
    check_dir = os.path.dirname(check_path)
    
    cp_callback = tf.keras.callbacks.ModelCheckpoint(check_path, save_weights_only=True,
                                                     verbose=1, period=5)  # save a checkpoint every 5 epochs
    model = create_model()
    model.fit(train_images, train_labels, epochs=10,
             validation_data=(test_images, test_labels),
             callbacks=[cp_callback])
    Train on 1000 samples, validate on 1000 samples
    Epoch 1/10
    1000/1000 [==============================] - 1s 1ms/sample - loss: 1.7242 - accuracy: 0.4490 - val_loss: 1.2205 - val_accuracy: 0.6890
    ....
    Epoch 00010: saving model to 106save02/cp-0010.ckpt
    1000/1000 [==============================] - 0s 120us/sample - loss: 0.2845 - accuracy: 0.9220 - val_loss: 0.4402 - val_accuracy: 0.8580
    <tensorflow.python.keras.callbacks.History at 0x7fbc5c911b38>
    !ls {check_dir}
    checkpoint            cp-0010.ckpt.data-00000-of-00001
    cp-0005.ckpt.data-00000-of-00001  cp-0010.ckpt.index
    cp-0005.ckpt.index
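
    In newer TF 2.x releases the period argument is deprecated in favor of save_freq, which counts batches rather than epochs. A rough equivalent of the setup above (assuming fit()'s default batch size of 32, so about 32 batches per epoch for 1000 samples):

    import math
    steps_per_epoch = math.ceil(len(train_images) / 32)     # fit() uses batch_size=32 by default
    cp_callback = tf.keras.callbacks.ModelCheckpoint(check_path,
                                                     save_weights_only=True,
                                                     verbose=1,
                                                     save_freq=5 * steps_per_epoch)  # ~every 5 epochs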

    4. Load the latest checkpoint

    latest = tf.train.latest_checkpoint(check_dir)
    print(latest)
    106save02/cp-0010.ckpt
    model = create_model()
    model.load_weights(latest)
    loss, acc = model.evaluate(test_images, test_labels)
    print('restored model accuracy: {:5.2f}%'.format(acc*100))
    1000/1000 [==============================] - 0s 78us/sample - loss: 0.4402 - accuracy: 0.8580
    restored model accuracy: 85.80%
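
    Besides latest_checkpoint, the 'checkpoint' state file in the directory records every checkpoint that is still kept. A small sketch using tf.train.get_checkpoint_state (an addition for illustration, not in the original post):

    state = tf.train.get_checkpoint_state(check_dir)
    print(state.all_model_checkpoint_paths)   # every checkpoint recorded in the 'checkpoint' file
    print(state.model_checkpoint_path)        # the most recent one, same as latest above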

    5. Manually save weights

    model.save_weights('106save03/manually_model.ckpt')
    model = create_model()
    model.load_weights('106save03/manually_model.ckpt')
    loss, acc = model.evaluate(test_images, test_labels)
    print('restored model accuracy: {:5.2f}%'.format(acc*100))
    1000/1000 [==============================] - 0s 69us/sample - loss: 0.4402 - accuracy: 0.8580
    restored model accuracy: 85.80%
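
    save_weights writes the TensorFlow checkpoint format by default; passing save_format='h5' (or an .h5 path) stores the weights in a single HDF5 file instead. A hedged sketch with a hypothetical file name:

    model.save_weights('106save03/manually_weights.h5', save_format='h5')   # hypothetical file name
    model.load_weights('106save03/manually_weights.h5')                     # reload from the HDF5 file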

    6. Save the entire model

    model = create_model()
    model.fit(train_images, train_labels, epochs=10,
             validation_data=(test_images, test_labels),
             )
    model.save('106save03.h5')
    Train on 1000 samples, validate on 1000 samples
    Epoch 1/10
    1000/1000 [==============================] - 0s 240us/sample - loss: 1.7636 - accuracy: 0.4460 - val_loss: 1.2041 - val_accuracy: 0.7230
    ...
    Epoch 10/10
    1000/1000 [==============================] - 0s 90us/sample - loss: 0.2574 - accuracy: 0.9290 - val_loss: 0.4674 - val_accuracy: 0.8540
    new_model = keras.models.load_model('106save03.h5')
    new_model.summary()
    Model: "sequential_11"
    _________________________________________________________________
    Layer (type)                 Output Shape              Param #   
    =================================================================
    dense_22 (Dense)             (None, 128)               100480    
    _________________________________________________________________
    dropout_11 (Dropout)         (None, 128)               0         
    _________________________________________________________________
    dense_23 (Dense)             (None, 10)                1290      
    =================================================================
    Total params: 101,770
    Trainable params: 101,770
    Non-trainable params: 0
    _________________________________________________________________
    loss, acc = new_model.evaluate(test_images, test_labels)
    print('restored model accuracy: {:5.2f}%'.format(acc*100))
    1000/1000 [==============================] - 1s 810us/sample - loss: 0.4674 - accuracy: 0.8540
    restored model accuracy: 85.40%
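
    Because the HDF5 file stores the architecture, the weights and the optimizer state together, the restored model can also resume training without being compiled again. A minimal sketch (not part of the original post):

    new_model.fit(train_images, train_labels, epochs=2,
                  validation_data=(test_images, test_labels))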

    7. Other ways to export a model

    import time
    saved_model_path = "./saved_models/{}".format(int(time.time()))
    
    tf.keras.experimental.export_saved_model(model, saved_model_path)
    saved_model_path
    './saved_models/1553601639'
    new_model = tf.keras.experimental.load_from_saved_model(saved_model_path)
    new_model.summary()
    Model: "sequential_11"
    _________________________________________________________________
    Layer (type)                 Output Shape              Param #   
    =================================================================
    dense_22 (Dense)             (None, 128)               100480    
    _________________________________________________________________
    dropout_11 (Dropout)         (None, 128)               0         
    _________________________________________________________________
    dense_23 (Dense)             (None, 10)                1290      
    =================================================================
    Total params: 101,770
    Trainable params: 101,770
    Non-trainable params: 0
    _________________________________________________________________
    # A model loaded this way must be compiled before it can be evaluated
    new_model.compile(optimizer=model.optimizer,  # keep the optimizer that was loaded
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    
    # Evaluate the restored model.
    loss, acc = new_model.evaluate(test_images, test_labels)
    print("Restored model, accuracy: {:5.2f}%".format(100*acc))
    1000/1000 [==============================] - 0s 131us/sample - loss: 0.4674 - accuracy: 0.8540
    Restored model, accuracy: 85.40%
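
    Note that tf.keras.experimental.export_saved_model and load_from_saved_model were removed in later TF 2.x releases; the stable way to get a SavedModel is to pass a directory (no .h5 suffix) to model.save and reload it with tf.keras.models.load_model. A hedged sketch with a hypothetical directory name:

    saved_dir = "./saved_models/mnist_savedmodel"      # hypothetical directory
    model.save(saved_dir)                              # SavedModel format when the path has no .h5 suffix
    restored = tf.keras.models.load_model(saved_dir)
    restored.summary()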
  • Original post: https://www.cnblogs.com/peijz/p/12916081.html