• TensorFlow入门-Titanic数据集训练


    import sys

    import numpy as np
    import pandas as pd
    import tensorflow as tf
    from sklearn.model_selection import train_test_split
    
    train_step = 5
    train_path = 'train.csv'
    is_train = False
    learn_rate = 0.0001
    epochs = 10
    
    data = pd.read_csv(train_path)
    
    # 取部分特征字段用于分类,并将所有缺失的字段填充为0
    data['Sex'] = data['Sex'].apply(lambda s: 1 if s == 'male' else 0)
    data = data.fillna(0)
    dataset_X = data[['Sex', 'Age', 'Pclass', 'SibSp', 'Parch', 'Fare']]
    dataset_X = dataset_X.as_matrix()
    
    # 两种分类分别是幸存和死亡,'Survived'字段是其中一种分类的标签
    # 新增'Deceased'字段表示第二种分类的标签,取值为'Survived'字段取非
    data['Deceased'] = data['Survived'].apply(lambda s: int(not s))
    dataset_Y = data[['Deceased', 'Survived']]
    dataset_Y = dataset_Y.as_matrix()
    
    # 使用sklearn的train_test_split函数将标记数据切分为‘训练数据集和验证数据集’
    # 将全部标记数据随机洗牌后切分,其中验证数据占20%,由test_size参数指定
    X_train, X_test, Y_train, Y_test = train_test_split(dataset_X, dataset_Y,
                                                        test_size=0.2, random_state=42)
    # 声明输入数据点位符
    X = tf.placeholder(tf.float32, shape=[None, 6])
    Y = tf.placeholder(tf.float32, shape=[None, 2])
    # 声明变量(参数)
    W = tf.Variable(tf.random_normal([6, 2]), name='weights')
    b = tf.Variable(tf.zeros([2]), name='bias')
    # 构造前向传播计算图
    y_pred = tf.nn.softmax(tf.matmul(X, W) + b)
    
    # 使用交叉熵作为代价函数 Y * log(y_pred + e-10),程序中e-10,防止y_pred十分接近0或者1时,
    # 计算(log0)会得到无穷,导致非法,进一步导致无法计算梯度,迭代陷入崩溃。
    cross_entropy = -tf.reduce_sum(Y * tf.log(y_pred + 1e-10), reduction_indices=1)
    # 批量样本的代价为所有样本交叉熵的平均值
    cost = tf.reduce_mean(cross_entropy)
    # 使用随机梯度下降算法优化器来最小化代价,系统自动构建反向传播部分的计算图
    train_op = tf.train.GradientDescentOptimizer(learn_rate).minimize(cost)
    
    saver = tf.train.Saver()
    if is_train:
        with tf.Session() as sess:
            writer = tf.summary.FileWriter('logfile', sess.graph)
            # 初始化所有变量,必须最先执行
            tf.global_variables_initializer().run()
            # 以下为训练迭代,迭代10轮
            for epoch in range(10):
                total_loss = 0
                for i in range(len(X_train)):
                    _, loss = sess.run([train_op, cost], feed_dict={X:[X_train[i]], Y:[Y_train[i]]})
                    total_loss += loss
                print('Epoch: %04d, total loss=%.9f' % (epoch + 1, total_loss))
                # 保存model
                if (epoch + 1) % train_step == 0:
                    save_path = saver.save(sess, './model/model.ckpt', global_step=epoch + 1)
            print('Training complete!')
            pred = sess.run(y_pred, feed_dict={X: X_test})
            # np.argmax的axis=1表示第2轴最大值的索引(这里表示列与列对比,最大值的索引)
            correct = np.equal(np.argmax(pred, axis=1), np.argmax(Y_test, axis=1))
            accuracy = np.mean(correct.astype(np.float32))
            print("Accuracy on validation set: %.9f" % accuracy)
    else:
        # 恢复model,继续训练
        with tf.Session() as sess1:
            # 从'checkpoint'文件中读出最新存档的路径
            ckpt = tf.train.get_checkpoint_state('./model')
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess1, ckpt.model_checkpoint_path)
                print('restore model sucess!')
            else:
                sys(0)
            print('continue train …………')
            for epoch in range(epochs):
                total_loss = 0
                for i in range(len(X_train)):
                    _, loss = sess1.run([train_op, cost], feed_dict={X:[X_train[i]], Y:[Y_train[i]]})
                    total_loss += loss
                print('Epoch: %04d, total loss=%.9f' % (epoch + 1, total_loss))
                # 保存model
                if (epoch + 1) % train_step == 0:
                    save_path = saver.save(sess1, './model/model.ckpt', global_step=epoch + 1)
            print('Training complete!')
            pred = sess1.run(y_pred, feed_dict={X: X_test})
            # np.argmax的axis=1表示第2轴最大值的索引(这里表示列与列对比,最大值的索引)
            correct = np.equal(np.argmax(pred, axis=1), np.argmax(Y_test, axis=1))
            accuracy = np.mean(correct.astype(np.float32))
            print("Accuracy on validation set: %.9f" % accuracy)
    
    # 恢复model参数
    with tf.Session() as sess2:
        # 从'checkpoint'文件中读出最新存档的路径
        print('restore lastest model, compute Accuracy!')
        ckpt = tf.train.get_checkpoint_state('./model')
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess2, ckpt.model_checkpoint_path)
        pred = sess2.run(y_pred, feed_dict={X: X_test})
        # np.argmax的axis=1表示第2轴最大值的索引(这里表示列与列对比,最大值的索引)
        correct = np.equal(np.argmax(pred, axis=1), np.argmax(Y_test, axis=1))
        accuracy = np.mean(correct.astype(np.float32))
        print("Accuracy on validation set: %.9f" % accuracy)
    

    TensorFlow自带的可视化工具TensorBoard

    在当前目录的命令行下键入:tensorboard --logdir=logfile

    根据命令行的提示,在浏览器里输入相应的网址。

  • 相关阅读:
    for循环嵌套的原理
    php for循环|求1-100之间被3整除且被5不整除的偶数
    php 1-100之间能被3整除的数字之和;
    php判断某年某月有多少天
    关系运算符
    变量
    习题5-7 使用函数求余弦函数的近似值
    习题5-6 使用函数输出水仙花数
    习题4-11 兔子繁衍问题
    习题4-9 打印菱形图案
  • 原文地址:https://www.cnblogs.com/touch-skyer/p/8524494.html
Copyright © 2020-2023  润新知