• Neural network (DNN) multi-class classification model

The script below builds a fully-connected multi-class classifier with the tf.compat.v1 graph API: it one-hot encodes the label column of a CSV dataset, trains with mini-batches and dropout, logs loss and accuracy to TensorBoard, and saves the trained model as a checkpoint.


    import os

    import numpy as np
    import pandas as pd
    import tensorflow.compat.v1 as tf
    from tensorflow.keras.utils import to_categorical

    tf.disable_v2_behavior()                        # needed for placeholders/sessions when running under TensorFlow 2.x
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'        # silence TensorFlow info logs
    
    
    # Data preparation: the first column is the class label, the remaining columns are features
    df = pd.read_csv('./data/train_date_new.csv', sep=',', index_col=None, header=0)

    # print(df.groupby(by="diabete").count())      # inspect the class balance

    X = df.iloc[:, 1:].values.astype(np.float32)                   # feature matrix
    Y = to_categorical(df.iloc[:, 0].values).astype(np.float32)    # one-hot encoded labels
    
    
    print(X.shape,Y.shape)
    
    # Simple 80/20 split: the first 80% of rows for training, the rest for validation/testing
    train_split = int(df.shape[0] * 0.8)
    x_train, y_train = X[:train_split, :], Y[:train_split, :]
    x_test, y_test = X[train_split:, :], Y[train_split:, :]

    ind, col = x_train.shape                        # number of training rows / feature columns
    y_ind, y_col = y_train.shape                    # number of training rows / number of classes
    
    # One fully-connected layer: linear transform -> activation -> dropout
    def dense(x, w, b, keep_prob):
        linear = tf.matmul(x, w) + b
        # other activations tried: tf.nn.relu, tf.nn.tanh
        activation = tf.nn.sigmoid(linear)
        y = tf.nn.dropout(activation, keep_prob=keep_prob)
        return y


    # Stack the hidden layers; the final matmul produces raw logits (softmax is applied in the loss)
    def DNNModel(inputs, w, b, keep_prob):
        hidden = inputs
        for i in range(len(w) - 1):
            hidden = dense(hidden, w[i], b[i], keep_prob)
        output = tf.matmul(hidden, w[-1]) + b[-1]
        return output
    
    # Create one weight matrix and one bias row vector per pair of adjacent layers
    def gen_weights(unit_list):
        w = []
        b = []
        for i in range(len(unit_list) - 1):
            sub_w = tf.Variable(tf.random_normal(shape=[unit_list[i], unit_list[i + 1]]))
            sub_b = tf.Variable(tf.random_normal(shape=[1, unit_list[i + 1]]))
            w.append(sub_w)
            b.append(sub_b)
        return w, b
    
    
    x = tf.placeholder(tf.float32, [None, col])
    y = tf.placeholder(tf.float32, [None, y_col])
    keepprob = tf.placeholder(tf.float32)
    
    global_step = tf.Variable(0, trainable=False)   # step counter, excluded from training

    # Layer sizes: input features -> two hidden layers -> number of classes
    unit_list = [col, 512, 256, y_col]              # reached 0.7543333 accuracy in the original run

    keep_prob_train = 0.75                          # dropout keep probability used during training
    
    w, b = gen_weights(unit_list)
    y_pre = DNNModel(x, w, b, keepprob)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=y_pre, labels=y))
    tf.summary.scalar("loss", loss)                 # log the loss as a scalar summary

    opt = tf.train.AdamOptimizer(0.01).minimize(loss, global_step=global_step)

    # argmax over each row gives the predicted / true class index; compare them element-wise
    predict = tf.equal(tf.argmax(y_pre, axis=1), tf.argmax(y, axis=1))
    acc = tf.reduce_mean(tf.cast(predict, tf.float32))

    tf.summary.scalar("acc", acc)                   # log the accuracy as a scalar summary
    merged = tf.summary.merge_all()                 # merge all collected summaries
    saver = tf.train.Saver()                        # for saving / restoring the model
    init = tf.global_variables_initializer()        # global variable initializer
    
    
    
    n_batches = 4                                   # split the training set into 4 mini-batches per epoch
    batch_size = ind // n_batches
    print(n_batches, batch_size)
    
    
    with tf.Session() as sess:
        sess.run(init)
        writer = tf.summary.FileWriter("./log/tensorboard", tf.get_default_graph())      # TensorBoard event file
        for i in range(10000):
            for j in range(n_batches):
                start = j * batch_size
                end = (j + 1) * batch_size if j < n_batches - 1 else ind   # last batch takes the remaining rows
                x_train_batch, y_train_batch = x_train[start:end, :], y_train[start:end, :]

                summary, _ = sess.run([merged, opt],
                                      feed_dict={x: x_train_batch, y: y_train_batch, keepprob: keep_prob_train})
                writer.add_summary(summary, i)      # append this epoch's summaries to the event file

            # Periodically evaluate the model on the held-out split
            if (i + 1) % 1000 == 0:
                feeddict = {x: x_test, y: y_test, keepprob: 1.}      # no dropout during evaluation
                valloss, accuracy = sess.run([loss, acc], feed_dict=feeddict)
                print('epoch', i, 'validation loss:', valloss, ', accuracy:', accuracy)

        saver.save(sess, './model/tfdnn.ckpt')        # save the trained model
        print('Test-set accuracy:', sess.run(acc, feed_dict={x: x_test, y: y_test, keepprob: 1.}))
    
    writer.close()
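
The scalar summaries written to ./log/tensorboard can be inspected afterwards by launching TensorBoard with: tensorboard --logdir ./log/tensorboard. To reuse the trained network for inference, the checkpoint saved above can be restored and the logits evaluated with keepprob fixed at 1. Below is a minimal sketch, assuming the training script has already been run in the same Python process (so the tensors x, y_pre, keepprob and the saver still exist); new_samples is a hypothetical name for whatever feature rows you want to score.

    # Restore the checkpoint saved by the training script and predict class indices.
    new_samples = x_test[:5]                          # hypothetical input: float32 rows with the same number of feature columns (col)
    with tf.Session() as sess:
        saver.restore(sess, './model/tfdnn.ckpt')     # load the trained weights
        logits = sess.run(y_pre, feed_dict={x: new_samples, keepprob: 1.})
        print('predicted classes:', np.argmax(logits, axis=1))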
    

      

  • Original article: https://www.cnblogs.com/wuzaipei/p/16319720.html