• Binary classification with TensorFlow


    Reading ten thousand books is no substitute for walking ten thousand miles: I had read plenty of machine learning books but done very little hands-on work. A recent project brought me to TensorFlow, so I used the simplest possible deep neural network to implement classification and regression tasks.

    First, the classification task. There are two approaches:

    For multi-class classification, the output layer produces logits Z3 of shape (m, classes). The softmax cross-entropy loss converts each row of Z3 into a probability distribution, and the class with the largest probability is the prediction.

    In TensorFlow, the multi-class loss function is:

    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Z3, labels=Y))
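    As a quick illustration of what this loss computes, here is a minimal NumPy sketch (illustrative values only, not part of the model code): softmax turns a row of logits into probabilities, and the cross-entropy is the negative log of the probability assigned to the true class.

    import numpy as np

    logits = np.array([2.0, 1.0, 0.1])             # one row of Z3 (hypothetical values)
    probs = np.exp(logits) / np.exp(logits).sum()  # softmax: [0.659, 0.242, 0.099]
    y_onehot = np.array([1.0, 0.0, 0.0])           # the true class is 0
    loss = -np.sum(y_onehot * np.log(probs))       # cross-entropy ≈ 0.417
    pred = np.argmax(probs)                        # predicted class: 0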

    To make the shapes of Z3 and Y match, the labels Y must be one-hot encoded:
    from keras.utils import to_categorical
    Y_train = to_categorical(Y_train)
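    For instance (assuming integer class labels starting at 0), to_categorical expands each label into a one-hot row:

    from keras.utils import to_categorical
    import numpy as np

    y = np.array([0, 2, 1])
    print(to_categorical(y))
    # [[1. 0. 0.]
    #  [0. 0. 1.]
    #  [0. 1. 0.]]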
    Computing the accuracy:
    correct_prediction = tf.equal(tf.argmax(Z3, axis=1), tf.argmax(Y, 1))  # tf.argmax returns the index of the max value in each row
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))  # tf.cast converts the data type
    print("Train Accuracy:", sess.run(accuracy,feed_dict={X: X_train, Y: Y_train}))
    print("Test Accuracy: ",sess.run(accuracy,feed_dict={X:X_test,Y:Y_test}))
    The full code:
    # -*- coding: utf-8 -*-
    
    import numpy as np
    import tensorflow as tf
    import csv
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import normalize
    from keras.utils import to_categorical
    
    
    # Create the placeholder objects
    def create_placeholders(n_x,n_y):
        """
        placeholder是TensorFlow的占位符节点,由placeholder方法创建,其也是一种常量,但是由用户在调用run方法是传递的.
        也可以将placeholder理解为一种形参。
        即其不像constant那样直接可以使用,需要用户传递常数值。
        """
        X=tf.placeholder(tf.float32,shape=[None,n_x],name='X')
        Y=tf.placeholder(tf.float32,shape=[None,n_y],name='Y')
    
        return X,Y
    
    # Initialize the parameters (m, the number of examples, is not used here)
    def initialize_parameters(m, n):
        # With a fixed seed the same initial parameters are generated on every run,
        # so repeated experiments are comparable
        tf.set_random_seed(1)
        W1 = tf.get_variable("W1", shape=[n, n], initializer=tf.contrib.layers.xavier_initializer(seed=1))
        b1 = tf.get_variable("b1", shape=[1, n], initializer=tf.zeros_initializer())
        W2 = tf.get_variable("W2", shape=[n, 2], initializer=tf.contrib.layers.xavier_initializer(seed=1))
        b2 = tf.get_variable("b2", shape=[1, 2], initializer=tf.zeros_initializer())
        parameters={
            "W1": W1,
            "b1":b1,
            "W2":W2,
            "b2":b2
        }
        return parameters
    
    # Forward propagation
    def forward_propagation(X,parameters,lambd):
        W1=parameters['W1']
        b1=parameters['b1']
        W2 = parameters['W2']
        b2 = parameters['b2']
    
        # Add L1 regularization of the weights to the 'losses' collection
        tf.add_to_collection('losses',tf.contrib.layers.l1_regularizer(lambd)(W1))
        tf.add_to_collection('losses', tf.contrib.layers.l1_regularizer(lambd)(W2))
    
        A1=tf.nn.relu(tf.matmul(X,W1)+b1)
        Z3=tf.matmul(A1,W2)+b2
    
        return Z3
    
    def compute_cost(Z3, Y):
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Z3, labels=Y))
        tf.add_to_collection('losses',cost)
        return tf.add_n(tf.get_collection('losses'))
    
    def model(X_train, Y_train, X_test, Y_test, learning_rate=0.01, minibatch_size=10, num_epochs=30000, print_cost=True):
        # Note: minibatch_size is declared but not used; the loop below trains on the full batch every epoch
        tf.set_random_seed(1)
        (m, n_x) = X_train.shape
        n_y = Y_train.shape[1]
        costs = []
        # Create the placeholders (input and label tensors)
        X,Y=create_placeholders(n_x,n_y)
        print(X.shape, Y.shape)
        # Initialize the parameters
        parameters=initialize_parameters(m,n_x)
        # Forward propagation
        Z3=forward_propagation(X,parameters,0.002)
        # Compute the cost
        cost = compute_cost(Z3, Y)
    
        # Backward propagation: define a TensorFlow optimizer; AdadeltaOptimizer is used here
        optimizer=tf.train.AdadeltaOptimizer(learning_rate=learning_rate).minimize(cost)
        # optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
        # Initialize all variables
        init=tf.global_variables_initializer()
    
        # Start a session to run the TensorFlow graph
        with tf.Session() as sess:
            sess.run(init)
            for epoch in range(num_epochs):
                _, epoch_cost = sess.run([optimizer, cost], feed_dict={X: X_train, Y: Y_train})
                test_cost = sess.run(cost, feed_dict={X: X_test, Y: Y_test})
    
                if print_cost and epoch % 100 == 0:
                    print("Cost after epoch %i: %f" %(epoch,epoch_cost))
                    print("test_cost: ",test_cost)
    
            # let's save the trained parameter values in a variable
            parameters = sess.run(parameters)
            print("Parameters have been trained!")
            # Evaluate the trained network
            correct_prediction = tf.equal(tf.argmax(Z3, axis=1), tf.argmax(Y, 1))  # tf.argmax returns the index of the max value in each row
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))  # tf.cast converts the data type
            print("Train Accuracy:", sess.run(accuracy,feed_dict={X: X_train, Y: Y_train}))
            print("Test Accuracy: ",sess.run(accuracy,feed_dict={X:X_test,Y:Y_test}))
    
    
            return parameters
    
    def loaddata(file):
        # read the CSV, drop the header row, and split features from labels
        with open(file, 'r', encoding='utf-8-sig') as fr:
            reader = csv.reader(fr)
            data = [line for line in reader]
        data = np.mat(data).astype(np.float32)
        X = data[1:, 0:-1]
        Y = data[1:, -1]
        # scale each feature column by its maximum value
        X = normalize(X, axis=0, norm='max')
        X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=42)
        return X_train, X_test, Y_train, Y_test
    
    
    if __name__=='__main__':
    
        X_train, X_test, Y_train, Y_test= loaddata('./data3.csv')
        Y_train=to_categorical(Y_train)
        Y_test = to_categorical(Y_test)
        parameters = model(X_train, Y_train, X_test, Y_test)
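    Note that minibatch_size is declared but never used above: each epoch performs one full-batch update. For true mini-batch training, a minimal sketch of the inner loop (using the same tensors and arrays as the code above) could look like this:

    num_batches = (X_train.shape[0] + minibatch_size - 1) // minibatch_size
    for epoch in range(num_epochs):
        # reshuffle once per epoch so the batches differ between epochs
        perm = np.random.permutation(X_train.shape[0])
        for i in range(num_batches):
            idx = perm[i * minibatch_size:(i + 1) * minibatch_size]
            _, batch_cost = sess.run([optimizer, cost],
                                     feed_dict={X: X_train[idx], Y: Y_train[idx]})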
        

    The other approach targets binary classification directly. There are two main differences. The first is the loss function:

    The output layer Z3 has shape (m, 1).

    cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=Z3, labels=Y))
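    For a single logit z and label y, this loss computes -y*log(sigmoid(z)) - (1-y)*log(1-sigmoid(z)). A tiny NumPy check with illustrative values:

    import numpy as np

    z, y = 1.2, 1.0                                    # hypothetical logit and label
    s = 1.0 / (1.0 + np.exp(-z))                       # sigmoid(z) ≈ 0.769
    loss = -y * np.log(s) - (1 - y) * np.log(1 - s)    # ≈ 0.263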
    The second is how the accuracy is computed:
    one = tf.ones_like(Z3)
    zero = tf.zeros_like(Z3)
    # Z3 holds raw logits (no sigmoid is applied in forward_propagation),
    # so the decision threshold is 0: sigmoid(z) < 0.5 exactly when z < 0
    label = tf.where(tf.less(Z3, 0.0), x=zero, y=one)

    correct_prediction = tf.equal(label, Y)  # compare the thresholded predictions with the labels
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))  # tf.cast converts the data type
    print("Train Accuracy:", sess.run(accuracy, feed_dict={X: X_train, Y: Y_train}))
    print("Test Accuracy: ", sess.run(accuracy, feed_dict={X: X_test, Y: Y_test}))
    The full code:
    # -*- coding: utf-8 -*-
    
    import numpy as np
    import tensorflow as tf
    import csv
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import normalize
    
    
    # Create the placeholder objects
    def create_placeholders(n_x, n_y):
        """
        placeholder是TensorFlow的占位符节点,由placeholder方法创建,其也是一种常量,但是由用户在调用run方法是传递的.
        也可以将placeholder理解为一种形参。
        即其不像constant那样直接可以使用,需要用户传递常数值。
        """
        X = tf.placeholder(tf.float32, shape=[None, n_x], name='X')
        Y = tf.placeholder(tf.float32, shape=[None, n_y], name='Y')
    
        return X, Y
    
    
    # Initialize the parameters (m, the number of examples, is not used here)
    def initialize_parameters(m, n):
        # With a fixed seed the same initial parameters are generated on every run,
        # so repeated experiments are comparable
        tf.set_random_seed(1)
        W1 = tf.get_variable("W1", shape=[n, n], initializer=tf.contrib.layers.xavier_initializer(seed=1))
        b1 = tf.get_variable("b1", shape=[1, n], initializer=tf.zeros_initializer())
        W2 = tf.get_variable("W2", shape=[n, 1], initializer=tf.contrib.layers.xavier_initializer(seed=1))
        b2 = tf.get_variable("b2", shape=[1, 1], initializer=tf.zeros_initializer())
        parameters = {
            "W1": W1,
            "b1": b1,
            "W2": W2,
            "b2": b2
        }
        return parameters
    
    
    # Forward propagation
    def forward_propagation(X, parameters, lambd):
        W1 = parameters['W1']
        b1 = parameters['b1']
        W2 = parameters['W2']
        b2 = parameters['b2']
    
        # L2 regularization of the weights (disabled here)
        #tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(lambd)(W1))
        #tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(lambd)(W2))

        # The hidden layer is disabled, so the model reduces to logistic regression
        #A1 = tf.nn.relu(tf.matmul(X, W1) + b1)
        Z3 = tf.matmul(X, W2) + b2
        # Z3 stays a raw logit; the sigmoid is applied inside the loss function
        #Z3=tf.sigmoid(Z3)
    
        return Z3
    
    
    def compute_cost(Z3, Y):
        # Cross-entropy computed directly on the logits through the sigmoid activation
        #cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Z3, labels=Y))
        cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=Z3, labels=Y))
        #cost=-tf.reduce_mean(Y*tf.log(tf.clip_by_value(Z3,1e-10,1.0)))
        tf.add_to_collection('losses', cost)
        return tf.add_n(tf.get_collection('losses'))
    
    
    def model(X_train, Y_train, X_test, Y_test, learning_rate=0.05, minibatch_size=10, num_epochs=50000, print_cost=True):
        # Note: minibatch_size is declared but not used; the loop below trains on the full batch every epoch
        tf.set_random_seed(1)
        (m, n_x) = X_train.shape
        n_y = Y_train.shape[1]
        costs = []
        # Create the placeholders (input and label tensors)
        X, Y = create_placeholders(n_x, n_y)
        print(X.shape, Y.shape)
        # Initialize the parameters
        parameters = initialize_parameters(m, n_x)
        # Forward propagation
        Z3 = forward_propagation(X, parameters, 0.001)
        # Compute the cost
        cost = compute_cost(Z3, Y)
    
        # Backward propagation: define a TensorFlow optimizer; AdadeltaOptimizer is used here
        optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate).minimize(cost)
        # optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
        # Initialize all variables
        init = tf.global_variables_initializer()
    
        # Start a session to run the TensorFlow graph
        with tf.Session() as sess:
            sess.run(init)
            for epoch in range(num_epochs):
                _, epoch_cost = sess.run([optimizer, cost], feed_dict={X: X_train, Y: Y_train})
                test_cost = sess.run(cost, feed_dict={X: X_test, Y: Y_test})
    
                if print_cost and epoch % 100 == 0:
                    print("Cost after epoch %i: %f" % (epoch, epoch_cost))
                    print("test_cost: ", test_cost)
    
            # let's save the trained parameter values in a variable
            parameters = sess.run(parameters)
            print("Parameters have been trained!")
            # Evaluate the trained network
    
            one = tf.ones_like(Z3)
            zero = tf.zeros_like(Z3)
            # Z3 holds raw logits, so the decision threshold is 0:
            # sigmoid(z) < 0.5 exactly when z < 0
            label = tf.where(tf.less(Z3, 0.0), x=zero, y=one)

            correct_prediction = tf.equal(label, Y)  # compare the thresholded predictions with the labels
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))  # tf.cast converts the data type
            print("Train Accuracy:", sess.run(accuracy, feed_dict={X: X_train, Y: Y_train}))
            print("Test Accuracy: ", sess.run(accuracy, feed_dict={X: X_test, Y: Y_test}))
    
            return parameters
    
    
    def loaddata(file):
        # read the CSV, drop the header row, and split features from labels
        with open(file, 'r', encoding='utf-8-sig') as fr:
            reader = csv.reader(fr)
            data = [line for line in reader]
        data = np.mat(data).astype(np.float32)
        X = data[1:, 0:-1]
        Y = data[1:, -1]
        # scale each feature column by its maximum value
        X = normalize(X, axis=0, norm='max')
        X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=42)
        return X_train, X_test, Y_train, Y_test
    
    
    if __name__ == '__main__':
    
        X_train, X_test, Y_train, Y_test = loaddata('./data3.csv')
        # one-hot encoding is not needed here: Y stays a single column of 0/1 labels
        #Y_train = to_categorical(Y_train)
        #Y_test = to_categorical(Y_test)
    
        parameters = model(X_train, Y_train, X_test, Y_test)
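    Since model returns the trained parameters as plain NumPy arrays, new samples can be scored without a session. A minimal sketch (new_X is a hypothetical array preprocessed with the same max scaling as the training data):

    import numpy as np

    def predict(new_X, parameters):
        # the forward pass mirrors forward_propagation above: one linear layer producing logits
        Z3 = np.dot(new_X, parameters["W2"]) + parameters["b2"]
        # a logit >= 0 corresponds to a sigmoid probability >= 0.5
        return (Z3 >= 0).astype(np.float32)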
    

      


