• Building a convolutional neural network with TensorFlow 1.0
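
    A compact TensorFlow 1.x example: two 5x5 convolution + 2x2 max-pooling blocks (1 → 32 → 64 channels), a 1024-unit fully connected layer with dropout, and a 10-way softmax output, trained on MNIST with Adam.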


    import tensorflow as tf
    from tensorflow.examples.tutorials.mnist import input_data
    import os
    # CUDA_DEVICE_ORDER takes an ordering policy, not device IDs;
    # the visible GPUs are selected via CUDA_VISIBLE_DEVICES.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    
    
    mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
    
    def compute_accuracy(v_xs, v_ys):
        # Evaluate on v_xs/v_ys with dropout disabled (keep_prob=1).
        global prediction
        y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})
        correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})
        return result
    
    def weight_variable(shape):
        # Small-stddev truncated normal breaks symmetry without large outliers.
        initial = tf.truncated_normal(shape=shape, stddev=0.1)
        return tf.Variable(initial)

    def bias_variable(shape):
        # A slightly positive bias helps avoid dead ReLU units at the start.
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)
    
    def conv2d(x, W):
        # strides = [1, x_movement, y_movement, 1]; strides[0] and strides[3] must be 1.
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")
    
    def max_pool_2x2(x):
        # 2x2 pooling with stride 2 halves each spatial dimension.
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    
    def add_layer(inputs, in_size, out_size, activation_function=None):
        # Generic fully connected layer: outputs = activation(inputs @ W + b).
        Weight = tf.Variable(tf.random_normal([in_size, out_size]))
        biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
        Wx_plus_b = tf.matmul(inputs, Weight) + biases
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        return outputs
    
    # define placeholders for inputs to the network
    xs = tf.placeholder(tf.float32, [None, 784])   # flattened 28x28 grayscale images
    ys = tf.placeholder(tf.float32, [None, 10])    # one-hot labels
    keep_prob = tf.placeholder(tf.float32)         # dropout keep probability
    x_image = tf.reshape(xs, [-1, 28, 28, 1])      # NHWC: [batch, height, width, channels]
    
    ## conv1 layer ##
    W_conv1 = weight_variable([5, 5, 1, 32])  # patch 5x5, in channels 1, out channels 32
    b_conv1 = bias_variable([32])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)  # output size 28x28x32
    h_pool1 = max_pool_2x2(h_conv1)                           # output size 14x14x32
    
    ## conv2 layer ##
    W_conv2 = weight_variable([5, 5, 32, 64])  # patch 5x5, in channels 32, out channels 64
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)  # output size 14x14x64
    h_pool2 = max_pool_2x2(h_conv2)                           # output size 7x7x64
    
    
    # Alternative FC head built with add_layer (kept commented out for reference):
    # fc_input = tf.reshape(h_pool2, [-1, 7*7*64])
    # fc1 = add_layer(fc_input, 7*7*64, 1024, activation_function=tf.nn.relu)
    # fc1_drop = tf.nn.dropout(fc1, keep_prob)
    # fc2 = add_layer(fc1_drop, 1024, 10, activation_function=tf.nn.softmax)
    # prediction = fc2
    
    ## func1 layer ##
    W_fc1 = weight_variable([7*7*64, 1024])
    b_fc1 = bias_variable([1024])
    # [n_samples, 7, 7, 64] ->> [n_samples, 7*7*64]
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    
    ## func2 layer ##
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    
    # Cross-entropy loss. Taking tf.log of softmax outputs can underflow when a
    # probability reaches 0; see the logits-based sketch after the script.
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1]))
    
    train_step = tf.train.AdamOptimizer(0.0001).minimize(cross_entropy)
    
    # log_device_placement=True logs which device each op is assigned to;
    # allow_growth=True lets GPU memory usage grow on demand instead of
    # pre-allocating the whole card.
    config = tf.ConfigProto(log_device_placement=True)
    config.gpu_options.allow_growth = True
    
    sess = tf.Session(config=config)
    
    sess.run(tf.global_variables_initializer())  # initialize_all_variables() is deprecated
    
    for i in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})
        if i % 50 == 0:
            print(compute_accuracy(mnist.test.images, mnist.test.labels))
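
    The hand-rolled loss above applies tf.log to softmax outputs, which can underflow to log(0) once the network becomes confident. A minimal sketch of a numerically stable variant, assuming the same graph variables as in the script (h_fc1_drop, W_fc2, b_fc2, ys); it replaces the prediction and cross_entropy lines with a logits-based loss:

        # Sketch (assumes the variables defined above): keep raw logits and let
        # TensorFlow compute softmax and cross-entropy in one fused, stable op.
        logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
        prediction = tf.nn.softmax(logits)  # still needed by compute_accuracy
        cross_entropy = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=ys, logits=logits))

    Also note that compute_accuracy feeds all 10,000 test images in a single run, which can exhaust GPU memory on smaller cards; evaluating on a slice such as mnist.test.images[:1000] is a safer default.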
    
    Thinking things through is also a form of effort: make sound analyses and choices, because our time and energy are limited, so spend them where they create the most value.
  • Original post: https://www.cnblogs.com/LiuXinyu12378/p/12495395.html