• TensorBoard visualization


    # coding: utf-8
    
    import tensorflow as tf
    from tensorflow.examples.tutorials.mnist import input_data
    
    
    # Read the MNIST dataset with one-hot encoded labels.
    mnist = input_data.read_data_sets('../MNIST_data', one_hot=True)
    
    
    batch_size = 100
    
    n_batch = mnist.train.num_examples // batch_size
    
    
    def variable_summaries(var):
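        """Attach mean/stddev/max/min scalar summaries and a histogram to a tensor."""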
        with tf.name_scope('summary'):
            mean = tf.reduce_mean(var)
            tf.summary.scalar('mean', mean)
            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            tf.summary.scalar('stddev', stddev)
            tf.summary.scalar('max', tf.reduce_max(var))
            tf.summary.scalar('min', tf.reduce_min(var))
            tf.summary.histogram('histogram', var)
    
    # Group ops under name scopes so the TensorBoard graph view stays readable.
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, [None, 784], name='x-input')
        y = tf.placeholder(tf.float32, [None, 10], name='y-input')
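
    # Optional extension (not in the original post): reshape the flat 784-pixel
    # vector back to 28x28x1 and log a few input digits to TensorBoard's IMAGES tab.
    with tf.name_scope('input_reshape'):
        image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
        tf.summary.image('input', image_shaped_input, max_outputs=10)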
    
    with tf.name_scope('layer'):
    
        with tf.name_scope('weights'):
            W = tf.Variable(tf.zeros([784, 10]), name='W')
            variable_summaries(W)
        with tf.name_scope('biases'):
            b = tf.Variable(tf.zeros([10]), name='b')
            variable_summaries(b)
        with tf.name_scope('wx_plus_b'):
            wx_plus_b = tf.matmul(x, W) + b
        with tf.name_scope('softmax'):
            prediction = tf.nn.softmax(wx_plus_b)
    
    with tf.name_scope('loss'):
        #loss = tf.reduce_mean(tf.square(y - prediction))
        # Feed the raw logits (wx_plus_b), not the softmax output, to
        # softmax_cross_entropy_with_logits; the op applies softmax internally.
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=wx_plus_b))
        tf.summary.scalar('loss', loss)
    
    with tf.name_scope('train'):
        train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
    
    init = tf.global_variables_initializer()
    
    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    
    # Merge every summary op defined above into a single op that can be run once per step.
    merged = tf.summary.merge_all()
    
    
    with tf.Session() as sess:
        sess.run(init)
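        # The FileWriter saves the graph (GRAPHS tab); merged summaries are added
        # to the same log directory once per epoch below.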
        writer = tf.summary.FileWriter('logs/', sess.graph)
        for epoch in range(25):
            for batch in range(n_batch):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                summary, _ = sess.run([merged, train_step], feed_dict={x:batch_xs, y:batch_ys})
    
            # Only the summary evaluated on the epoch's final batch is written out.
            writer.add_summary(summary, epoch)
            acc = sess.run(accuracy, feed_dict={x:mnist.test.images, y:mnist.test.labels})
            print('Iter ' + str(epoch) + ', Testing Accuracy: ' + str(acc))
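
To view the results, run tensorboard --logdir=logs from the directory containing logs/ and open the address it prints (http://localhost:6006 by default). The SCALARS tab shows the loss curve and the mean/stddev/max/min statistics of W and b, HISTOGRAMS shows their distributions, and GRAPHS shows the name-scoped computation graph recorded by the FileWriter.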
  • Original article: https://www.cnblogs.com/TMatrix52/p/8425620.html