• 莫烦TensorFlow_08: Advanced TensorBoard Visualization


    import tensorflow as tf  
    import numpy as np  
    import matplotlib.pyplot as plt  
      
    #
    # add layer
    #
    def add_layer(inputs, in_size, out_size, n_layer, activation_function=None):
      layer_name = 'layer%s' % n_layer
      with tf.name_scope(layer_name):
        with tf.name_scope('Weights'):
          Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W')  # shape: [rows, columns]
          tf.summary.histogram(layer_name + '/weights', Weights)  # save as a histogram; the bins cover the value range
        with tf.name_scope('biases'):
          biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
          tf.summary.histogram(layer_name + '/biases', biases)  # note the histogram tag path
        with tf.name_scope('Wx_plus_b'):
          Wx_plus_b = tf.matmul(inputs, Weights) + biases

        if activation_function is None:
          outputs = Wx_plus_b
        else:
          outputs = activation_function(Wx_plus_b)

        tf.summary.histogram(layer_name + '/outputs', outputs)
        return outputs
    #
    # make up some data
    #
    x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
    noise  = np.random.normal(0, 0.05, x_data.shape)
    y_data = np.square(x_data) - 0.5 + noise
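    # the data: 300 samples of y = x^2 - 0.5 plus Gaussian noise, both shaped (300, 1)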
     
    #
    # define placeholder
    #
    with tf.name_scope('inputs'):
      xs = tf.placeholder(tf.float32, [None, 1], name='x_input')  # note the names; they label the input nodes in the graph
      ys = tf.placeholder(tf.float32, [None, 1], name='y_input')
    
    # add hidden layer
    l1 = add_layer(xs, 1, 10, n_layer=1, activation_function=tf.nn.relu)
    # add output layer
    prediction = add_layer(l1, 10, 1, n_layer=2, activation_function=None)
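    # network shape: 1 input -> 10 hidden units (ReLU) -> 1 output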
    
    # the error between prediction and real data
    with tf.name_scope('loss'):
      loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                          reduction_indices=[1]))
      tf.summary.scalar('loss', loss)  # log this op as a scalar summary (shown under the Scalars tab)
      
    with tf.name_scope('train'):
      train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)  
      
    sess = tf.Session()
    merged = tf.summary.merge_all()  # after merging, every summary can be evaluated in a single run
    writer = tf.summary.FileWriter("logs/", sess.graph)  # create the writer (this also dumps the graph)
    
    # important step: initialize all variables
    sess.run(tf.global_variables_initializer())
    
    #
    # training loop
    #
    
    for i in range(1000):
      sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
      if i % 50 == 0:
        result = sess.run(merged,  # one run evaluates all merged summaries (otherwise each would need its own run)
                          feed_dict={xs: x_data, ys: y_data})
        writer.add_summary(result, i)  # write the result, keyed by step i
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
    
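After the script finishes, the summaries live under logs/. Launch TensorBoard against that directory and open the URL it prints (http://localhost:6006 by default):

    tensorboard --logdir=logs

The loss curve appears under the Scalars tab, the per-layer weight/bias/output summaries under Histograms and Distributions, and the name-scoped computation graph under Graphs.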

      
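To compare several training runs in one TensorBoard instance, a common pattern is to give each run its own subdirectory under logs/. A minimal sketch of swapping in such a writer (the timestamp-based run name is just an illustrative choice, not part of the original tutorial):

    import time

    # one writer per run; TensorBoard overlays every run it finds under logs/
    run_dir = "logs/run-%d" % int(time.time())
    writer = tf.summary.FileWriter(run_dir, sess.graph)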

  • Original post: https://www.cnblogs.com/alexYuin/p/8684244.html