Original article:
https://www.jianshu.com/p/1b1ea45fab47
-----------------------------------------------------------------------------------
static_rnn and dynamic_rnn
1: static_rnn
x = tf.placeholder("float", [None, n_steps, n_input])
x1 = tf.unstack(x, n_steps, 1)  # list of n_steps tensors, each [None, n_input]
lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, x1, dtype=tf.float32)
pred = tf.contrib.layers.fully_connected(outputs[-1], n_classes, activation_fn=None)
2: dynamic_rnn
x = tf.placeholder("float", [None, n_steps, n_input])
lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
outputs, _ = tf.nn.dynamic_rnn(lstm_cell, x, dtype=tf.float32)  # [None, n_steps, n_hidden]
outputs = tf.transpose(outputs, [1, 0, 2])  # -> [n_steps, None, n_hidden]
pred = tf.contrib.layers.fully_connected(outputs[-1], n_classes, activation_fn=None)
BasicLSTMCell:
(num_units: the number of units inside one cell; forget_bias: the bias added to the forget gate, i.e. how much the gate remembers by default, where 1.0 means remember everything)
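To make num_units concrete, here is a minimal sketch (an illustration assuming TF 1.x with tf.contrib available, not code from the original article) that builds a cell and inspects the sizes it reports:

import tensorflow as tf

cell = tf.contrib.rnn.BasicLSTMCell(num_units=128, forget_bias=1.0)
print(cell.output_size)  # 128 -- each step emits a vector of num_units values
print(cell.state_size)   # LSTMStateTuple(c=128, h=128) -- cell state c and hidden state h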
tf.contrib.rnn.static_rnn:
Static RNN means the network is unrolled over the sample's time-step count (n_steps): the graph contains one copy of the cell's ops for each of the n_steps positions, with the weights shared across all of them.
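A quick way to see this unrolling, and to confirm the weight sharing, is the check below (a minimal sketch assuming TF 1.x; the variable count is what BasicLSTMCell creates in that version):

import tensorflow as tf

tf.reset_default_graph()
n_steps, n_input, n_hidden = 28, 28, 128
x = tf.placeholder("float", [None, n_steps, n_input])
x1 = tf.unstack(x, n_steps, 1)  # list of n_steps tensors, each [None, n_input]
cell = tf.contrib.rnn.BasicLSTMCell(n_hidden)
outputs, states = tf.contrib.rnn.static_rnn(cell, x1, dtype=tf.float32)
print(len(outputs))                   # 28 -- one output tensor per unrolled step
print(len(tf.trainable_variables()))  # 2 -- a single kernel and bias shared by all steps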
tf.nn.dynamic_rnn:
Dynamic RNN means only one cell is created for the sample; the remaining time steps are fed through that cell in a loop at run time. A network generated with static_rnn takes longer to build, occupies more memory, and exports a larger model. The static_rnn model carries the intermediate state of every time step, which is useful for debugging, but at use time it must be fed the same number of time steps it was trained with. A network generated with dynamic_rnn occupies less memory; its model keeps only the final state, and at use time it supports a different number of time steps.
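That last property is worth a sketch: because the loop runs at execution time, tf.nn.dynamic_rnn accepts a dynamic time dimension and a sequence_length argument, so one graph serves batches of different lengths (a minimal sketch assuming TF 1.x, not from the original article):

import tensorflow as tf

tf.reset_default_graph()
x = tf.placeholder(tf.float32, [None, None, 28])  # time dimension left dynamic
seq_len = tf.placeholder(tf.int32, [None])        # true length of each example
cell = tf.contrib.rnn.BasicLSTMCell(128)
outputs, state = tf.nn.dynamic_rnn(cell, x, sequence_length=seq_len, dtype=tf.float32)
# Steps beyond an example's seq_len produce zero outputs and leave its state
# unchanged, so padded batches of any length run through the same graph.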
Differences
1. tf.nn.dynamic_rnn and tf.contrib.rnn.static_rnn take their inputs in different formats.
2. tf.nn.dynamic_rnn and tf.contrib.rnn.static_rnn return their outputs in different formats (both format differences are shown in the sketch after this list).
3. tf.nn.dynamic_rnn and tf.contrib.rnn.static_rnn differ in how they run the computation internally: static_rnn unrolls at graph-construction time, dynamic_rnn loops at run time.
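Points 1 and 2 come down to Python lists versus single tensors. A minimal side-by-side sketch (not from the original article, assuming TF 1.x; the variable_scope only prevents a name clash between the two networks):

import tensorflow as tf

tf.reset_default_graph()
n_steps, n_input, n_hidden = 28, 28, 128
x = tf.placeholder("float", [None, n_steps, n_input])

# static_rnn: input and output are Python lists of per-step tensors
x1 = tf.unstack(x, n_steps, 1)              # list of 28 tensors, each [None, 28]
cell_s = tf.contrib.rnn.BasicLSTMCell(n_hidden)
out_s, _ = tf.contrib.rnn.static_rnn(cell_s, x1, dtype=tf.float32)
print(len(out_s), out_s[-1].shape)          # 28 (?, 128)

# dynamic_rnn: input and output are single batch-major tensors
with tf.variable_scope("dyn"):
    cell_d = tf.contrib.rnn.BasicLSTMCell(n_hidden)
    out_d, _ = tf.nn.dynamic_rnn(cell_d, x, dtype=tf.float32)
print(out_d.shape)                          # (?, 28, 128)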
Compare the two complete examples below carefully:
Dynamic RNN
import tensorflow as tf
# Import the MNIST dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("c:/user/administrator/data/", one_hot=True)

n_input = 28     # MNIST data input (img shape: 28*28)
n_steps = 28     # timesteps
n_hidden = 128   # hidden layer num of features
n_classes = 10   # MNIST classes (digits 0-9, 10 classes)
batch_size = 128

tf.reset_default_graph()

# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])

lstm_cell = tf.contrib.rnn.LSTMCell(n_hidden, forget_bias=1.0)
outputs, _ = tf.nn.dynamic_rnn(lstm_cell, x, dtype=tf.float32)
outputs = tf.transpose(outputs, [1, 0, 2])
# Take the output of the last time step (outputs[-1])
pred = tf.contrib.layers.fully_connected(outputs[-1], n_classes, activation_fn=None)

learning_rate = 0.001
training_iters = 100000
display_step = 10

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Launch the session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    step = 1
    # Keep training until reaching max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape data to get 28 sequences of 28 elements
        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step * batch_size) + ", Minibatch Loss= " +
                  "{:.6f}".format(loss) + ", Training Accuracy= " +
                  "{:.5f}".format(acc))
        step += 1
    print("Finished!")

    # Calculate accuracy for 128 MNIST test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={x: test_data, y: test_label}))
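A note on the transpose-then-index idiom above: it reorders outputs to time-major so that outputs[-1] picks the last step. Slicing the batch-major tensor directly does the same job; a drop-in alternative for those two lines (an equivalent sketch, not from the original; it reuses the outputs tensor returned by tf.nn.dynamic_rnn, before the transpose):

last_output = outputs[:, -1, :]  # [batch_size, n_hidden], same as transpose + outputs[-1]
pred = tf.contrib.layers.fully_connected(last_output, n_classes, activation_fn=None)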
Static RNN
import tensorflow as tf
# Import the MNIST dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("c:/user/administrator/data/", one_hot=True)

n_input = 28     # MNIST data input (img shape: 28*28)
n_steps = 28     # timesteps
n_hidden = 128   # hidden layer num of features
n_classes = 10   # MNIST classes (digits 0-9, 10 classes)
batch_size = 128

tf.reset_default_graph()

# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])

# Reshape x into the list format required by tf.contrib.rnn.static_rnn
x1 = tf.unstack(x, n_steps, 1)
lstm_cell = tf.contrib.rnn.LSTMCell(n_hidden, forget_bias=1.0)
outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, x1, dtype=tf.float32)
# Take the output of the last time step (outputs[-1])
pred = tf.contrib.layers.fully_connected(outputs[-1], n_classes, activation_fn=None)

learning_rate = 0.001
training_iters = 100000
display_step = 10

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Launch the session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    step = 1
    # Keep training until reaching max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape data to get 28 sequences of 28 elements
        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step * batch_size) + ", Minibatch Loss= " +
                  "{:.6f}".format(loss) + ", Training Accuracy= " +
                  "{:.5f}".format(acc))
        step += 1
    print("Finished!")

    # Calculate accuracy for 128 MNIST test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={x: test_data, y: test_label}))
The code below comes from:
凯文自学TensorFlow
# -*- coding: utf-8 -*-
import tensorflow as tf
# Import the MNIST dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("c:/user/administrator/data/", one_hot=True)

n_input = 28     # MNIST data input (img shape: 28*28)
n_steps = 28     # timesteps
n_hidden = 128   # hidden layer num of features
n_classes = 10   # MNIST classes (digits 0-9, 10 classes)
batch_size = 128

tf.reset_default_graph()

# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])

# Reshape x into the list format required by tf.contrib.rnn.static_rnn
#x1 = tf.unstack(x, n_steps, 1)

# BasicLSTMCell(num_units: the number of units inside one cell; forget_bias:
# how much the forget gate remembers, 1.0 means remember everything)
# Static (tf.contrib.rnn.static_rnn) means the network is unrolled over the
# sample's time-step count (n_steps), creating cell ops for each of the
# n_steps positions in the graph.
# Dynamic (tf.nn.dynamic_rnn) means only one cell is created and the remaining
# time steps are looped through it at run time.
"""
A statically generated RNN takes longer to build, occupies more memory, and
exports a larger model. The model carries the intermediate state of every
time step, which is useful for debugging, but at use time it must be fed the
same number of time steps it was trained with. A dynamically generated RNN
occupies less memory; the model keeps only the final state and supports a
different number of time steps at use time.
"""
#1 BasicLSTMCell
#lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
#outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, x1, dtype=tf.float32)

"""
#2 LSTMCell, an advanced version of the LSTM implementation
(use_peepholes: default False, True enables peephole connections;
cell_clip: whether to clip the cell state to the given value before output;
initializer: the initialization function to use;
num_proj: output dimension for model compression via projection;
proj_clip: clip the projected output to the given proj_clip)
"""
#lstm_cell = tf.contrib.rnn.LSTMCell(n_hidden, forget_bias=1.0)
#outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, x1, dtype=tf.float32)

#3 GRU cell definition
#gru = tf.contrib.rnn.GRUCell(n_hidden)
#outputs, states = tf.contrib.rnn.static_rnn(gru, x1, dtype=tf.float32)

#4 Create a dynamic RNN; the input here is x itself, the full
# [None, n_steps, n_input] tensor.
# For the full definition see https://blog.csdn.net/mzpmzk/article/details/80573338
gru = tf.contrib.rnn.GRUCell(n_hidden)
outputs, _ = tf.nn.dynamic_rnn(gru, x, dtype=tf.float32)
outputs = tf.transpose(outputs, [1, 0, 2])
# Take the output of the last time step (outputs[-1])
pred = tf.contrib.layers.fully_connected(outputs[-1], n_classes, activation_fn=None)

learning_rate = 0.001
training_iters = 100000
display_step = 10

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Launch the session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    step = 1
    # Keep training until reaching max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape data to get 28 sequences of 28 elements
        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step * batch_size) + ", Minibatch Loss= " +
                  "{:.6f}".format(loss) + ", Training Accuracy= " +
                  "{:.5f}".format(acc))
        step += 1
    print("Finished!")

    # Calculate accuracy for 128 MNIST test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={x: test_data, y: test_label}))
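The commented-out variant #2 above lists LSTMCell's extra arguments. A small sketch exercising them with arbitrary values (an illustration of the TF 1.x signature, not code from the original article):

import tensorflow as tf

cell = tf.contrib.rnn.LSTMCell(
    num_units=128,
    use_peepholes=True,  # enable peephole connections (default False)
    cell_clip=3.0,       # clip the cell state to [-3.0, 3.0] before the output
    num_proj=64,         # project each step's output down to 64 dimensions
    forget_bias=1.0)
print(cell.output_size)  # 64 -- with num_proj set, outputs have num_proj dimensions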
-----------------------------------------------------------------------------------