Essentially every ML or DL model follows the same four-step routine (sketched in compact form right after this list):
1. Obtain the required data.
2. Build the model.
3. Choose a loss function.
4. Pick the batch size and number of epochs, then feed the data in for training.
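As a quick point of reference, here are the four steps in the TF 2.x Keras API. This is a minimal sketch added for orientation, not part of the original walkthrough; it assumes TensorFlow 2.x is installed, whereas the MNIST example below (and the tensorflow.examples.tutorials.mnist module it imports) targets TensorFlow 1.x.

import tensorflow as tf

# Step 1: obtain the data (Keras bundles a copy of MNIST)
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0  # scale pixel values to [0, 1]

# Step 2: build the model -- a single dense layer with softmax, i.e. y = softmax(xW + b)
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(10, activation='softmax'),
])

# Step 3: choose the loss (cross-entropy) and the optimizer (plain gradient descent)
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Step 4: pick the batch size and number of epochs, then feed the data
model.fit(x_train, y_train, batch_size=100, epochs=5)
model.evaluate(x_test, y_test)

The rest of this post builds the same softmax-regression model by hand with the TF 1.x graph API, which makes each of the four steps explicit.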
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

mnist = input_data.read_data_sets('./tmp/tensorflow/mnist/input_data', one_hot=True)  # download the data

x = tf.placeholder(tf.float32, [None, 784])        # input placeholder: flattened 28x28 images
yresult = tf.placeholder(tf.float32, [None, 10])   # true labels, one-hot encoded

w = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, w) + b)             # softmax turns the logits into class probabilities

cross_entropy = -tf.reduce_sum(yresult * tf.log(y))                            # cross-entropy loss, summed over the batch
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)   # gradient descent, learning rate 0.01

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for i in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        # add cross_entropy to the run list if you want to watch how the loss changes
        _, loss = sess.run([train_step, cross_entropy], feed_dict={x: batch_xs, yresult: batch_ys})
        if i % 200 == 0:
            print(loss)

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(yresult, 1))  # compare predicted and true labels
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))     # cast booleans to floats, then average to get the accuracy
    result = sess.run(accuracy, feed_dict={x: mnist.test.images, yresult: mnist.test.labels})  # evaluate on the test set
    print(str(result * 100) + '%')
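One caveat about the loss above: computing tf.nn.softmax and tf.log as separate ops can produce log(0) when a predicted probability underflows to zero. A numerically safer TF 1.x formulation keeps the raw logits and lets TensorFlow combine the softmax and the log internally. This is a suggested drop-in for the cross_entropy line above, not what the original code uses:

logits = tf.matmul(x, w) + b   # raw scores, before softmax
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=yresult, logits=logits))

Note that tf.reduce_mean averages the loss over the batch while the original tf.reduce_sum adds it up, so the gradient scale changes and the learning rate may need to be retuned after this swap.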