# Handwritten digit recognition (MNIST)
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


def full_connected():
    """Train a single fully connected (softmax) layer on MNIST.

    Builds a TF1 graph (placeholders -> one dense layer -> softmax
    cross-entropy loss -> SGD), trains for 2000 mini-batch steps while
    printing per-step training accuracy, and saves a checkpoint to
    ./ckpt/fc_model.

    Returns:
        None
    """
    # Load the real MNIST data (labels one-hot encoded as float 0/1 vectors).
    mnist = input_data.read_data_sets("./data/input_data/", one_hot=True)

    # 1. Placeholders for a batch: x [None, 784], y_true [None, 10].
    with tf.variable_scope("data"):
        x = tf.placeholder(tf.float32, [None, 784])  # input features
        # FIX: originally tf.int32, but softmax_cross_entropy_with_logits
        # requires float labels matching the logits dtype; one_hot=True
        # data is float 0/1 anyway.
        y_true = tf.placeholder(tf.float32, [None, 10])

    # 2. One fully connected layer: w [784, 10], b [10].
    # FIX: the original reused variable_scope "data" here; give the layer
    # its own scope so graph/variable names stay distinct.
    with tf.variable_scope("fc_model"):
        weight = tf.Variable(
            tf.random_normal([784, 10], mean=0.0, stddev=1.0), name="w")
        bias = tf.Variable(tf.constant(0.0, shape=[10]), name="b")
        # Predicted logits for the batch:
        # [None, 784] @ [784, 10] + [10] -> [None, 10]
        y_predict = tf.matmul(x, weight) + bias

    # 3. Mean softmax cross-entropy loss over the batch.
    with tf.variable_scope("soft_cross"):
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(
                labels=y_true, logits=y_predict))

    # 4. Gradient-descent training op (learning rate 0.1).
    with tf.variable_scope("optimizer"):
        train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

    # 5. Accuracy: compare the argmax of prediction vs. truth per sample;
    #    equal -> 1, not equal -> 0, e.g. [1, 1, 0, 1, 0, 0, ...], then
    #    the mean of those flags is the batch accuracy.
    with tf.variable_scope("acc"):
        equal_list = tf.equal(tf.argmax(y_true, 1), tf.argmax(y_predict, 1))
        accuracy = tf.reduce_mean(tf.cast(equal_list, tf.float32))

    # Op that initializes all graph variables.
    init_op = tf.global_variables_initializer()

    # Saver for writing the model checkpoint.
    saver = tf.train.Saver()

    # Open a session and train.
    with tf.Session() as sess:
        sess.run(init_op)
        # Iterative training: update parameters on one 50-sample
        # mini-batch per step.
        for i in range(2000):
            # Fetch the next batch of real features and targets.
            mnist_x, mnist_y = mnist.train.next_batch(50)
            # Run one optimization step.
            sess.run(train_op, feed_dict={x: mnist_x, y_true: mnist_y})
            print("训练第%d步,准确率为:%f" % (
                i,
                sess.run(accuracy, feed_dict={x: mnist_x, y_true: mnist_y})))
        # NOTE(review): the flattened original left the save statement's
        # indentation ambiguous; saving once after the loop (rather than
        # every step) is the sensible reading.
        saver.save(sess, "./ckpt/fc_model")
    return None


if __name__ == "__main__":
    full_connected()