• TensorFlow: training a CNN image classifier on your own dataset, part 2 (saving the model & testing a single image)


      When training a neural network, we want to save the model so that we can resume training later or test with the trained weights. To do this, we create a tf.train.Saver in the training function and write checkpoints periodically:

    def run_training():
        data_dir = 'C:/Users/wk/Desktop/bky/dataSet/'
        log_dir = 'C:/Users/wk/Desktop/bky/log/'
        # build the input pipeline and the training graph
        image, label = inputData.get_files(data_dir)
        image_batches, label_batches = inputData.get_batches(image, label, 32, 32, 16, 20)
        print(image_batches.shape)
        p = model.mmodel(image_batches, 16)
        cost = model.loss(p, label_batches)
        train_op = model.training(cost, 0.001)
        acc = model.get_accuracy(p, label_batches)

        sess = tf.Session()
        init = tf.global_variables_initializer()
        sess.run(init)
        saver = tf.train.Saver()  # saver for writing checkpoints
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            for step in np.arange(1000):
                print(step)
                if coord.should_stop():
                    break
                _, train_acc, train_loss = sess.run([train_op, acc, cost])
                print("loss:{} accuracy:{}".format(train_loss, train_acc))
                if step % 100 == 0:
                    # write a checkpoint every 100 steps
                    check = os.path.join(log_dir, "model.ckpt")
                    saver.save(sess, check, global_step=step)
        except tf.errors.OutOfRangeError:
            print("Done!!!")
        finally:
            coord.request_stop()
        coord.join(threads)
        sess.close()
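
      The functions inputData.get_files, inputData.get_batches, mmodel, loss, training and get_accuracy come from the previous post. For orientation, loss, training and get_accuracy can be written roughly as below; this is only a minimal sketch assuming integer class labels and a plain gradient-descent optimizer, not necessarily the exact code used in part 1:

    def loss(logits, labels):
        # mean cross-entropy over the batch, for integer (sparse) labels
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels)
        return tf.reduce_mean(cross_entropy)

    def training(cost, learning_rate):
        # a single optimizer step that minimizes the loss
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        return optimizer.minimize(cost)

    def get_accuracy(logits, labels):
        # fraction of samples whose top-1 prediction matches the label
        correct = tf.nn.in_top_k(logits, labels, 1)
        return tf.reduce_mean(tf.cast(correct, tf.float32))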

      Information about the trained model is recorded in the checkpoint file, which looks roughly like this:

    model_checkpoint_path: "C:/Users/wk/Desktop/bky/log/model.ckpt-100"
    all_model_checkpoint_paths: "C:/Users/wk/Desktop/bky/log/model.ckpt-0"
    all_model_checkpoint_paths: "C:/Users/wk/Desktop/bky/log/model.ckpt-100"

      A few other files are generated alongside it (the .meta, .index and .data files), which hold the actual model parameters; at test time the program reads the checkpoint file to locate and load these data files.
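
      If you want to check what a checkpoint actually contains, TensorFlow's checkpoint reader can list the saved variables; a small sketch (the checkpoint path below is just the one from the log above):

    # list the variables stored in a checkpoint together with their shapes
    reader = tf.train.NewCheckpointReader('C:/Users/wk/Desktop/bky/log/model.ckpt-100')
    for name, shape in reader.get_variable_to_shape_map().items():
        print(name, shape)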

      After the network has been trained, running the test with the code from the previous post unchanged produces a shape-mismatch error, roughly that the input of a convolution layer does not agree with the image shape. The reason is that the previous post defined the weights and biases outside the model function, so calling the model again raises a ValueError.

      Therefore we need to define the parameters inside the model with tf.get_variable, so that when the trained parameters are loaded they actually initialize the model. The rewritten model function is as follows:

    def mmodel(images, batch_size):
        # conv layer 1: 3x3 kernel, 16 output channels
        with tf.variable_scope('conv1') as scope:
            weights = tf.get_variable('weights',
                                      shape=[3, 3, 3, 16],
                                      dtype=tf.float32,
                                      initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
            biases = tf.get_variable('biases',
                                     shape=[16],
                                     dtype=tf.float32,
                                     initializer=tf.constant_initializer(0.1))
            conv = tf.nn.conv2d(images, weights, strides=[1, 1, 1, 1], padding='SAME')
            pre_activation = tf.nn.bias_add(conv, biases)
            conv1 = tf.nn.relu(pre_activation, name=scope.name)
        # max pooling + local response normalization
        with tf.variable_scope('pooling1_lrn') as scope:
            pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                                   padding='SAME', name='pooling1')
            norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                              beta=0.75, name='norm1')
        # conv layer 2: 3x3 kernel, 128 output channels
        with tf.variable_scope('conv2') as scope:
            weights = tf.get_variable('weights',
                                      shape=[3, 3, 16, 128],
                                      dtype=tf.float32,
                                      initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
            biases = tf.get_variable('biases',
                                     shape=[128],
                                     dtype=tf.float32,
                                     initializer=tf.constant_initializer(0.1))
            conv = tf.nn.conv2d(norm1, weights, strides=[1, 1, 1, 1], padding='SAME')
            pre_activation = tf.nn.bias_add(conv, biases)
            conv2 = tf.nn.relu(pre_activation, name='conv2')
        # normalization + pooling
        with tf.variable_scope('pooling2_lrn') as scope:
            norm2 = tf.nn.lrn(conv2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                              beta=0.75, name='norm2')
            pool2 = tf.nn.max_pool(norm2, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1],
                                   padding='SAME', name='pooling2')
        # fully connected layer
        with tf.variable_scope('local3') as scope:
            reshape = tf.reshape(pool2, shape=[batch_size, -1])
            dim = reshape.get_shape()[1].value
            weights = tf.get_variable('weights',
                                      shape=[dim, 4096],
                                      dtype=tf.float32,
                                      initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
            biases = tf.get_variable('biases',
                                     shape=[4096],
                                     dtype=tf.float32,
                                     initializer=tf.constant_initializer(0.1))
            local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
        # linear output layer (2 classes)
        with tf.variable_scope('softmax_linear') as scope:
            weights = tf.get_variable('softmax_linear',
                                      shape=[4096, 2],
                                      dtype=tf.float32,
                                      initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
            biases = tf.get_variable('biases',
                                     shape=[2],
                                     dtype=tf.float32,
                                     initializer=tf.constant_initializer(0.1))
            softmax_linear = tf.add(tf.matmul(local3, weights), biases, name='softmax_linear')
        return softmax_linear
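
      Because every parameter is now created with tf.get_variable inside a variable_scope, each variable gets a stable name such as conv1/weights or local3/biases, and tf.train.Saver matches checkpoint entries against exactly these names when restoring. If you want to double-check the names after building the model, a quick way (not part of the original post) is:

    # after mmodel(...) has been called once, list what the Saver will save/restore
    for v in tf.global_variables():
        print(v.name, v.shape)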

     Testing the trained model

    First, load a single test image:

    def get_one_image(img_dir):
        image = Image.open(img_dir)
        plt.imshow(image)
        # resize to the network's input size and convert to a numpy array
        image = image.resize([32, 32])
        image_arr = np.array(image)
        return image_arr
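
    One caveat: the test graph below reshapes this array to [1, 32, 32, 3], so the image has to decode to exactly three channels. If your test images may be grayscale or carry an alpha channel, forcing RGB first avoids a shape error; a small variation on the function above (not in the original post):

    def get_one_image_rgb(img_dir):
        # force a 3-channel RGB image so the later reshape to [1, 32, 32, 3] succeeds
        image = Image.open(img_dir).convert('RGB')
        image = image.resize((32, 32))
        return np.array(image)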

    Load the model and compute the test result:

    def test(test_file):
        log_dir = 'C:/Users/wk/Desktop/bky/log/'
        image_arr = get_one_image(test_file)

        with tf.Graph().as_default():
            # standardize the image and shape it into a batch of one
            image = tf.cast(image_arr, tf.float32)
            image = tf.image.per_image_standardization(image)
            image = tf.reshape(image, [1, 32, 32, 3])
            print(image.shape)
            p = model.mmodel(image, 1)
            logits = tf.nn.softmax(p)
            saver = tf.train.Saver()
            with tf.Session() as sess:
                # restore the most recent checkpoint written during training
                ckpt = tf.train.get_checkpoint_state(log_dir)
                if ckpt and ckpt.model_checkpoint_path:
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    print('Loading success, global_step is %s' % global_step)
                else:
                    print('No checkpoint')
                prediction = sess.run(logits)
                max_index = np.argmax(prediction)
                print(max_index)

    The first part of test standardizes the test image into the input the network expects; the tf.train.get_checkpoint_state / saver.restore block loads the trained model; after that the image just runs through the model and the predicted class index is printed.
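
    To try it out, call test with the path of one image; the printed max_index is the position of the largest softmax score and follows whatever label ordering inputData.get_files used when the training set was built. A minimal driver (the image path below is only an example):

    if __name__ == '__main__':
        # example path only; point this at one of your own test images
        test('C:/Users/wk/Desktop/bky/dataSet/test.jpg')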

  • Original post: https://www.cnblogs.com/wktwj/p/7234799.html