Training a Letter Recognition Model with OpenCV and Caffe


    Overview

    • System environment: Ubuntu 14.04

    • Software environment: OpenCV 3.1 + opencv_contrib, Caffe, Code::Blocks

    The overall approach: take an existing letter dataset as the training samples and train a slightly modified LeNet model inside Caffe to obtain a caffemodel; then load that model and the network definition with OpenCV's dnn module (part of the opencv_contrib package) and output the prediction results.
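
    For orientation, here is a minimal sketch of the prediction side, modeled on the caffe_googlenet.cpp sample that ships with opencv_contrib 3.1 (OpenCV must be built with the contrib modules for opencv2/dnn.hpp to exist). The file names lenet_deploy.prototxt and letter.jpg and the output blob name "prob" are assumptions that depend on the deploy file you write; lenet_iter_10000.caffemodel is the final snapshot produced by the solver shown later.

    #include <iostream>
    #include <opencv2/opencv.hpp>
    #include <opencv2/dnn.hpp>
    using namespace cv;

    int main()
    {
        // Import the network definition and the trained weights.
        dnn::Net net;
        Ptr<dnn::Importer> importer = dnn::createCaffeImporter("lenet_deploy.prototxt",
                                                               "lenet_iter_10000.caffemodel");
        importer->populateNet(net);
        importer.release();

        // Prepare a 28x28 grayscale input, scaled to [0,1] as during training.
        Mat img = imread("letter.jpg", 0);
        resize(img, img, Size(28, 28));
        img.convertTo(img, CV_32F, 1.0 / 255);

        net.setBlob(".data", dnn::Blob(img)); // feed the input layer
        net.forward();                        // run a forward pass
        dnn::Blob prob = net.getBlob("prob"); // softmax output, one score per class

        // Report the class with the highest score.
        Point classId;
        double classProb;
        minMaxLoc(prob.matRefConst().reshape(1, 1), NULL, &classProb, NULL, &classId);
        std::cout << "class " << classId.x << "  prob " << classProb << std::endl;
        return 0;
    }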

    Training

    Choosing the Network

    LeNet is chosen as the letter recognition network: it performs very well at recognizing handwritten digits, and the model is simple and easy to understand.

    Preparing the Data

    The data is an existing letter dataset covering A–Z (and a–z). Every image is normalized to 28×28 pixels with the character region occupying 20×20 (i.e. a black margin is kept on all sides, so the character does not touch the border), and the images are split into two groups: training data and test data.

    The CMakeLists.txt file:

    cmake_minimum_required(VERSION 2.8)
    project( test )
    find_package( OpenCV REQUIRED )
    add_executable( test test.cpp )
    target_link_libraries( test ${OpenCV_LIBS} )
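
    To build and run (assuming the formatting program below is saved as test.cpp next to this CMakeLists.txt):

    cmake .
    make
    ./test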
    
    

    The formatting code is:

    
    
    //#include <boost/lexical_cast.hpp>
    #include <iostream>
    #include <opencv2/opencv.hpp>
    #include <dirent.h>
    #include <vector>
    #include <cmath>
    #include <string>
    #include <sstream>
    #include <fstream>
    #include <algorithm>

    using namespace std;
    using namespace cv;

    // Batch helper: collect the names of all regular files under ./img
    void createDocList(std::vector<std::string> &doc_list){
        int return_code;
        DIR *dir;
        struct dirent entry;
        struct dirent *res;
        string real_dir = "./img"; // directory to scan
        if ((dir = opendir(real_dir.c_str())) != NULL) { // open the directory
            for (return_code = readdir_r(dir, &entry, &res);
                 res != NULL && return_code == 0;
                 return_code = readdir_r(dir, &entry, &res)) {
                if (entry.d_type != DT_DIR) { // store plain files in the list
                    doc_list.push_back(string(entry.d_name));
                }
            }
            closedir(dir); // close the directory
        }
    }

    int main()
    {
        string file_path = "./img/";
        vector<string> file_list;
        createDocList(file_list);
        sort(file_list.begin(), file_list.end());
        if (file_list.size() == 0)
            cout << "open file error!" << endl;

        ofstream out;
        out.open("./filelist.txt");
        for (size_t i = 0; i < file_list.size(); i++)
        {
            stringstream snum;
            string n;
            snum << i;
            snum >> n;
            // path the formatted image is written to
            string s = "/opt/caffe-master/data/mnist-testdata/chars2/testA/" + n + "testA.jpg";
            // line written to the list file: relative path, a space, then the class label
            string m = "./data/mnist-testdata/chars2/testA/" + n + "testA.jpg 0";

            string image_path = file_path + file_list[i];
            Mat img = imread(image_path, 1);
            if (img.empty()) continue; // skip unreadable files
            resize(img, img, Size(20, 20));
            // pad the 20x20 character to 28x28 with a black border on every side
            int x = (28 - img.cols) / 2;
            int y = (28 - img.rows) / 2;
            Mat in_large;
            copyMakeBorder(img, in_large, y, y, x, x, BORDER_CONSTANT, Scalar::all(0));
            imwrite(s, in_large);
            out << m << '\n';
        }
        out.close();
        return 0;
    }
    
    
    
    

    The result is the set of formatted images together with a text file listing each image's path and label:
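
    For example, the first lines of the generated list file look like this (the labels are all 0 because this run formats a single class folder):

    ./data/mnist-testdata/chars2/testA/0testA.jpg 0
    ./data/mnist-testdata/chars2/testA/1testA.jpg 0
    ./data/mnist-testdata/chars2/testA/2testA.jpg 0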

    Converting the Data

    Convert the JPEG images to LMDB: run a modified create_imagenet.sh to do the conversion and produce the training and test LMDBs.

    After modification:

    
    #!/usr/bin/env sh
    # Create the imagenet lmdb inputs
    # N.B. set the path to the imagenet train + val data dirs

    EXAMPLE=examples/mnist-test   # where the generated lmdb data is placed
    DATA=data/mnist-testdata      # directory holding train.txt/val.txt, the path-and-label list files
    TOOLS=build/tools

    # Root prefix that the list files omit from their relative paths.
    TRAIN_DATA_ROOT=/opt/caffe-master/
    VAL_DATA_ROOT=/opt/caffe-master/

    # Set RESIZE=true to resize the images to 256x256. Leave as false if images have
    # already been resized using another tool.
    RESIZE=false
    if $RESIZE; then
      RESIZE_HEIGHT=256
      RESIZE_WIDTH=256
    else
      RESIZE_HEIGHT=0
      RESIZE_WIDTH=0
    fi

    if [ ! -d "$TRAIN_DATA_ROOT" ]; then
      echo "Error: TRAIN_DATA_ROOT is not a path to a directory: $TRAIN_DATA_ROOT"
      echo "Set the TRAIN_DATA_ROOT variable in create_imagenet.sh to the path" \
           "where the ImageNet training data is stored."
      exit 1
    fi

    if [ ! -d "$VAL_DATA_ROOT" ]; then
      echo "Error: VAL_DATA_ROOT is not a path to a directory: $VAL_DATA_ROOT"
      echo "Set the VAL_DATA_ROOT variable in create_imagenet.sh to the path" \
           "where the ImageNet validation data is stored."
      exit 1
    fi

    echo "Creating train lmdb..."

    # --gray: the inputs are grayscale images, so store single-channel data
    GLOG_logtostderr=1 $TOOLS/convert_imageset \
        --resize_height=$RESIZE_HEIGHT \
        --resize_width=$RESIZE_WIDTH \
        --shuffle \
        --gray \
        $TRAIN_DATA_ROOT \
        $DATA/train.txt \
        $EXAMPLE/digit_train_lmdb   # the generated lmdb: letter training data

    echo "Creating val lmdb..."

    GLOG_logtostderr=1 $TOOLS/convert_imageset \
        --resize_height=$RESIZE_HEIGHT \
        --resize_width=$RESIZE_WIDTH \
        --shuffle \
        --gray \
        $VAL_DATA_ROOT \
        $DATA/val.txt \
        $EXAMPLE/digit_test_lmdb    # the generated lmdb: letter test data

    echo "Done."
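
    Since EXAMPLE, DATA and TOOLS are relative paths, the script must be run from the Caffe root directory; for example (where you keep the modified script is up to you, the path below is hypothetical):

    cd /opt/caffe-master
    sh examples/mnist-test/create_imagenet.sh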
    
    

    Training and Testing the Model

    The command to run is:

    
    
    ./build/tools/caffe train --solver=examples/mnist/lenet_solver.prototxt
    
    

    This invokes the caffe binary (built from caffe.cpp) with the train action and the solver file, which is all it takes to train and test.
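
    Once a snapshot exists, the same binary can also evaluate it on the test LMDB; a typical invocation (lenet_iter_10000.caffemodel is the final snapshot the solver below produces, and the iteration count matches test_iter):

    ./build/tools/caffe test --model=examples/mnist/lenet_train_test.prototxt \
        --weights=examples/mnist/lenet_iter_10000.caffemodel --iterations=100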

    The contents of lenet_solver.prototxt are:

    
    
    # The train/test net protocol buffer definition
    net: "examples/mnist/lenet_train_test.prototxt"
    # test_iter specifies how many forward passes the test should carry out.
    # In the case of MNIST, we have test batch size 100 and 100 test iterations,
    # covering the full 10,000 testing images.
    test_iter: 100
    # Carry out testing every 500 training iterations.
    test_interval: 500
    # The base learning rate, momentum and the weight decay of the network.
    base_lr: 0.01
    momentum: 0.9
    weight_decay: 0.0005
    # The learning rate policy
    lr_policy: "inv"
    gamma: 0.0001
    power: 0.75
    # Display every 100 iterations
    display: 100
    # The maximum number of iterations
    max_iter: 10000
    # snapshot intermediate results
    snapshot: 5000
    snapshot_prefix: "examples/mnist/lenet"
    # solver mode: CPU or GPU
    solver_mode: GPU
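
    With lr_policy: "inv", Caffe decays the learning rate as base_lr * (1 + gamma * iter)^(-power). A small sketch to reproduce the schedule; its output for iterations 0, 500 and 1000 matches the lr = 0.01, 0.00964069 and 0.00931012 lines in the training log further down:

    #include <cmath>
    #include <cstdio>

    int main()
    {
        // Values from lenet_solver.prototxt above.
        const double base_lr = 0.01, gamma = 1e-4, power = 0.75;
        for (int iter = 0; iter <= 10000; iter += 500) {
            double lr = base_lr * std::pow(1.0 + gamma * iter, -power);
            std::printf("iter %5d  lr = %.8f\n", iter, lr);
        }
        return 0;
    }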
    
    

    The lenet_train_test.prototxt used during training:

    
    name: "LeNet"
    layer {
      name: "mnist"
      type: "Data"
      top: "data"
      top: "label"
      include {
        phase: TRAIN
      }
      transform_param {
        scale: 0.00390625
      }
      data_param {
        source: "examples/mnist-test/digit_train_lmdb"
        batch_size: 64
        backend: LMDB
      }
    }
    layer {
      name: "mnist"
      type: "Data"
      top: "data"
      top: "label"
      include {
        phase: TEST
      }
      transform_param {
        scale: 0.00390625
      }
      data_param {
        source: "examples/mnist-test/digit_test_lmdb"
        batch_size: 100
        backend: LMDB
      }
    }
    layer {
      name: "conv1"
      type: "Convolution"
      bottom: "data"
      top: "conv1"
      param {
        lr_mult: 1
      }
      param {
        lr_mult: 2
      }
      convolution_param {
        num_output: 20
        kernel_size: 5
        stride: 1
        weight_filler {
          type: "xavier"
        }
        bias_filler {
          type: "constant"
        }
      }
    }
    layer {
      name: "pool1"
      type: "Pooling"
      bottom: "conv1"
      top: "pool1"
      pooling_param {
        pool: MAX
        kernel_size: 2
        stride: 2
      }
    }
    layer {
      name: "conv2"
      type: "Convolution"
      bottom: "pool1"
      top: "conv2"
      param {
        lr_mult: 1
      }
      param {
        lr_mult: 2
      }
      convolution_param {
        num_output: 50
        kernel_size: 5
        stride: 1
        weight_filler {
          type: "xavier"
        }
        bias_filler {
          type: "constant"
        }
      }
    }
    layer {
      name: "pool2"
      type: "Pooling"
      bottom: "conv2"
      top: "pool2"
      pooling_param {
        pool: MAX
        kernel_size: 2
        stride: 2
      }
    }
    layer {
      name: "ip1"
      type: "InnerProduct"
      bottom: "pool2"
      top: "ip1"
      param {
        lr_mult: 1
      }
      param {
        lr_mult: 2
      }
      inner_product_param {
        num_output: 500
        weight_filler {
          type: "xavier"
        }
        bias_filler {
          type: "constant"
        }
      }
    }
    layer {
      name: "relu1"
      type: "ReLU"
      bottom: "ip1"
      top: "ip1"
    }
    layer {
      name: "ip2"
      type: "InnerProduct"
      bottom: "ip1"
      top: "ip2"
      param {
        lr_mult: 1
      }
      param {
        lr_mult: 2
      }
      inner_product_param {
        num_output: 12
        weight_filler {
          type: "xavier"
        }
        bias_filler {
          type: "constant"
        }
      }
    }
    layer {
      name: "accuracy"
      type: "Accuracy"
      bottom: "ip2"
      bottom: "label"
      top: "accuracy"
      include {
        phase: TEST
      }
    }
    layer {
      name: "loss"
      type: "SoftmaxWithLoss"
      bottom: "ip2"
      bottom: "label"
      top: "loss"
    }
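
    As a sanity check on the architecture, the layer output sizes can be derived by hand and should match the "Top shape" lines in the log below: conv1 maps the 28×28 input to (28 - 5)/1 + 1 = 24, pool1 halves that to 12, conv2 yields (12 - 5)/1 + 1 = 8, and pool2 halves that to 4, so ip1 sees 50 * 4 * 4 = 800 inputs and compresses them to 500, which ip2 reduces to the 12 class scores. Note that num_output: 12 in ip2 must equal the number of distinct labels in the list files.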
    
    

    The final output is:

    
    I0411 20:10:53.763710 26325 caffe.cpp:185] Using GPUs 0
    
    I0411 20:10:53.772902 26325 caffe.cpp:190] GPU 0: GeForce GTX 960M
    
    I0411 20:10:53.912603 26325 solver.cpp:48] Initializing solver from parameters: 
    
    test_iter: 100
    
    test_interval: 500
    
    base_lr: 0.01
    
    display: 100
    
    max_iter: 10000
    
    lr_policy: "inv"
    
    gamma: 0.0001
    
    power: 0.75
    
    momentum: 0.9
    
    weight_decay: 0.0005
    
    snapshot: 5000
    
    snapshot_prefix: "examples/mnist/lenet"
    
    solver_mode: GPU
    
    device_id: 0
    
    net: "examples/mnist/lenet_train_test.prototxt"
    
    I0411 20:10:53.912749 26325 solver.cpp:91] Creating training net from net file: examples/mnist/lenet_train_test.prototxt
    
    I0411 20:10:53.913069 26325 net.cpp:313] The NetState phase (0) differed from the phase (1) specified by a rule in layer mnist
    
    I0411 20:10:53.913086 26325 net.cpp:313] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy
    
    I0411 20:10:53.913151 26325 net.cpp:49] Initializing net from parameters: 
    
    name: "LeNet"
    
    state {
    
      phase: TRAIN
    
    }
    
    layer {
    
      name: "mnist"
    
      type: "Data"
    
      top: "data"
    
      top: "label"
    
      include {
    
        phase: TRAIN
    
      }
    
      transform_param {
    
        scale: 0.00390625
    
      }
    
      data_param {
    
        source: "examples/mnist-test/digit_train_lmdb"
    
        batch_size: 64
    
        backend: LMDB
    
      }
    
    }
    
    layer {
    
      name: "conv1"
    
      type: "Convolution"
    
      bottom: "data"
    
      top: "conv1"
    
      param {
    
        lr_mult: 1
    
      }
    
      param {
    
        lr_mult: 2
    
      }
    
      convolution_param {
    
        num_output: 20
    
        kernel_size: 5
    
        stride: 1
    
        weight_filler {
    
          type: "xavier"
    
        }
    
        bias_filler {
    
          type: "constant"
    
        }
    
      }
    
    }
    
    layer {
    
      name: "pool1"
    
      type: "Pooling"
    
      bottom: "conv1"
    
      top: "pool1"
    
      pooling_param {
    
        pool: MAX
    
        kernel_size: 2
    
        stride: 2
    
      }
    
    }
    
    layer {
    
      name: "conv2"
    
      type: "Convolution"
    
      bottom: "pool1"
    
      top: "conv2"
    
      param {
    
        lr_mult: 1
    
      }
    
      param {
    
        lr_mult: 2
    
      }
    
      convolution_param {
    
        num_output: 50
    
        kernel_size: 5
    
        stride: 1
    
        weight_filler {
    
          type: "xavier"
    
        }
    
        bias_filler {
    
          type: "constant"
    
        }
    
      }
    
    }
    
    layer {
    
      name: "pool2"
    
      type: "Pooling"
    
      bottom: "conv2"
    
      top: "pool2"
    
      pooling_param {
    
        pool: MAX
    
        kernel_size: 2
    
        stride: 2
    
      }
    
    }
    
    layer {
    
      name: "ip1"
    
      type: "InnerProduct"
    
      bottom: "pool2"
    
      top: "ip1"
    
      param {
    
        lr_mult: 1
    
      }
    
      param {
    
        lr_mult: 2
    
      }
    
      inner_product_param {
    
        num_output: 500
    
        weight_filler {
    
          type: "xavier"
    
        }
    
        bias_filler {
    
          type: "constant"
    
        }
    
      }
    
    }
    
    layer {
    
      name: "relu1"
    
      type: "ReLU"
    
      bottom: "ip1"
    
      top: "ip1"
    
    }
    
    layer {
    
      name: "ip2"
    
      type: "InnerProduct"
    
      bottom: "ip1"
    
      top: "ip2"
    
      param {
    
        lr_mult: 1
    
      }
    
      param {
    
        lr_mult: 2
    
      }
    
      inner_product_param {
    
        num_output: 12
    
        weight_filler {
    
          type: "xavier"
    
        }
    
        bias_filler {
    
          type: "constant"
    
        }
    
      }
    
    }
    
    layer {
    
      name: "loss"
    
      type: "SoftmaxWithLoss"
    
      bottom: "ip2"
    
      bottom: "label"
    
      top: "loss"
    
    }
    
    I0411 20:10:53.913213 26325 layer_factory.hpp:77] Creating layer mnist
    
    I0411 20:10:53.913669 26325 net.cpp:91] Creating Layer mnist
    
    I0411 20:10:53.913698 26325 net.cpp:399] mnist -> data
    
    I0411 20:10:53.913725 26325 net.cpp:399] mnist -> label
    
    I0411 20:10:53.914347 26330 db_lmdb.cpp:38] Opened lmdb examples/mnist-test/digit_train_lmdb
    
    I0411 20:10:53.929003 26325 data_layer.cpp:41] output data size: 64,1,28,28
    
    I0411 20:10:53.929726 26325 net.cpp:141] Setting up mnist
    
    I0411 20:10:53.929754 26325 net.cpp:148] Top shape: 64 1 28 28 (50176)
    
    I0411 20:10:53.929760 26325 net.cpp:148] Top shape: 64 (64)
    
    I0411 20:10:53.929765 26325 net.cpp:156] Memory required for data: 200960
    
    I0411 20:10:53.929775 26325 layer_factory.hpp:77] Creating layer conv1
    
    I0411 20:10:53.929800 26325 net.cpp:91] Creating Layer conv1
    
    I0411 20:10:53.929807 26325 net.cpp:425] conv1 <- data
    
    I0411 20:10:53.929821 26325 net.cpp:399] conv1 -> conv1
    
    I0411 20:10:54.075495 26325 net.cpp:141] Setting up conv1
    
    I0411 20:10:54.075543 26325 net.cpp:148] Top shape: 64 20 24 24 (737280)
    
    I0411 20:10:54.075551 26325 net.cpp:156] Memory required for data: 3150080
    
    I0411 20:10:54.075572 26325 layer_factory.hpp:77] Creating layer pool1
    
    I0411 20:10:54.075595 26325 net.cpp:91] Creating Layer pool1
    
    I0411 20:10:54.075608 26325 net.cpp:425] pool1 <- conv1
    
    I0411 20:10:54.075623 26325 net.cpp:399] pool1 -> pool1
    
    I0411 20:10:54.075665 26325 net.cpp:141] Setting up pool1
    
    I0411 20:10:54.075673 26325 net.cpp:148] Top shape: 64 20 12 12 (184320)
    
    I0411 20:10:54.075678 26325 net.cpp:156] Memory required for data: 3887360
    
    I0411 20:10:54.075682 26325 layer_factory.hpp:77] Creating layer conv2
    
    I0411 20:10:54.075693 26325 net.cpp:91] Creating Layer conv2
    
    I0411 20:10:54.075698 26325 net.cpp:425] conv2 <- pool1
    
    I0411 20:10:54.075705 26325 net.cpp:399] conv2 -> conv2
    
    I0411 20:10:54.076566 26325 net.cpp:141] Setting up conv2
    
    I0411 20:10:54.076581 26325 net.cpp:148] Top shape: 64 50 8 8 (204800)
    
    I0411 20:10:54.076586 26325 net.cpp:156] Memory required for data: 4706560
    
    I0411 20:10:54.076596 26325 layer_factory.hpp:77] Creating layer pool2
    
    I0411 20:10:54.076604 26325 net.cpp:91] Creating Layer pool2
    
    I0411 20:10:54.076609 26325 net.cpp:425] pool2 <- conv2
    
    I0411 20:10:54.076616 26325 net.cpp:399] pool2 -> pool2
    
    I0411 20:10:54.076647 26325 net.cpp:141] Setting up pool2
    
    I0411 20:10:54.076653 26325 net.cpp:148] Top shape: 64 50 4 4 (51200)
    
    I0411 20:10:54.076658 26325 net.cpp:156] Memory required for data: 4911360
    
    I0411 20:10:54.076663 26325 layer_factory.hpp:77] Creating layer ip1
    
    I0411 20:10:54.076673 26325 net.cpp:91] Creating Layer ip1
    
    I0411 20:10:54.076678 26325 net.cpp:425] ip1 <- pool2
    
    I0411 20:10:54.076684 26325 net.cpp:399] ip1 -> ip1
    
    I0411 20:10:54.079391 26325 net.cpp:141] Setting up ip1
    
    I0411 20:10:54.079404 26325 net.cpp:148] Top shape: 64 500 (32000)
    
    I0411 20:10:54.079409 26325 net.cpp:156] Memory required for data: 5039360
    
    I0411 20:10:54.079418 26325 layer_factory.hpp:77] Creating layer relu1
    
    I0411 20:10:54.079427 26325 net.cpp:91] Creating Layer relu1
    
    I0411 20:10:54.079430 26325 net.cpp:425] relu1 <- ip1
    
    I0411 20:10:54.079437 26325 net.cpp:386] relu1 -> ip1 (in-place)
    
    I0411 20:10:54.079659 26325 net.cpp:141] Setting up relu1
    
    I0411 20:10:54.079673 26325 net.cpp:148] Top shape: 64 500 (32000)
    
    I0411 20:10:54.079676 26325 net.cpp:156] Memory required for data: 5167360
    
    I0411 20:10:54.079681 26325 layer_factory.hpp:77] Creating layer ip2
    
    I0411 20:10:54.079689 26325 net.cpp:91] Creating Layer ip2
    
    I0411 20:10:54.079694 26325 net.cpp:425] ip2 <- ip1
    
    I0411 20:10:54.079701 26325 net.cpp:399] ip2 -> ip2
    
    I0411 20:10:54.080132 26325 net.cpp:141] Setting up ip2
    
    I0411 20:10:54.080149 26325 net.cpp:148] Top shape: 64 12 (768)
    
    I0411 20:10:54.080157 26325 net.cpp:156] Memory required for data: 5170432
    
    I0411 20:10:54.080166 26325 layer_factory.hpp:77] Creating layer loss
    
    I0411 20:10:54.080178 26325 net.cpp:91] Creating Layer loss
    
    I0411 20:10:54.080183 26325 net.cpp:425] loss <- ip2
    
    I0411 20:10:54.080188 26325 net.cpp:425] loss <- label
    
    I0411 20:10:54.080194 26325 net.cpp:399] loss -> loss
    
    I0411 20:10:54.080209 26325 layer_factory.hpp:77] Creating layer loss
    
    I0411 20:10:54.080411 26325 net.cpp:141] Setting up loss
    
    I0411 20:10:54.080421 26325 net.cpp:148] Top shape: (1)
    
    I0411 20:10:54.080426 26325 net.cpp:151]     with loss weight 1
    
    I0411 20:10:54.080443 26325 net.cpp:156] Memory required for data: 5170436
    
    I0411 20:10:54.080448 26325 net.cpp:217] loss needs backward computation.
    
    I0411 20:10:54.080453 26325 net.cpp:217] ip2 needs backward computation.
    
    I0411 20:10:54.080457 26325 net.cpp:217] relu1 needs backward computation.
    
    I0411 20:10:54.080462 26325 net.cpp:217] ip1 needs backward computation.
    
    I0411 20:10:54.080466 26325 net.cpp:217] pool2 needs backward computation.
    
    I0411 20:10:54.080471 26325 net.cpp:217] conv2 needs backward computation.
    
    I0411 20:10:54.080476 26325 net.cpp:217] pool1 needs backward computation.
    
    I0411 20:10:54.080479 26325 net.cpp:217] conv1 needs backward computation.
    
    I0411 20:10:54.080483 26325 net.cpp:219] mnist does not need backward computation.
    
    I0411 20:10:54.080488 26325 net.cpp:261] This network produces output loss
    
    I0411 20:10:54.080497 26325 net.cpp:274] Network initialization done.
    
    I0411 20:10:54.080797 26325 solver.cpp:181] Creating test net (#0) specified by net file: examples/mnist/lenet_train_test.prototxt
    
    I0411 20:10:54.080837 26325 net.cpp:313] The NetState phase (1) differed from the phase (0) specified by a rule in layer mnist
    
    I0411 20:10:54.080917 26325 net.cpp:49] Initializing net from parameters: 
    
    name: "LeNet"
    
    state {
    
      phase: TEST
    
    }
    
    layer {
    
      name: "mnist"
    
      type: "Data"
    
      top: "data"
    
      top: "label"
    
      include {
    
        phase: TEST
    
      }
    
      transform_param {
    
        scale: 0.00390625
    
      }
    
      data_param {
    
        source: "examples/mnist-test/digit_test_lmdb"
    
        batch_size: 100
    
        backend: LMDB
    
      }
    
    }
    
    layer {
    
      name: "conv1"
    
      type: "Convolution"
    
      bottom: "data"
    
      top: "conv1"
    
      param {
    
        lr_mult: 1
    
      }
    
      param {
    
        lr_mult: 2
    
      }
    
      convolution_param {
    
        num_output: 20
    
        kernel_size: 5
    
        stride: 1
    
        weight_filler {
    
          type: "xavier"
    
        }
    
        bias_filler {
    
          type: "constant"
    
        }
    
      }
    
    }
    
    layer {
    
      name: "pool1"
    
      type: "Pooling"
    
      bottom: "conv1"
    
      top: "pool1"
    
      pooling_param {
    
        pool: MAX
    
        kernel_size: 2
    
        stride: 2
    
      }
    
    }
    
    layer {
    
      name: "conv2"
    
      type: "Convolution"
    
      bottom: "pool1"
    
      top: "conv2"
    
      param {
    
        lr_mult: 1
    
      }
    
      param {
    
        lr_mult: 2
    
      }
    
      convolution_param {
    
        num_output: 50
    
        kernel_size: 5
    
        stride: 1
    
        weight_filler {
    
          type: "xavier"
    
        }
    
        bias_filler {
    
          type: "constant"
    
        }
    
      }
    
    }
    
    layer {
    
      name: "pool2"
    
      type: "Pooling"
    
      bottom: "conv2"
    
      top: "pool2"
    
      pooling_param {
    
        pool: MAX
    
        kernel_size: 2
    
        stride: 2
    
      }
    
    }
    
    layer {
    
      name: "ip1"
    
      type: "InnerProduct"
    
      bottom: "pool2"
    
      top: "ip1"
    
      param {
    
        lr_mult: 1
    
      }
    
      param {
    
        lr_mult: 2
    
      }
    
      inner_product_param {
    
        num_output: 500
    
        weight_filler {
    
          type: "xavier"
    
        }
    
        bias_filler {
    
          type: "constant"
    
        }
    
      }
    
    }
    
    layer {
    
      name: "relu1"
    
      type: "ReLU"
    
      bottom: "ip1"
    
      top: "ip1"
    
    }
    
    layer {
    
      name: "ip2"
    
      type: "InnerProduct"
    
      bottom: "ip1"
    
      top: "ip2"
    
      param {
    
        lr_mult: 1
    
      }
    
      param {
    
        lr_mult: 2
    
      }
    
      inner_product_param {
    
        num_output: 12
    
        weight_filler {
    
          type: "xavier"
    
        }
    
        bias_filler {
    
          type: "constant"
    
        }
    
      }
    
    }
    
    layer {
    
      name: "accuracy"
    
      type: "Accuracy"
    
      bottom: "ip2"
    
      bottom: "label"
    
      top: "accuracy"
    
      include {
    
        phase: TEST
    
      }
    
    }
    
    layer {
    
      name: "loss"
    
      type: "SoftmaxWithLoss"
    
      bottom: "ip2"
    
      bottom: "label"
    
      top: "loss"
    
    }
    
    I0411 20:10:54.080974 26325 layer_factory.hpp:77] Creating layer mnist
    
    I0411 20:10:54.081065 26325 net.cpp:91] Creating Layer mnist
    
    I0411 20:10:54.081085 26325 net.cpp:399] mnist -> data
    
    I0411 20:10:54.081094 26325 net.cpp:399] mnist -> label
    
    I0411 20:10:54.081743 26336 db_lmdb.cpp:38] Opened lmdb examples/mnist-test/digit_test_lmdb
    
    I0411 20:10:54.081830 26325 data_layer.cpp:41] output data size: 100,1,28,28
    
    I0411 20:10:54.082656 26325 net.cpp:141] Setting up mnist
    
    I0411 20:10:54.082670 26325 net.cpp:148] Top shape: 100 1 28 28 (78400)
    
    I0411 20:10:54.082676 26325 net.cpp:148] Top shape: 100 (100)
    
    I0411 20:10:54.082681 26325 net.cpp:156] Memory required for data: 314000
    
    I0411 20:10:54.082686 26325 layer_factory.hpp:77] Creating layer label_mnist_1_split
    
    I0411 20:10:54.082697 26325 net.cpp:91] Creating Layer label_mnist_1_split
    
    I0411 20:10:54.082702 26325 net.cpp:425] label_mnist_1_split <- label
    
    I0411 20:10:54.082710 26325 net.cpp:399] label_mnist_1_split -> label_mnist_1_split_0
    
    I0411 20:10:54.082717 26325 net.cpp:399] label_mnist_1_split -> label_mnist_1_split_1
    
    I0411 20:10:54.082774 26325 net.cpp:141] Setting up label_mnist_1_split
    
    I0411 20:10:54.082783 26325 net.cpp:148] Top shape: 100 (100)
    
    I0411 20:10:54.082789 26325 net.cpp:148] Top shape: 100 (100)
    
    I0411 20:10:54.082793 26325 net.cpp:156] Memory required for data: 314800
    
    I0411 20:10:54.082798 26325 layer_factory.hpp:77] Creating layer conv1
    
    I0411 20:10:54.082808 26325 net.cpp:91] Creating Layer conv1
    
    I0411 20:10:54.082813 26325 net.cpp:425] conv1 <- data
    
    I0411 20:10:54.082819 26325 net.cpp:399] conv1 -> conv1
    
    I0411 20:10:54.083683 26325 net.cpp:141] Setting up conv1
    
    I0411 20:10:54.083698 26325 net.cpp:148] Top shape: 100 20 24 24 (1152000)
    
    I0411 20:10:54.083704 26325 net.cpp:156] Memory required for data: 4922800
    
    I0411 20:10:54.083714 26325 layer_factory.hpp:77] Creating layer pool1
    
    I0411 20:10:54.083730 26325 net.cpp:91] Creating Layer pool1
    
    I0411 20:10:54.083736 26325 net.cpp:425] pool1 <- conv1
    
    I0411 20:10:54.083742 26325 net.cpp:399] pool1 -> pool1
    
    I0411 20:10:54.083777 26325 net.cpp:141] Setting up pool1
    
    I0411 20:10:54.083786 26325 net.cpp:148] Top shape: 100 20 12 12 (288000)
    
    I0411 20:10:54.083792 26325 net.cpp:156] Memory required for data: 6074800
    
    I0411 20:10:54.083797 26325 layer_factory.hpp:77] Creating layer conv2
    
    I0411 20:10:54.083807 26325 net.cpp:91] Creating Layer conv2
    
    I0411 20:10:54.083813 26325 net.cpp:425] conv2 <- pool1
    
    I0411 20:10:54.083822 26325 net.cpp:399] conv2 -> conv2
    
    I0411 20:10:54.085094 26325 net.cpp:141] Setting up conv2
    
    I0411 20:10:54.085116 26325 net.cpp:148] Top shape: 100 50 8 8 (320000)
    
    I0411 20:10:54.085126 26325 net.cpp:156] Memory required for data: 7354800
    
    I0411 20:10:54.085139 26325 layer_factory.hpp:77] Creating layer pool2
    
    I0411 20:10:54.085155 26325 net.cpp:91] Creating Layer pool2
    
    I0411 20:10:54.085161 26325 net.cpp:425] pool2 <- conv2
    
    I0411 20:10:54.085170 26325 net.cpp:399] pool2 -> pool2
    
    I0411 20:10:54.085216 26325 net.cpp:141] Setting up pool2
    
    I0411 20:10:54.085230 26325 net.cpp:148] Top shape: 100 50 4 4 (80000)
    
    I0411 20:10:54.085239 26325 net.cpp:156] Memory required for data: 7674800
    
    I0411 20:10:54.085245 26325 layer_factory.hpp:77] Creating layer ip1
    
    I0411 20:10:54.085255 26325 net.cpp:91] Creating Layer ip1
    
    I0411 20:10:54.085263 26325 net.cpp:425] ip1 <- pool2
    
    I0411 20:10:54.085283 26325 net.cpp:399] ip1 -> ip1
    
    I0411 20:10:54.088999 26325 net.cpp:141] Setting up ip1
    
    I0411 20:10:54.089022 26325 net.cpp:148] Top shape: 100 500 (50000)
    
    I0411 20:10:54.089030 26325 net.cpp:156] Memory required for data: 7874800
    
    I0411 20:10:54.089042 26325 layer_factory.hpp:77] Creating layer relu1
    
    I0411 20:10:54.089052 26325 net.cpp:91] Creating Layer relu1
    
    I0411 20:10:54.089058 26325 net.cpp:425] relu1 <- ip1
    
    I0411 20:10:54.089068 26325 net.cpp:386] relu1 -> ip1 (in-place)
    
    I0411 20:10:54.089372 26325 net.cpp:141] Setting up relu1
    
    I0411 20:10:54.089390 26325 net.cpp:148] Top shape: 100 500 (50000)
    
    I0411 20:10:54.089396 26325 net.cpp:156] Memory required for data: 8074800
    
    I0411 20:10:54.089402 26325 layer_factory.hpp:77] Creating layer ip2
    
    I0411 20:10:54.089416 26325 net.cpp:91] Creating Layer ip2
    
    I0411 20:10:54.089422 26325 net.cpp:425] ip2 <- ip1
    
    I0411 20:10:54.089432 26325 net.cpp:399] ip2 -> ip2
    
    I0411 20:10:54.089603 26325 net.cpp:141] Setting up ip2
    
    I0411 20:10:54.089615 26325 net.cpp:148] Top shape: 100 12 (1200)
    
    I0411 20:10:54.089622 26325 net.cpp:156] Memory required for data: 8079600
    
    I0411 20:10:54.089630 26325 layer_factory.hpp:77] Creating layer ip2_ip2_0_split
    
    I0411 20:10:54.089639 26325 net.cpp:91] Creating Layer ip2_ip2_0_split
    
    I0411 20:10:54.089645 26325 net.cpp:425] ip2_ip2_0_split <- ip2
    
    I0411 20:10:54.089654 26325 net.cpp:399] ip2_ip2_0_split -> ip2_ip2_0_split_0
    
    I0411 20:10:54.089664 26325 net.cpp:399] ip2_ip2_0_split -> ip2_ip2_0_split_1
    
    I0411 20:10:54.089705 26325 net.cpp:141] Setting up ip2_ip2_0_split
    
    I0411 20:10:54.089715 26325 net.cpp:148] Top shape: 100 12 (1200)
    
    I0411 20:10:54.089721 26325 net.cpp:148] Top shape: 100 12 (1200)
    
    I0411 20:10:54.089727 26325 net.cpp:156] Memory required for data: 8089200
    
    I0411 20:10:54.089733 26325 layer_factory.hpp:77] Creating layer accuracy
    
    I0411 20:10:54.089745 26325 net.cpp:91] Creating Layer accuracy
    
    I0411 20:10:54.089752 26325 net.cpp:425] accuracy <- ip2_ip2_0_split_0
    
    I0411 20:10:54.089759 26325 net.cpp:425] accuracy <- label_mnist_1_split_0
    
    I0411 20:10:54.089769 26325 net.cpp:399] accuracy -> accuracy
    
    I0411 20:10:54.089783 26325 net.cpp:141] Setting up accuracy
    
    I0411 20:10:54.089792 26325 net.cpp:148] Top shape: (1)
    
    I0411 20:10:54.089797 26325 net.cpp:156] Memory required for data: 8089204
    
    I0411 20:10:54.089802 26325 layer_factory.hpp:77] Creating layer loss
    
    I0411 20:10:54.089810 26325 net.cpp:91] Creating Layer loss
    
    I0411 20:10:54.089817 26325 net.cpp:425] loss <- ip2_ip2_0_split_1
    
    I0411 20:10:54.089823 26325 net.cpp:425] loss <- label_mnist_1_split_1
    
    I0411 20:10:54.089836 26325 net.cpp:399] loss -> loss
    
    I0411 20:10:54.089857 26325 layer_factory.hpp:77] Creating layer loss
    
    I0411 20:10:54.090514 26325 net.cpp:141] Setting up loss
    
    I0411 20:10:54.090531 26325 net.cpp:148] Top shape: (1)
    
    I0411 20:10:54.090538 26325 net.cpp:151]     with loss weight 1
    
    I0411 20:10:54.090548 26325 net.cpp:156] Memory required for data: 8089208
    
    I0411 20:10:54.090555 26325 net.cpp:217] loss needs backward computation.
    
    I0411 20:10:54.090562 26325 net.cpp:219] accuracy does not need backward computation.
    
    I0411 20:10:54.090569 26325 net.cpp:217] ip2_ip2_0_split needs backward computation.
    
    I0411 20:10:54.090574 26325 net.cpp:217] ip2 needs backward computation.
    
    I0411 20:10:54.090580 26325 net.cpp:217] relu1 needs backward computation.
    
    I0411 20:10:54.090585 26325 net.cpp:217] ip1 needs backward computation.
    
    I0411 20:10:54.090592 26325 net.cpp:217] pool2 needs backward computation.
    
    I0411 20:10:54.090597 26325 net.cpp:217] conv2 needs backward computation.
    
    I0411 20:10:54.090605 26325 net.cpp:217] pool1 needs backward computation.
    
    I0411 20:10:54.090610 26325 net.cpp:217] conv1 needs backward computation.
    
    I0411 20:10:54.090616 26325 net.cpp:219] label_mnist_1_split does not need backward computation.
    
    I0411 20:10:54.090623 26325 net.cpp:219] mnist does not need backward computation.
    
    I0411 20:10:54.090628 26325 net.cpp:261] This network produces output accuracy
    
    I0411 20:10:54.090636 26325 net.cpp:261] This network produces output loss
    
    I0411 20:10:54.090651 26325 net.cpp:274] Network initialization done.
    
    I0411 20:10:54.090711 26325 solver.cpp:60] Solver scaffolding done.
    
    I0411 20:10:54.091017 26325 caffe.cpp:219] Starting Optimization
    
    I0411 20:10:54.091029 26325 solver.cpp:279] Solving LeNet
    
    I0411 20:10:54.091035 26325 solver.cpp:280] Learning Rate Policy: inv
    
    I0411 20:10:54.091449 26325 solver.cpp:337] Iteration 0, Testing net (#0)
    
    I0411 20:10:54.267302 26325 solver.cpp:404]     Test net output #0: accuracy = 0
    
    I0411 20:10:54.267335 26325 solver.cpp:404]     Test net output #1: loss = 2.51464 (* 1 = 2.51464 loss)
    
    I0411 20:10:54.269503 26325 solver.cpp:228] Iteration 0, loss = 2.53388
    
    I0411 20:10:54.269525 26325 solver.cpp:244]     Train net output #0: loss = 2.53388 (* 1 = 2.53388 loss)
    
    I0411 20:10:54.269541 26325 sgd_solver.cpp:106] Iteration 0, lr = 0.01
    
    I0411 20:10:54.747952 26325 solver.cpp:228] Iteration 100, loss = 0.0301832
    
    I0411 20:10:54.747992 26325 solver.cpp:244]     Train net output #0: loss = 0.0301832 (* 1 = 0.0301832 loss)
    
    I0411 20:10:54.748002 26325 sgd_solver.cpp:106] Iteration 100, lr = 0.00992565
    
    I0411 20:10:55.225338 26325 solver.cpp:228] Iteration 200, loss = 0.0084664
    
    I0411 20:10:55.225378 26325 solver.cpp:244]     Train net output #0: loss = 0.00846638 (* 1 = 0.00846638 loss)
    
    I0411 20:10:55.225386 26325 sgd_solver.cpp:106] Iteration 200, lr = 0.00985258
    
    I0411 20:10:55.702563 26325 solver.cpp:228] Iteration 300, loss = 0.0032292
    
    I0411 20:10:55.702605 26325 solver.cpp:244]     Train net output #0: loss = 0.00322917 (* 1 = 0.00322917 loss)
    
    I0411 20:10:55.702613 26325 sgd_solver.cpp:106] Iteration 300, lr = 0.00978075
    
    I0411 20:10:56.179253 26325 solver.cpp:228] Iteration 400, loss = 0.00241019
    
    I0411 20:10:56.179291 26325 solver.cpp:244]     Train net output #0: loss = 0.00241017 (* 1 = 0.00241017 loss)
    
    I0411 20:10:56.179299 26325 sgd_solver.cpp:106] Iteration 400, lr = 0.00971013
    
    I0411 20:10:56.650161 26325 solver.cpp:337] Iteration 500, Testing net (#0)
    
    I0411 20:10:56.817160 26325 solver.cpp:404]     Test net output #0: accuracy = 1
    
    I0411 20:10:56.817193 26325 solver.cpp:404]     Test net output #1: loss = 0.00633578 (* 1 = 0.00633578 loss)
    
    I0411 20:10:56.818384 26325 solver.cpp:228] Iteration 500, loss = 0.00249958
    
    I0411 20:10:56.818402 26325 solver.cpp:244]     Train net output #0: loss = 0.00249956 (* 1 = 0.00249956 loss)
    
    I0411 20:10:56.818411 26325 sgd_solver.cpp:106] Iteration 500, lr = 0.00964069
    
    I0411 20:10:57.295064 26325 solver.cpp:228] Iteration 600, loss = 0.00521553
    
    I0411 20:10:57.295104 26325 solver.cpp:244]     Train net output #0: loss = 0.00521551 (* 1 = 0.00521551 loss)
    
    I0411 20:10:57.295120 26325 sgd_solver.cpp:106] Iteration 600, lr = 0.0095724
    
    I0411 20:10:57.768954 26325 solver.cpp:228] Iteration 700, loss = 0.000742974
    
    I0411 20:10:57.768993 26325 solver.cpp:244]     Train net output #0: loss = 0.000742956 (* 1 = 0.000742956 loss)
    
    I0411 20:10:57.769001 26325 sgd_solver.cpp:106] Iteration 700, lr = 0.00950522
    
    I0411 20:10:58.244019 26325 solver.cpp:228] Iteration 800, loss = 0.00139234
    
    I0411 20:10:58.244060 26325 solver.cpp:244]     Train net output #0: loss = 0.00139232 (* 1 = 0.00139232 loss)
    
    I0411 20:10:58.244067 26325 sgd_solver.cpp:106] Iteration 800, lr = 0.00943913
    
    I0411 20:10:58.717504 26325 solver.cpp:228] Iteration 900, loss = 0.00333919
    
    I0411 20:10:58.717543 26325 solver.cpp:244]     Train net output #0: loss = 0.00333917 (* 1 = 0.00333917 loss)
    
    I0411 20:10:58.717551 26325 sgd_solver.cpp:106] Iteration 900, lr = 0.00937411
    
    I0411 20:10:59.188133 26325 solver.cpp:337] Iteration 1000, Testing net (#0)
    
    I0411 20:10:59.354576 26325 solver.cpp:404]     Test net output #0: accuracy = 1
    
    I0411 20:10:59.354611 26325 solver.cpp:404]     Test net output #1: loss = 0.00971376 (* 1 = 0.00971376 loss)
    
    I0411 20:10:59.355782 26325 solver.cpp:228] Iteration 1000, loss = 0.00141859
    
    I0411 20:10:59.355799 26325 solver.cpp:244]     Train net output #0: loss = 0.00141858 (* 1 = 0.00141858 loss)
    
    I0411 20:10:59.355808 26325 sgd_solver.cpp:106] Iteration 1000, lr = 0.00931012
    
    I0411 20:10:59.830029 26325 solver.cpp:228] Iteration 1100, loss = 0.000765667
    
    I0411 20:10:59.830067 26325 solver.cpp:244]     Train net output #0: loss = 0.000765649 (* 1 = 0.000765649 loss)
    
    I0411 20:10:59.830075 26325 sgd_solver.cpp:106] Iteration 1100, lr = 0.00924715
    
    I0411 20:11:00.305390 26325 solver.cpp:228] Iteration 1200, loss = 0.00107148
    
    I0411 20:11:00.305439 26325 solver.cpp:244]     Train net output #0: loss = 0.00107146 (* 1 = 0.00107146 loss)
    
    I0411 20:11:00.305449 26325 sgd_solver.cpp:106] Iteration 1200, lr = 0.00918515
    
    I0411 20:11:00.779052 26325 solver.cpp:228] Iteration 1300, loss = 0.00054331
    
    I0411 20:11:00.779094 26325 solver.cpp:244]     Train net output #0: loss = 0.000543291 (* 1 = 0.000543291 loss)
    
    I0411 20:11:00.779392 26325 sgd_solver.cpp:106] Iteration 1300, lr = 0.00912412
    
    I0411 20:11:01.254827 26325 solver.cpp:228] Iteration 1400, loss = 0.00086254
    
    I0411 20:11:01.254870 26325 solver.cpp:244]     Train net output #0: loss = 0.000862521 (* 1 = 0.000862521 loss)
    
    I0411 20:11:01.254880 26325 sgd_solver.cpp:106] Iteration 1400, lr = 0.00906403
    
    I0411 20:11:01.725662 26325 solver.cpp:337] Iteration 1500, Testing net (#0)
    
    I0411 20:11:01.893404 26325 solver.cpp:404]     Test net output #0: accuracy = 1
    
    I0411 20:11:01.893447 26325 solver.cpp:404]     Test net output #1: loss = 0.0117362 (* 1 = 0.0117362 loss)
    
    I0411 20:11:01.894623 26325 solver.cpp:228] Iteration 1500, loss = 0.000553097
    
    I0411 20:11:01.894642 26325 solver.cpp:244]     Train net output #0: loss = 0.000553078 (* 1 = 0.000553078 loss)
    
    I0411 20:11:01.894651 26325 sgd_solver.cpp:106] Iteration 1500, lr = 0.00900485
    
    I0411 20:11:02.375581 26325 solver.cpp:228] Iteration 1600, loss = 0.00060225
    
    I0411 20:11:02.375622 26325 solver.cpp:244]     Train net output #0: loss = 0.000602231 (* 1 = 0.000602231 loss)
    
    I0411 20:11:02.375630 26325 sgd_solver.cpp:106] Iteration 1600, lr = 0.00894657
    
    I0411 20:11:02.856406 26325 solver.cpp:228] Iteration 1700, loss = 0.00102158
    
    I0411 20:11:02.856464 26325 solver.cpp:244]     Train net output #0: loss = 0.00102156 (* 1 = 0.00102156 loss)
    
    I0411 20:11:02.856473 26325 sgd_solver.cpp:106] Iteration 1700, lr = 0.00888916
    
    I0411 20:11:03.338832 26325 solver.cpp:228] Iteration 1800, loss = 0.000260631
    
    I0411 20:11:03.338874 26325 solver.cpp:244]     Train net output #0: loss = 0.000260612 (* 1 = 0.000260612 loss)
    
    I0411 20:11:03.338883 26325 sgd_solver.cpp:106] Iteration 1800, lr = 0.0088326
    
    I0411 20:11:03.816126 26325 solver.cpp:228] Iteration 1900, loss = 0.00146905
    
    I0411 20:11:03.816167 26325 solver.cpp:244]     Train net output #0: loss = 0.00146903 (* 1 = 0.00146903 loss)
    
    I0411 20:11:03.816176 26325 sgd_solver.cpp:106] Iteration 1900, lr = 0.00877687
    
    I0411 20:11:04.289855 26325 solver.cpp:337] Iteration 2000, Testing net (#0)
    
    I0411 20:11:04.466187 26325 solver.cpp:404]     Test net output #0: accuracy = 1
    
    I0411 20:11:04.466223 26325 solver.cpp:404]     Test net output #1: loss = 0.0113922 (* 1 = 0.0113922 loss)
    
    I0411 20:11:04.467491 26325 solver.cpp:228] Iteration 2000, loss = 0.00158365
    
    I0411 20:11:04.467510 26325 solver.cpp:244]     Train net output #0: loss = 0.00158363 (* 1 = 0.00158363 loss)
    
    I0411 20:11:04.467520 26325 sgd_solver.cpp:106] Iteration 2000, lr = 0.00872196
    
    I0411 20:11:04.966814 26325 solver.cpp:228] Iteration 2100, loss = 0.00147849
    
    I0411 20:11:04.966856 26325 solver.cpp:244]     Train net output #0: loss = 0.00147848 (* 1 = 0.00147848 loss)
    
    I0411 20:11:04.966866 26325 sgd_solver.cpp:106] Iteration 2100, lr = 0.00866784
    
    I0411 20:11:05.454147 26325 solver.cpp:228] Iteration 2200, loss = 0.00104699
    
    I0411 20:11:05.454205 26325 solver.cpp:244]     Train net output #0: loss = 0.00104698 (* 1 = 0.00104698 loss)
    
    I0411 20:11:05.454213 26325 sgd_solver.cpp:106] Iteration 2200, lr = 0.0086145
    
    I0411 20:11:05.953001 26325 solver.cpp:228] Iteration 2300, loss = 0.0012614
    
    I0411 20:11:05.953055 26325 solver.cpp:244]     Train net output #0: loss = 0.00126138 (* 1 = 0.00126138 loss)
    
    I0411 20:11:05.953068 26325 sgd_solver.cpp:106] Iteration 2300, lr = 0.00856192
    
    I0411 20:11:06.440332 26325 solver.cpp:228] Iteration 2400, loss = 0.000167933
    
    I0411 20:11:06.440373 26325 solver.cpp:244]     Train net output #0: loss = 0.000167916 (* 1 = 0.000167916 loss)
    
    I0411 20:11:06.440382 26325 sgd_solver.cpp:106] Iteration 2400, lr = 0.00851008
    
    I0411 20:11:06.931771 26325 solver.cpp:337] Iteration 2500, Testing net (#0)
    
    I0411 20:11:07.102743 26325 solver.cpp:404]     Test net output #0: accuracy = 1
    
    I0411 20:11:07.102779 26325 solver.cpp:404]     Test net output #1: loss = 0.0116974 (* 1 = 0.0116974 loss)
    
    I0411 20:11:07.104343 26325 solver.cpp:228] Iteration 2500, loss = 0.0013398
    
    I0411 20:11:07.104365 26325 solver.cpp:244]     Train net output #0: loss = 0.00133978 (* 1 = 0.00133978 loss)
    
    I0411 20:11:07.104377 26325 sgd_solver.cpp:106] Iteration 2500, lr = 0.00845897
    
    I0411 20:11:07.586928 26325 solver.cpp:228] Iteration 2600, loss = 0.000558019
    
    I0411 20:11:07.586969 26325 solver.cpp:244]     Train net output #0: loss = 0.000558002 (* 1 = 0.000558002 loss)
    
    I0411 20:11:07.586978 26325 sgd_solver.cpp:106] Iteration 2600, lr = 0.00840857
    
    I0411 20:11:08.065929 26325 solver.cpp:228] Iteration 2700, loss = 0.000280704
    
    I0411 20:11:08.065973 26325 solver.cpp:244]     Train net output #0: loss = 0.000280687 (* 1 = 0.000280687 loss)
    
    I0411 20:11:08.065984 26325 sgd_solver.cpp:106] Iteration 2700, lr = 0.00835886
    
    I0411 20:11:08.547049 26325 solver.cpp:228] Iteration 2800, loss = 0.0013896
    
    I0411 20:11:08.547091 26325 solver.cpp:244]     Train net output #0: loss = 0.00138958 (* 1 = 0.00138958 loss)
    
    I0411 20:11:08.547101 26325 sgd_solver.cpp:106] Iteration 2800, lr = 0.00830984
    
    I0411 20:11:09.028833 26325 solver.cpp:228] Iteration 2900, loss = 0.000942562
    
    I0411 20:11:09.028875 26325 solver.cpp:244]     Train net output #0: loss = 0.000942545 (* 1 = 0.000942545 loss)
    
    I0411 20:11:09.028883 26325 sgd_solver.cpp:106] Iteration 2900, lr = 0.00826148
    
    I0411 20:11:09.506394 26325 solver.cpp:337] Iteration 3000, Testing net (#0)
    
    I0411 20:11:09.675285 26325 solver.cpp:404]     Test net output #0: accuracy = 1
    
    I0411 20:11:09.675320 26325 solver.cpp:404]     Test net output #1: loss = 0.0132009 (* 1 = 0.0132009 loss)
    
    I0411 20:11:09.676492 26325 solver.cpp:228] Iteration 3000, loss = 0.00374372
    
    I0411 20:11:09.676512 26325 solver.cpp:244]     Train net output #0: loss = 0.00374371 (* 1 = 0.00374371 loss)
    
    I0411 20:11:09.676522 26325 sgd_solver.cpp:106] Iteration 3000, lr = 0.00821377
    
    I0411 20:11:10.188333 26325 solver.cpp:228] Iteration 3100, loss = 0.000790801
    
    I0411 20:11:10.188385 26325 solver.cpp:244]     Train net output #0: loss = 0.000790783 (* 1 = 0.000790783 loss)
    
    I0411 20:11:10.188395 26325 sgd_solver.cpp:106] Iteration 3100, lr = 0.0081667
    
    I0411 20:11:10.685463 26325 solver.cpp:228] Iteration 3200, loss = 0.000262422
    
    I0411 20:11:10.685523 26325 solver.cpp:244]     Train net output #0: loss = 0.000262405 (* 1 = 0.000262405 loss)
    
    I0411 20:11:10.685533 26325 sgd_solver.cpp:106] Iteration 3200, lr = 0.00812025
    
    I0411 20:11:11.164309 26325 solver.cpp:228] Iteration 3300, loss = 0.000230147
    
    I0411 20:11:11.164361 26325 solver.cpp:244]     Train net output #0: loss = 0.00023013 (* 1 = 0.00023013 loss)
    
    I0411 20:11:11.164371 26325 sgd_solver.cpp:106] Iteration 3300, lr = 0.00807442
    
    I0411 20:11:11.650818 26325 solver.cpp:228] Iteration 3400, loss = 0.000403222
    
    I0411 20:11:11.650863 26325 solver.cpp:244]     Train net output #0: loss = 0.000403205 (* 1 = 0.000403205 loss)
    
    I0411 20:11:11.650873 26325 sgd_solver.cpp:106] Iteration 3400, lr = 0.00802918
    
    I0411 20:11:12.150279 26325 solver.cpp:337] Iteration 3500, Testing net (#0)
    
    I0411 20:11:12.327482 26325 solver.cpp:404]     Test net output #0: accuracy = 1
    
    I0411 20:11:12.327518 26325 solver.cpp:404]     Test net output #1: loss = 0.0137085 (* 1 = 0.0137085 loss)
    
    I0411 20:11:12.328698 26325 solver.cpp:228] Iteration 3500, loss = 0.000732417
    
    I0411 20:11:12.328717 26325 solver.cpp:244]     Train net output #0: loss = 0.0007324 (* 1 = 0.0007324 loss)
    
    I0411 20:11:12.328727 26325 sgd_solver.cpp:106] Iteration 3500, lr = 0.00798454
    
    I0411 20:11:12.813639 26325 solver.cpp:228] Iteration 3600, loss = 0.000439515
    
    I0411 20:11:12.813684 26325 solver.cpp:244]     Train net output #0: loss = 0.000439498 (* 1 = 0.000439498 loss)
    
    I0411 20:11:12.813694 26325 sgd_solver.cpp:106] Iteration 3600, lr = 0.00794046
    
    I0411 20:11:13.297883 26325 solver.cpp:228] Iteration 3700, loss = 0.000808986
    
    I0411 20:11:13.297922 26325 solver.cpp:244]     Train net output #0: loss = 0.000808969 (* 1 = 0.000808969 loss)
    
    I0411 20:11:13.297930 26325 sgd_solver.cpp:106] Iteration 3700, lr = 0.00789695
    
    I0411 20:11:13.783499 26325 solver.cpp:228] Iteration 3800, loss = 0.000267728
    
    I0411 20:11:13.783550 26325 solver.cpp:244]     Train net output #0: loss = 0.000267711 (* 1 = 0.000267711 loss)
    
    I0411 20:11:13.783558 26325 sgd_solver.cpp:106] Iteration 3800, lr = 0.007854
    
    I0411 20:11:14.271435 26325 solver.cpp:228] Iteration 3900, loss = 0.000456092
    
    I0411 20:11:14.271483 26325 solver.cpp:244]     Train net output #0: loss = 0.000456075 (* 1 = 0.000456075 loss)
    
    I0411 20:11:14.271492 26325 sgd_solver.cpp:106] Iteration 3900, lr = 0.00781158
    
    I0411 20:11:14.750762 26325 solver.cpp:337] Iteration 4000, Testing net (#0)
    
    I0411 20:11:14.920282 26325 solver.cpp:404]     Test net output #0: accuracy = 1
    
    I0411 20:11:14.920317 26325 solver.cpp:404]     Test net output #1: loss = 0.0131978 (* 1 = 0.0131978 loss)
    
    I0411 20:11:14.921478 26325 solver.cpp:228] Iteration 4000, loss = 0.000209671
    
    I0411 20:11:14.921495 26325 solver.cpp:244]     Train net output #0: loss = 0.000209654 (* 1 = 0.000209654 loss)
    
    I0411 20:11:14.921504 26325 sgd_solver.cpp:106] Iteration 4000, lr = 0.0077697
    
    I0411 20:11:15.398669 26325 solver.cpp:228] Iteration 4100, loss = 0.000547461
    
    I0411 20:11:15.398710 26325 solver.cpp:244]     Train net output #0: loss = 0.000547443 (* 1 = 0.000547443 loss)
    
    I0411 20:11:15.398718 26325 sgd_solver.cpp:106] Iteration 4100, lr = 0.00772833
    
    I0411 20:11:15.875399 26325 solver.cpp:228] Iteration 4200, loss = 0.000726168
    
    I0411 20:11:15.875442 26325 solver.cpp:244]     Train net output #0: loss = 0.00072615 (* 1 = 0.00072615 loss)
    
    I0411 20:11:15.875450 26325 sgd_solver.cpp:106] Iteration 4200, lr = 0.00768748
    
    I0411 20:11:16.368588 26325 solver.cpp:228] Iteration 4300, loss = 0.000150464
    
    I0411 20:11:16.368633 26325 solver.cpp:244]     Train net output #0: loss = 0.000150446 (* 1 = 0.000150446 loss)
    
    I0411 20:11:16.368643 26325 sgd_solver.cpp:106] Iteration 4300, lr = 0.00764712
    
    I0411 20:11:16.851346 26325 solver.cpp:228] Iteration 4400, loss = 0.000245828
    
    I0411 20:11:16.851398 26325 solver.cpp:244]     Train net output #0: loss = 0.00024581 (* 1 = 0.00024581 loss)
    
    I0411 20:11:16.851407 26325 sgd_solver.cpp:106] Iteration 4400, lr = 0.00760726
    
    I0411 20:11:17.333400 26325 solver.cpp:337] Iteration 4500, Testing net (#0)
    
    I0411 20:11:17.500576 26325 solver.cpp:404]     Test net output #0: accuracy = 1
    
    I0411 20:11:17.500627 26325 solver.cpp:404]     Test net output #1: loss = 0.0144803 (* 1 = 0.0144803 loss)
    
    I0411 20:11:17.501811 26325 solver.cpp:228] Iteration 4500, loss = 0.000780273
    
    I0411 20:11:17.501828 26325 solver.cpp:244]     Train net output #0: loss = 0.000780255 (* 1 = 0.000780255 loss)
    
    I0411 20:11:17.501837 26325 sgd_solver.cpp:106] Iteration 4500, lr = 0.00756788
    
    I0411 20:11:17.978284 26325 solver.cpp:228] Iteration 4600, loss = 0.0010362
    
    I0411 20:11:17.978322 26325 solver.cpp:244]     Train net output #0: loss = 0.00103618 (* 1 = 0.00103618 loss)
    
    I0411 20:11:17.978330 26325 sgd_solver.cpp:106] Iteration 4600, lr = 0.00752897
    
    I0411 20:11:18.452605 26325 solver.cpp:228] Iteration 4700, loss = 0.00053596
    
    I0411 20:11:18.452644 26325 solver.cpp:244]     Train net output #0: loss = 0.000535942 (* 1 = 0.000535942 loss)
    
    I0411 20:11:18.452652 26325 sgd_solver.cpp:106] Iteration 4700, lr = 0.00749052
    
    I0411 20:11:18.947947 26325 solver.cpp:228] Iteration 4800, loss = 0.000870918
    
    I0411 20:11:18.947988 26325 solver.cpp:244]     Train net output #0: loss = 0.000870901 (* 1 = 0.000870901 loss)
    
    I0411 20:11:18.947996 26325 sgd_solver.cpp:106] Iteration 4800, lr = 0.00745253
    
    I0411 20:11:19.423763 26325 solver.cpp:228] Iteration 4900, loss = 0.000711674
    
    I0411 20:11:19.423806 26325 solver.cpp:244]     Train net output #0: loss = 0.000711657 (* 1 = 0.000711657 loss)
    
    I0411 20:11:19.423815 26325 sgd_solver.cpp:106] Iteration 4900, lr = 0.00741498
    
    I0411 20:11:19.893707 26325 solver.cpp:454] Snapshotting to binary proto file examples/mnist/lenet_iter_5000.caffemodel
    
    I0411 20:11:19.903385 26325 sgd_solver.cpp:273] Snapshotting solver state to binary proto file examples/mnist/lenet_iter_5000.solverstate
    
    I0411 20:11:19.905833 26325 solver.cpp:337] Iteration 5000, Testing net (#0)
    
    I0411 20:11:20.070997 26325 solver.cpp:404]     Test net output #0: accuracy = 1
    
    I0411 20:11:20.071032 26325 solver.cpp:404]     Test net output #1: loss = 0.0146861 (* 1 = 0.0146861 loss)
    
    I0411 20:11:20.072247 26325 solver.cpp:228] Iteration 5000, loss = 0.00126036
    
    I0411 20:11:20.072264 26325 solver.cpp:244]     Train net output #0: loss = 0.00126034 (* 1 = 0.00126034 loss)
    
    I0411 20:11:20.072274 26325 sgd_solver.cpp:106] Iteration 5000, lr = 0.00737788
    
    I0411 20:11:20.552392 26325 solver.cpp:228] Iteration 5100, loss = 0.000418031
    
    I0411 20:11:20.552430 26325 solver.cpp:244]     Train net output #0: loss = 0.000418013 (* 1 = 0.000418013 loss)
    
    I0411 20:11:20.552438 26325 sgd_solver.cpp:106] Iteration 5100, lr = 0.0073412
    
    I0411 20:11:21.030954 26325 solver.cpp:228] Iteration 5200, loss = 0.000680887
    
    I0411 20:11:21.030999 26325 solver.cpp:244]     Train net output #0: loss = 0.00068087 (* 1 = 0.00068087 loss)
    
    I0411 20:11:21.031296 26325 sgd_solver.cpp:106] Iteration 5200, lr = 0.00730495
    
    I0411 20:11:21.507069 26325 solver.cpp:228] Iteration 5300, loss = 0.00132878
    
    I0411 20:11:21.507122 26325 solver.cpp:244]     Train net output #0: loss = 0.00132876 (* 1 = 0.00132876 loss)
    
    I0411 20:11:21.507130 26325 sgd_solver.cpp:106] Iteration 5300, lr = 0.00726911
    
    I0411 20:11:21.981031 26325 solver.cpp:228] Iteration 5400, loss = 0.000751449
    
    I0411 20:11:21.981071 26325 solver.cpp:244]     Train net output #0: loss = 0.000751432 (* 1 = 0.000751432 loss)
    
    I0411 20:11:21.981079 26325 sgd_solver.cpp:106] Iteration 5400, lr = 0.00723368
    
    I0411 20:11:22.451010 26325 solver.cpp:337] Iteration 5500, Testing net (#0)
    
    I0411 20:11:22.617447 26325 solver.cpp:404]     Test net output #0: accuracy = 1
    
    I0411 20:11:22.617480 26325 solver.cpp:404]     Test net output #1: loss = 0.0141701 (* 1 = 0.0141701 loss)
    
    I0411 20:11:22.618643 26325 solver.cpp:228] Iteration 5500, loss = 0.000439743
    
    I0411 20:11:22.618661 26325 solver.cpp:244]     Train net output #0: loss = 0.000439726 (* 1 = 0.000439726 loss)
    
    I0411 20:11:22.618670 26325 sgd_solver.cpp:106] Iteration 5500, lr = 0.00719865
    
    I0411 20:11:23.092772 26325 solver.cpp:228] Iteration 5600, loss = 0.000677755
    
    I0411 20:11:23.092813 26325 solver.cpp:244]     Train net output #0: loss = 0.000677738 (* 1 = 0.000677738 loss)
    
    I0411 20:11:23.092838 26325 sgd_solver.cpp:106] Iteration 5600, lr = 0.00716402
    
    I0411 20:11:23.568708 26325 solver.cpp:228] Iteration 5700, loss = 0.000768955
    
    I0411 20:11:23.568749 26325 solver.cpp:244]     Train net output #0: loss = 0.000768938 (* 1 = 0.000768938 loss)
    
    I0411 20:11:23.568758 26325 sgd_solver.cpp:106] Iteration 5700, lr = 0.00712977
    
    I0411 20:11:24.045614 26325 solver.cpp:228] Iteration 5800, loss = 0.000597041
    
    I0411 20:11:24.045729 26325 solver.cpp:244]     Train net output #0: loss = 0.000597025 (* 1 = 0.000597025 loss)
    
    I0411 20:11:24.045740 26325 sgd_solver.cpp:106] Iteration 5800, lr = 0.0070959
    
    I0411 20:11:24.522313 26325 solver.cpp:228] Iteration 5900, loss = 0.000519103
    
    I0411 20:11:24.522354 26325 solver.cpp:244]     Train net output #0: loss = 0.000519086 (* 1 = 0.000519086 loss)
    
    I0411 20:11:24.522362 26325 sgd_solver.cpp:106] Iteration 5900, lr = 0.0070624
    
    I0411 20:11:24.994959 26325 solver.cpp:337] Iteration 6000, Testing net (#0)
    
    I0411 20:11:25.160990 26325 solver.cpp:404]     Test net output #0: accuracy = 1
    
    I0411 20:11:25.161021 26325 solver.cpp:404]     Test net output #1: loss = 0.0147472 (* 1 = 0.0147472 loss)
    
    I0411 20:11:25.162170 26325 solver.cpp:228] Iteration 6000, loss = 0.000468573
    
    I0411 20:11:25.162189 26325 solver.cpp:244]     Train net output #0: loss = 0.000468556 (* 1 = 0.000468556 loss)
    
    I0411 20:11:25.162197 26325 sgd_solver.cpp:106] Iteration 6000, lr = 0.00702927
    
    I0411 20:11:25.638268 26325 solver.cpp:228] Iteration 6100, loss = 0.00025703
    
    I0411 20:11:25.638309 26325 solver.cpp:244]     Train net output #0: loss = 0.000257013 (* 1 = 0.000257013 loss)
    
    I0411 20:11:25.638317 26325 sgd_solver.cpp:106] Iteration 6100, lr = 0.0069965
    
    I0411 20:11:26.115149 26325 solver.cpp:228] Iteration 6200, loss = 0.000301074
    
    I0411 20:11:26.115206 26325 solver.cpp:244]     Train net output #0: loss = 0.000301057 (* 1 = 0.000301057 loss)
    
    I0411 20:11:26.115409 26325 sgd_solver.cpp:106] Iteration 6200, lr = 0.00696408
    
    I0411 20:11:26.592195 26325 solver.cpp:228] Iteration 6300, loss = 0.0012083
    
    I0411 20:11:26.592236 26325 solver.cpp:244]     Train net output #0: loss = 0.00120829 (* 1 = 0.00120829 loss)
    
    I0411 20:11:26.592243 26325 sgd_solver.cpp:106] Iteration 6300, lr = 0.00693201
    
    I0411 20:11:27.070199 26325 solver.cpp:228] Iteration 6400, loss = 0.00056831
    
    I0411 20:11:27.070240 26325 solver.cpp:244]     Train net output #0: loss = 0.000568293 (* 1 = 0.000568293 loss)
    
    I0411 20:11:27.070247 26325 sgd_solver.cpp:106] Iteration 6400, lr = 0.00690029
    
    I0411 20:11:27.542577 26325 solver.cpp:337] Iteration 6500, Testing net (#0)
    
    I0411 20:11:27.709849 26325 solver.cpp:404]     Test net output #0: accuracy = 0.977301
    
    I0411 20:11:27.709892 26325 solver.cpp:404]     Test net output #1: loss = 0.0163178 (* 1 = 0.0163178 loss)
    
    I0411 20:11:27.711072 26325 solver.cpp:228] Iteration 6500, loss = 0.000781759
    
    I0411 20:11:27.711091 26325 solver.cpp:244]     Train net output #0: loss = 0.000781742 (* 1 = 0.000781742 loss)
    
    I0411 20:11:27.711103 26325 sgd_solver.cpp:106] Iteration 6500, lr = 0.0068689
    
    I0411 20:11:28.194150 26325 solver.cpp:228] Iteration 6600, loss = 0.000824402
    
    I0411 20:11:28.194197 26325 solver.cpp:244]     Train net output #0: loss = 0.000824385 (* 1 = 0.000824385 loss)
    
    I0411 20:11:28.194211 26325 sgd_solver.cpp:106] Iteration 6600, lr = 0.00683784
    
    I0411 20:11:28.673457 26325 solver.cpp:228] Iteration 6700, loss = 0.00080099
    
    I0411 20:11:28.673498 26325 solver.cpp:244]     Train net output #0: loss = 0.000800973 (* 1 = 0.000800973 loss)
    
    I0411 20:11:28.673506 26325 sgd_solver.cpp:106] Iteration 6700, lr = 0.00680711
    
    I0411 20:11:29.153903 26325 solver.cpp:228] Iteration 6800, loss = 0.000545807
    
    I0411 20:11:29.153952 26325 solver.cpp:244]     Train net output #0: loss = 0.000545789 (* 1 = 0.000545789 loss)
    
    I0411 20:11:29.153960 26325 sgd_solver.cpp:106] Iteration 6800, lr = 0.0067767
    
    I0411 20:11:29.628667 26325 solver.cpp:228] Iteration 6900, loss = 0.000540998
    
    I0411 20:11:29.628710 26325 solver.cpp:244]     Train net output #0: loss = 0.00054098 (* 1 = 0.00054098 loss)
    
    I0411 20:11:29.628729 26325 sgd_solver.cpp:106] Iteration 6900, lr = 0.0067466
    
    I0411 20:11:30.099011 26325 solver.cpp:337] Iteration 7000, Testing net (#0)
    
    I0411 20:11:30.265969 26325 solver.cpp:404]     Test net output #0: accuracy = 1
    
    I0411 20:11:30.266002 26325 solver.cpp:404]     Test net output #1: loss = 0.0155301 (* 1 = 0.0155301 loss)
    
    I0411 20:11:30.267179 26325 solver.cpp:228] Iteration 7000, loss = 0.000788813
    
    I0411 20:11:30.267196 26325 solver.cpp:244]     Train net output #0: loss = 0.000788795 (* 1 = 0.000788795 loss)
    
    I0411 20:11:30.267220 26325 sgd_solver.cpp:106] Iteration 7000, lr = 0.00671681
    
    I0411 20:11:30.741459 26325 solver.cpp:228] Iteration 7100, loss = 0.000811272
    
    I0411 20:11:30.741499 26325 solver.cpp:244]     Train net output #0: loss = 0.000811254 (* 1 = 0.000811254 loss)
    
    I0411 20:11:30.741508 26325 sgd_solver.cpp:106] Iteration 7100, lr = 0.00668733
    
    I0411 20:11:31.215991 26325 solver.cpp:228] Iteration 7200, loss = 0.000813902
    
    I0411 20:11:31.216051 26325 solver.cpp:244]     Train net output #0: loss = 0.000813885 (* 1 = 0.000813885 loss)
    
    I0411 20:11:31.216274 26325 sgd_solver.cpp:106] Iteration 7200, lr = 0.00665815
    
    I0411 20:11:31.692263 26325 solver.cpp:228] Iteration 7300, loss = 0.00108772
    
    I0411 20:11:31.692306 26325 solver.cpp:244]     Train net output #0: loss = 0.0010877 (* 1 = 0.0010877 loss)
    
    I0411 20:11:31.692319 26325 sgd_solver.cpp:106] Iteration 7300, lr = 0.00662927
    
    I0411 20:11:32.167842 26325 solver.cpp:228] Iteration 7400, loss = 0.00272905
    
    I0411 20:11:32.167894 26325 solver.cpp:244]     Train net output #0: loss = 0.00272903 (* 1 = 0.00272903 loss)
    
    I0411 20:11:32.167906 26325 sgd_solver.cpp:106] Iteration 7400, lr = 0.00660067
    
    I0411 20:11:32.639610 26325 solver.cpp:337] Iteration 7500, Testing net (#0)
    
    I0411 20:11:32.808979 26325 solver.cpp:404]     Test net output #0: accuracy = 1
    
    I0411 20:11:32.809013 26325 solver.cpp:404]     Test net output #1: loss = 0.0153114 (* 1 = 0.0153114 loss)
    
    I0411 20:11:32.810209 26325 solver.cpp:228] Iteration 7500, loss = 0.000766661
    
    I0411 20:11:32.810227 26325 solver.cpp:244]     Train net output #0: loss = 0.000766644 (* 1 = 0.000766644 loss)
    
    I0411 20:11:32.810236 26325 sgd_solver.cpp:106] Iteration 7500, lr = 0.00657236
    
    I0411 20:11:33.288817 26325 solver.cpp:228] Iteration 7600, loss = 0.000239071
    
    I0411 20:11:33.288867 26325 solver.cpp:244]     Train net output #0: loss = 0.000239054 (* 1 = 0.000239054 loss)
    
    I0411 20:11:33.288877 26325 sgd_solver.cpp:106] Iteration 7600, lr = 0.00654433
    
    I0411 20:11:33.766108 26325 solver.cpp:228] Iteration 7700, loss = 0.000523386
    
    I0411 20:11:33.766147 26325 solver.cpp:244]     Train net output #0: loss = 0.000523369 (* 1 = 0.000523369 loss)
    
    I0411 20:11:33.766156 26325 sgd_solver.cpp:106] Iteration 7700, lr = 0.00651658
    
    I0411 20:11:34.243371 26325 solver.cpp:228] Iteration 7800, loss = 0.000529905
    
    I0411 20:11:34.243412 26325 solver.cpp:244]     Train net output #0: loss = 0.000529888 (* 1 = 0.000529888 loss)
    
    I0411 20:11:34.243420 26325 sgd_solver.cpp:106] Iteration 7800, lr = 0.00648911
    
    I0411 20:11:34.720605 26325 solver.cpp:228] Iteration 7900, loss = 0.000648281
    
    I0411 20:11:34.720645 26325 solver.cpp:244]     Train net output #0: loss = 0.000648263 (* 1 = 0.000648263 loss)
    
    I0411 20:11:34.720654 26325 sgd_solver.cpp:106] Iteration 7900, lr = 0.0064619
    
    I0411 20:11:35.192909 26325 solver.cpp:337] Iteration 8000, Testing net (#0)
    
    I0411 20:11:35.359594 26325 solver.cpp:404]     Test net output #0: accuracy = 0.977201
    
    I0411 20:11:35.359629 26325 solver.cpp:404]     Test net output #1: loss = 0.0168406 (* 1 = 0.0168406 loss)
    
    I0411 20:11:35.360822 26325 solver.cpp:228] Iteration 8000, loss = 0.000737989
    
    I0411 20:11:35.360841 26325 solver.cpp:244]     Train net output #0: loss = 0.000737972 (* 1 = 0.000737972 loss)
    
    I0411 20:11:35.360851 26325 sgd_solver.cpp:106] Iteration 8000, lr = 0.00643496
    
    I0411 20:11:35.837882 26325 solver.cpp:228] Iteration 8100, loss = 0.000270061
    
    I0411 20:11:35.837921 26325 solver.cpp:244]     Train net output #0: loss = 0.000270043 (* 1 = 0.000270043 loss)
    
    I0411 20:11:35.837930 26325 sgd_solver.cpp:106] Iteration 8100, lr = 0.00640827
    
    I0411 20:11:36.315692 26325 solver.cpp:228] Iteration 8200, loss = 0.000337277
    
    I0411 20:11:36.315738 26325 solver.cpp:244]     Train net output #0: loss = 0.000337259 (* 1 = 0.000337259 loss)
    
    I0411 20:11:36.315943 26325 sgd_solver.cpp:106] Iteration 8200, lr = 0.00638185
    
    I0411 20:11:36.793006 26325 solver.cpp:228] Iteration 8300, loss = 0.00108234
    
    I0411 20:11:36.793048 26325 solver.cpp:244]     Train net output #0: loss = 0.00108232 (* 1 = 0.00108232 loss)
    
    I0411 20:11:36.793071 26325 sgd_solver.cpp:106] Iteration 8300, lr = 0.00635567
    
    I0411 20:11:37.269459 26325 solver.cpp:228] Iteration 8400, loss = 0.000307305
    
    I0411 20:11:37.269500 26325 solver.cpp:244]     Train net output #0: loss = 0.000307288 (* 1 = 0.000307288 loss)
    
    I0411 20:11:37.269508 26325 sgd_solver.cpp:106] Iteration 8400, lr = 0.00632975
    
    I0411 20:11:37.742699 26325 solver.cpp:337] Iteration 8500, Testing net (#0)
    
    I0411 20:11:37.908057 26325 solver.cpp:404]     Test net output #0: accuracy = 0.977301
    
    I0411 20:11:37.908100 26325 solver.cpp:404]     Test net output #1: loss = 0.0170493 (* 1 = 0.0170493 loss)
    
    I0411 20:11:37.909346 26325 solver.cpp:228] Iteration 8500, loss = 0.000636403
    
    I0411 20:11:37.909364 26325 solver.cpp:244]     Train net output #0: loss = 0.000636386 (* 1 = 0.000636386 loss)
    
    I0411 20:11:37.909373 26325 sgd_solver.cpp:106] Iteration 8500, lr = 0.00630407
    
    I0411 20:11:38.384508 26325 solver.cpp:228] Iteration 8600, loss = 0.000350575
    
    I0411 20:11:38.384551 26325 solver.cpp:244]     Train net output #0: loss = 0.000350558 (* 1 = 0.000350558 loss)
    
    I0411 20:11:38.384558 26325 sgd_solver.cpp:106] Iteration 8600, lr = 0.00627864
    
    I0411 20:11:38.859849 26325 solver.cpp:228] Iteration 8700, loss = 0.000516572
    
    I0411 20:11:38.859889 26325 solver.cpp:244]     Train net output #0: loss = 0.000516555 (* 1 = 0.000516555 loss)
    
    I0411 20:11:38.859897 26325 sgd_solver.cpp:106] Iteration 8700, lr = 0.00625344
    
    I0411 20:11:39.335072 26325 solver.cpp:228] Iteration 8800, loss = 0.000515626
    
    I0411 20:11:39.335113 26325 solver.cpp:244]     Train net output #0: loss = 0.000515609 (* 1 = 0.000515609 loss)
    
    I0411 20:11:39.335120 26325 sgd_solver.cpp:106] Iteration 8800, lr = 0.00622847
    
    I0411 20:11:39.809680 26325 solver.cpp:228] Iteration 8900, loss = 0.00057994
    
    I0411 20:11:39.809730 26325 solver.cpp:244]     Train net output #0: loss = 0.000579924 (* 1 = 0.000579924 loss)
    
    I0411 20:11:39.809738 26325 sgd_solver.cpp:106] Iteration 8900, lr = 0.00620374
    
    I0411 20:11:40.279748 26325 solver.cpp:337] Iteration 9000, Testing net (#0)
    
    I0411 20:11:40.445363 26325 solver.cpp:404]     Test net output #0: accuracy = 0.977301
    
    I0411 20:11:40.445391 26325 solver.cpp:404]     Test net output #1: loss = 0.0162502 (* 1 = 0.0162502 loss)
    
    I0411 20:11:40.446548 26325 solver.cpp:228] Iteration 9000, loss = 0.000422711
    
    I0411 20:11:40.446566 26325 solver.cpp:244]     Train net output #0: loss = 0.000422694 (* 1 = 0.000422694 loss)
    
    I0411 20:11:40.446575 26325 sgd_solver.cpp:106] Iteration 9000, lr = 0.00617924
    
    I0411 20:11:40.921710 26325 solver.cpp:228] Iteration 9100, loss = 0.00154432
    
    I0411 20:11:40.921751 26325 solver.cpp:244]     Train net output #0: loss = 0.00154431 (* 1 = 0.00154431 loss)
    
    I0411 20:11:40.921761 26325 sgd_solver.cpp:106] Iteration 9100, lr = 0.00615496
    
    I0411 20:11:41.396497 26325 solver.cpp:228] Iteration 9200, loss = 0.000685425
    
    I0411 20:11:41.396561 26325 solver.cpp:244]     Train net output #0: loss = 0.000685408 (* 1 = 0.000685408 loss)
    
    I0411 20:11:41.396576 26325 sgd_solver.cpp:106] Iteration 9200, lr = 0.0061309
    
    I0411 20:11:41.871587 26325 solver.cpp:228] Iteration 9300, loss = 0.000901223
    
    I0411 20:11:41.871639 26325 solver.cpp:244]     Train net output #0: loss = 0.000901206 (* 1 = 0.000901206 loss)
    
    I0411 20:11:41.871646 26325 sgd_solver.cpp:106] Iteration 9300, lr = 0.00610706
    
    I0411 20:11:42.348115 26325 solver.cpp:228] Iteration 9400, loss = 0.000862048
    
    I0411 20:11:42.348158 26325 solver.cpp:244]     Train net output #0: loss = 0.000862031 (* 1 = 0.000862031 loss)
    
    I0411 20:11:42.348168 26325 sgd_solver.cpp:106] Iteration 9400, lr = 0.00608343
    
    I0411 20:11:42.817559 26325 solver.cpp:337] Iteration 9500, Testing net (#0)
    
    I0411 20:11:42.986048 26325 solver.cpp:404]     Test net output #0: accuracy = 0.977301
    
    I0411 20:11:42.986084 26325 solver.cpp:404]     Test net output #1: loss = 0.0172272 (* 1 = 0.0172272 loss)
    
    I0411 20:11:42.987263 26325 solver.cpp:228] Iteration 9500, loss = 0.000976299
    
    I0411 20:11:42.987282 26325 solver.cpp:244]     Train net output #0: loss = 0.000976283 (* 1 = 0.000976283 loss)
    
    I0411 20:11:42.987304 26325 sgd_solver.cpp:106] Iteration 9500, lr = 0.00606002
    
    I0411 20:11:43.463727 26325 solver.cpp:228] Iteration 9600, loss = 0.000524637
    
    I0411 20:11:43.463768 26325 solver.cpp:244]     Train net output #0: loss = 0.000524621 (* 1 = 0.000524621 loss)
    
    I0411 20:11:43.463776 26325 sgd_solver.cpp:106] Iteration 9600, lr = 0.00603682
    
    I0411 20:11:43.938684 26325 solver.cpp:228] Iteration 9700, loss = 0.000772679
    
    I0411 20:11:43.938725 26325 solver.cpp:244]     Train net output #0: loss = 0.000772663 (* 1 = 0.000772663 loss)
    
    I0411 20:11:43.938735 26325 sgd_solver.cpp:106] Iteration 9700, lr = 0.00601382
    
    I0411 20:11:44.415314 26325 solver.cpp:228] Iteration 9800, loss = 0.00051026
    
    I0411 20:11:44.415364 26325 solver.cpp:244]     Train net output #0: loss = 0.000510244 (* 1 = 0.000510244 loss)
    
    I0411 20:11:44.415372 26325 sgd_solver.cpp:106] Iteration 9800, lr = 0.00599102
    
    I0411 20:11:44.891758 26325 solver.cpp:228] Iteration 9900, loss = 0.000749505
    
    I0411 20:11:44.891798 26325 solver.cpp:244]     Train net output #0: loss = 0.000749489 (* 1 = 0.000749489 loss)
    
    I0411 20:11:44.891806 26325 sgd_solver.cpp:106] Iteration 9900, lr = 0.00596843
    
    I0411 20:11:45.362236 26325 solver.cpp:454] Snapshotting to binary proto file examples/mnist/lenet_iter_10000.caffemodel
    
    I0411 20:11:45.370354 26325 sgd_solver.cpp:273] Snapshotting solver state to binary proto file examples/mnist/lenet_iter_10000.solverstate
    
    I0411 20:11:45.373952 26325 solver.cpp:317] Iteration 10000, loss = 0.000326555
    
    I0411 20:11:45.373973 26325 solver.cpp:337] Iteration 10000, Testing net (#0)
    
    I0411 20:11:45.536599 26325 solver.cpp:404]     Test net output #0: accuracy = 0.977201
    
    I0411 20:11:45.536628 26325 solver.cpp:404]     Test net output #1: loss = 0.0180705 (* 1 = 0.0180705 loss)
    
    I0411 20:11:45.536634 26325 solver.cpp:322] Optimization Done.
    
    I0411 20:11:45.536639 26325 caffe.cpp:222] Optimization Done.
    
    

    Training completes with the snapshot examples/mnist/lenet_iter_10000.caffemodel and a final test accuracy of 0.977201.
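
    The log above is produced by Caffe's command-line training tool (`caffe train --solver=...`, as the caffe.cpp lines at the end show). The same run can also be driven from C++; below is a minimal sketch, not the original author's code, assuming a caffe-master build and a solver file at examples/mnist/lenet_solver.prototxt whose data layers point at our letter LMDBs:

    #include <caffe/caffe.hpp>        // SolverParameter, SolverRegistry, io helpers
    #include <boost/shared_ptr.hpp>
    #include <string>

    int main(int argc, char **argv)
    {
        ::google::InitGoogleLogging(argv[0]);  // initialize glog, as the caffe CLI does

        // Assumed path: the MNIST example solver adapted to the letter LMDBs.
        const std::string solver_file = "examples/mnist/lenet_solver.prototxt";

        caffe::SolverParameter solver_param;
        caffe::ReadProtoFromTextFileOrDie(solver_file, &solver_param);

        // Create the solver type named in the prototxt (SGD here) and run the
        // full optimization; snapshots are written as configured in the solver.
        boost::shared_ptr<caffe::Solver<float> > solver(
            caffe::SolverRegistry<float>::CreateSolver(solver_param));
        solver->Solve();
        return 0;
    }

    Linking such a program requires libcaffe and its dependencies (protobuf, glog, boost); the command-line tool remains the simpler route.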

    Prediction

    Predicting on new data

    With the trained caffemodel in hand, we can use it under OpenCV to recognize CAPTCHA characters or other handwritten letters.

    The prerequisite is that the OpenCV 3.1 lib and include paths are configured in Code::Blocks.

    The code is as follows:

    
    #include <opencv2/dnn.hpp>
    #include <opencv2/imgproc.hpp>
    #include <opencv2/highgui.hpp>

    #include <iostream>
    #include <cstdlib>

    using namespace cv;
    using namespace cv::dnn;
    using namespace std;

    /* Find the best class for the blob (i.e. the class with the maximal probability). */
    void getMaxClass(dnn::Blob &probBlob, int *classId, double *classProb)
    {
        Mat probMat = probBlob.matRefConst().reshape(1, 1); // flatten the blob to a 1xN row vector (N = number of classes)
        Point classNumber;
        minMaxLoc(probMat, NULL, classProb, NULL, &classNumber);
        *classId = classNumber.x;
    }

    int main(int argc, char **argv)
    {
        String modelTxt = "lenet_train_test.prototxt";
        String modelBin = "lenet_train_test.caffemodel";
        String imageFile = (argc > 1) ? argv[1] : "1x.jpg";

        Ptr<dnn::Importer> importer;
        try                                     // try to import the Caffe LeNet model
        {
            importer = dnn::createCaffeImporter(modelTxt, modelBin);
        }
        catch (const cv::Exception &err)        // the importer can throw; report and fall through
        {
            std::cerr << err.msg << std::endl;
        }
        if (!importer)
        {
            std::cerr << "Can't load network by using the following files: " << std::endl;
            std::cerr << "prototxt:   " << modelTxt << std::endl;
            std::cerr << "caffemodel: " << modelBin << std::endl;
            exit(-1);
        }

        dnn::Net net;
        importer->populateNet(net);
        importer.release();                     // the importer is no longer needed

        Mat img = imread(imageFile, 0);         // note: the input must be read as a grayscale image
        if (img.empty())
        {
            std::cerr << "Can't read image from the file: " << imageFile << std::endl;
            exit(-1);
        }

        //resize(img, img, Size(28, 28));       // uncomment if the input is not already 28x28
        dnn::Blob inputBlob = dnn::Blob(img);   // convert the Mat to a dnn::Blob image batch
                                                // (if the training data layer scaled pixels,
                                                // e.g. by 1/256, apply the same scaling here)
        net.setBlob(".data", inputBlob);        // set the network input
        net.forward();                          // compute the output
        dnn::Blob prob = net.getBlob("output"); // gather the output of the last layer ("output" in our prototxt)

        int classId;
        double classProb;
        getMaxClass(prob, &classId, &classProb); // find the best class

        std::cout << "output: " << classId << endl << classProb << std::endl;
        return 0;
    } //main
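
    A practical note: the program prints only a numeric class index. Translating that index back into a letter depends entirely on the label order used when the label list and LMDB were built. A minimal sketch, assuming the hypothetical convention 0-25 -> 'A'-'Z' and 26-51 -> 'a'-'z':

    // Hypothetical mapping from class index to character; the ranges below are
    // an assumption and must match the labels used when building the LMDB.
    char classIdToChar(int classId)
    {
        if (classId >= 0 && classId < 26)
            return static_cast<char>('A' + classId);        // uppercase block
        if (classId >= 26 && classId < 52)
            return static_cast<char>('a' + (classId - 26)); // lowercase block
        return '?';                                         // out-of-range label
    }

    Calling classIdToChar(classId) right after getMaxClass turns the console output into a readable letter; adjust the offsets if your label file uses a different order.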
    
    
    
    

    Results

    References:

    1. "Load Caffe framework models" (OpenCV 3.1 dnn tutorial): http://docs.opencv.org/3.1.0/d5/de7/tutorial_dnn_googlenet.html