• yolov2-tiny-voc.cfg parameter analysis


    1. Parameter analysis

    [net]
    batch=64         # number of images used for one weight update (one training iteration)
    subdivisions=8   # the batch is split into this many mini-batches that fit in memory; in the source the number of images loaded per iteration is int imgs = net.batch * net.subdivisions * ngpus
    height=416       # height of input image
    width=416        # width of input image
    channels=3       # channel of input image
    momentum=0.9     # momentum term of the gradient-descent update, a common acceleration technique
    decay=0.0005     # weight decay, helps prevent overfitting
    
    # For every training iteration, YOLOv2 generates new training images by varying angle, saturation, exposure and hue
    angle=0          # rotation range in degrees; with angle=5, new images are randomly rotated between -5 and 5 degrees
    saturation = 1.5 # saturation variation, 1x to 1.5x
    exposure = 1.5   # exposure variation, 1x to 1.5x
    hue=.1           # hue variation range, -0.1 to 0.1
    
    learning_rate=0.0001  # initial learning rate
    max_batches = 45000   # maximum number of training iterations
    policy=steps          # learning-rate adjustment policy: CONSTANT, STEP, EXP, POLY, STEPS, SIG, RANDOM
    steps=100,25000,35000 # batch numbers at which the learning rate changes: with steps=100,25000,35000 it changes at iterations 100, 25000 and 35000; used together with policy=steps
    scales=10,.1,.1       # multipliers applied to the current learning rate at the corresponding steps, accumulated by multiplication; must have as many entries as steps (see the sketch right after this [net] section)
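
    How batch/subdivisions and the steps learning-rate policy interact is easiest to see in code. Below is a minimal Python sketch (illustrative only, not Darknet source; the function name lr_at is made up here) of the effective mini-batch size and of how the scales are applied as the steps are passed.

        # Minimal sketch (not Darknet source) of how batch/subdivisions and the
        # "steps" learning-rate policy behave.
        batch, subdivisions = 64, 8
        minibatch = batch // subdivisions   # 8 images per forward/backward pass;
                                            # gradients accumulate over 8 mini-batches
                                            # before one update on all 64 images

        def lr_at(batch_num, base_lr=0.0001,
                  steps=(100, 25000, 35000), scales=(10, 0.1, 0.1)):
            """Learning rate after batch_num iterations under policy=steps:
            each time a step is passed, the rate is multiplied (cumulatively)
            by the corresponding scale."""
            lr = base_lr
            for step, scale in zip(steps, scales):
                if batch_num >= step:
                    lr *= scale
            return lr

        print(minibatch)      # 8
        print(lr_at(50))      # 0.0001
        print(lr_at(200))     # 0.001  (x10 after iteration 100)
        print(lr_at(30000))   # 0.0001 (x0.1 after iteration 25000)
        print(lr_at(40000))   # 1e-05  (x0.1 again after iteration 35000)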
    
    [convolutional]
    batch_normalize=1     # whether to apply batch normalization
    filters=32
    size=3
    stride=1
    pad=1
    activation=leaky
    # available activation functions: logistic, loggy, relu, elu, relie, plse, hardtan, lhtan, linear, ramp, leaky, tanh, stair
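
    Two small calculations help when reading the rest of the file: the leaky activation is a leaky ReLU with slope 0.1 for negative inputs, and with size=3, stride=1, pad=1 a convolution keeps the spatial size while each size=2, stride=2 maxpool halves it, so the 416x416 input ends up as a 13x13 feature map after the five maxpools below. A minimal sketch (standard conv arithmetic assumed; this is not Darknet code):

        # Minimal sketch of the "leaky" activation and of how the spatial size
        # evolves through the conv/maxpool stack.
        def leaky(x, slope=0.1):
            """Leaky ReLU, as selected by activation=leaky."""
            return x if x > 0 else slope * x

        def conv_out(w, size, stride, pad):
            """Output width of a convolution; pad=1 in the cfg means padding = size // 2."""
            padding = size // 2 if pad else 0
            return (w + 2 * padding - size) // stride + 1

        print(leaky(-2.0))                    # -0.2
        w = 416
        for _ in range(5):                    # the five maxpool sections below
            w = conv_out(w, size=3, stride=1, pad=1)    # 3x3/1 conv keeps the size
            w = w // 2                                  # 2x2/2 maxpool halves it
        print(w)                              # 13, the grid size used by the region layer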
    
    [maxpool]
    size=2
    stride=2
    
    [convolutional]
    batch_normalize=1
    filters=64
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [maxpool]
    size=2
    stride=2
    
    [convolutional]
    batch_normalize=1
    filters=128
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=64
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=128
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [maxpool]
    size=2
    stride=2
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [maxpool]
    size=2
    stride=2
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [maxpool]
    size=2
    stride=2
    
    [convolutional]
    batch_normalize=1
    filters=1024
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=1024
    size=3
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    filters=1024
    size=3
    stride=1
    pad=1
    activation=leaky
    
    
    #######
    
    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=1024
    activation=leaky
    
    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=1024
    activation=leaky
    
    # the route layer brings finer-grained features in from earlier in the network
    [route]
    layers=-9
    
    # the reorg layer makes these features match the feature map size of the later layer:
    # the final feature map is 13x13, while the feature map from earlier is 26x26x512.
    # The reorg layer maps the 26x26x512 feature map onto a 13x13x2048 feature map so that it can be concatenated with the feature maps at 13x13 resolution; a sketch follows the [reorg] block below.
    [reorg]
    stride=2
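
    A minimal numpy sketch of the shape bookkeeping behind reorg with stride=2, treated here as a space-to-depth operation (the exact channel ordering used by Darknet may differ):

        import numpy as np

        # Minimal sketch of reorg stride=2 as a space-to-depth operation:
        # every 2x2 spatial block is folded into the channel dimension,
        # so a 26x26x512 map becomes 13x13x2048.
        stride = 2
        x = np.random.rand(512, 26, 26)       # (channels, height, width)
        c, h, w = x.shape
        out = (x.reshape(c, h // stride, stride, w // stride, stride)
                .transpose(0, 2, 4, 1, 3)
                .reshape(c * stride * stride, h // stride, w // stride))
        print(out.shape)                      # (2048, 13, 13)

    The [route] layers=-1,-3 just below then concatenates this 2048-channel map with the preceding 13x13x1024 map, so the next convolution sees 3072 input channels.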
    
    [route]
    layers=-1,-3
    
    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=1024
    activation=leaky
    
    [convolutional]
    size=1
    stride=1
    pad=1
    filters=125
    activation=linear
    
    [region]
    # anchors: initial widths and heights of the predicted boxes, given as w,h pairs; the total number of values is num*2
    # The YOLOv2 authors obtain the anchors by running k-means on the training boxes, i.e. by finding which box shapes occur most often, which speeds up convergence; if anchors are not set, the default is 0.5
    anchors = 1.08,1.19,  3.42,4.41,  6.63,11.38,  9.42,5.11,  16.62,10.52
    bias_match=1 # if 1, the predicted width/height are forced to match the anchors when computing the best IOU
    classes=20   # number of object classes
    coords=4     # the bounding-box coordinates tx,ty,tw,th; tx and ty are offsets from the top-left corner of the grid cell, expressed as a fraction of the cell, while tw and th are the log-scaled width and height (see the decoding sketch after this [region] section)
    num=5        # number of bounding boxes predicted per grid cell
    softmax=1    # if 1, use a softmax over the class scores
    jitter=.2    # data jittering generates extra training data to suppress overfitting; YOLOv2 uses crop, flip and the angle from the [net] section; flip is random, and crop is controlled by jitter: with jitter=.2 (as in tiny-yolo-voc.cfg) the crop is sampled from 0 to 0.2
    rescore=1    # selects how the IOU error target is computed: if 1, use the current best IOU as the target; if 0, use 1
    
    # the *_scale values are the weights of the corresponding terms in the YOLOv1 cost function; the larger a scale, the more that term contributes to each weight update
    object_scale=5
    noobject_scale=1
    class_scale=1
    coord_scale=1
    
    absolute=1
    thresh = .6 # decides whether the IOU error is counted: if the IOU is greater than thresh, the IOU error is not added to the cost function
    random=0    # if 1, the input resolution is randomly resampled between 320 and 608 in steps of 32 during training; if 0, the training size stays equal to the configured input size
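
    Two details of the [region] section are worth spelling out: the 125 filters of the final 1x1 convolution come from num * (classes + coords + 1) = 5 * (20 + 4 + 1), and the predicted tx, ty, tw, th are decoded against the grid cell and the anchors. A minimal sketch following the decoding equations from the YOLOv2 paper (variable names are illustrative, not Darknet identifiers):

        import math

        # Minimal sketch of how the [region] layer's numbers fit together.
        num, classes, coords = 5, 20, 4
        print(num * (classes + coords + 1))   # 125, the filter count of the last conv

        # anchors are (w, h) pairs measured in grid cells on the 13x13 grid
        anchors = [(1.08, 1.19), (3.42, 4.41), (6.63, 11.38), (9.42, 5.11), (16.62, 10.52)]

        def sigmoid(z):
            return 1.0 / (1.0 + math.exp(-z))

        def decode_box(tx, ty, tw, th, col, row, anchor, grid=13):
            """Decode one predicted box into center/size normalized to [0, 1]."""
            pw, ph = anchor
            bx = (col + sigmoid(tx)) / grid   # tx, ty: offsets inside the grid cell
            by = (row + sigmoid(ty)) / grid
            bw = pw * math.exp(tw) / grid     # tw, th: log-scale factors on the anchor
            bh = ph * math.exp(th) / grid
            return bx, by, bw, bh

        print(decode_box(0.0, 0.0, 0.0, 0.0, col=6, row=6, anchor=anchors[0]))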
    

     
    2. What the fields in the training log mean

    Region Avg IOU: average IOU, i.e. the intersection over union of the predicted bounding box and the ground truth; this value should approach 1 (an IOU sketch follows this list).
    Class: probability of the labelled object's class; should approach 1.
    Obj: objectness confidence; should approach 1.
    No Obj: should keep decreasing but not reach zero.
    Avg Recall: should approach 1.
    avg: average loss; should approach 0.
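
    Several of these fields are IOU-based; for reference, a minimal sketch of the IOU of two axis-aligned boxes given as (x1, y1, x2, y2) corners (illustrative code, not Darknet's box_iou):

        # Minimal IOU sketch for two axis-aligned boxes given as (x1, y1, x2, y2).
        def iou(a, b):
            ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
            ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
            inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
            area_a = (a[2] - a[0]) * (a[3] - a[1])
            area_b = (b[2] - b[0]) * (b[3] - b[1])
            union = area_a + area_b - inter
            return inter / union if union > 0 else 0.0

        print(iou((0, 0, 2, 2), (1, 1, 3, 3)))   # 1 / 7 ≈ 0.142857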


    Definition of mAP and related concepts

        mAP: mean Average Precision, i.e. the mean of the per-class AP values
        AP: the area under the PR curve (see the sketch after this list)
        PR curve: the Precision-Recall curve
        Precision: TP / (TP + FP)
        Recall: TP / (TP + FN)
        TP: number of detections with IoU > 0.5 (each ground truth is counted at most once)
        FP: number of detections with IoU <= 0.5, plus redundant detections of an already-matched ground truth
        FN: number of ground truths that were not detected
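
    A minimal sketch of how AP follows from these definitions, assuming the detections of one class are already sorted by confidence and flagged TP/FP (this uses the plain area under the PR curve; PASCAL VOC 2007 actually uses an 11-point interpolation):

        # Minimal AP sketch: detections are sorted by confidence and flagged TP/FP
        # (a detection is a TP if IoU > 0.5 with a not-yet-matched ground truth).
        def average_precision(tp_flags, num_gt):
            tp = fp = 0
            precisions, recalls = [], []
            for is_tp in tp_flags:               # already sorted by confidence
                tp += is_tp
                fp += 1 - is_tp
                precisions.append(tp / (tp + fp))
                recalls.append(tp / num_gt)
            # area under the PR curve, accumulated over the recall steps
            ap, prev_recall = 0.0, 0.0
            for p, r in zip(precisions, recalls):
                ap += p * (r - prev_recall)
                prev_recall = r
            return ap

        # 5 detections for one class, 3 ground-truth boxes:
        print(average_precision([1, 0, 1, 1, 0], num_gt=3))
        # mAP is then the mean of the per-class APs.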

    Reposted from:
    Author: _John_Tian_, "yolo-voc.2.0.cfg parameter analysis"
    Source: CSDN
    Original: https://blog.csdn.net/tianzhaixing2013/article/details/79269275
