Caffe Implementation Overview
Contents
1. Introduction to Caffe configuration files
2. Definitions of the standard layers
3. Network fine-tuning techniques
   (of these, the multistep learning-rate policy is the most commonly used)
4. Using Linux scripts and generating LMDB files
5. Designing a Caffe network for a classification task
Covered below:
Generating the solver configuration with pycaffe
Generating the Caffe training and test networks with pycaffe
# demoCaffe
Dataset downloads (CIFAR and MNIST), via Baidu Cloud:
Link: https://pan.baidu.com/s/1bHFQUz7Q6BMBZv25AhsXKQ  Password: dva9
Link: https://pan.baidu.com/s/1rPRjf2hanlYYjBQQDmIjNQ  Password: 5nhv
1. Creating LMDB data:
Manual walkthroughs: https://blog.csdn.net/yx2017/article/details/72953537
https://www.jianshu.com/p/9d7ed35960cb
Code examples: https://www.cnblogs.com/leemo-o/p/4990021.html
https://www.jianshu.com/p/ef84715e0fdc
The links above are provided for comparison/reference only.
demo_lmdb.py: generate data in LMDB format

```python
import lmdb
import numpy as np
import caffe
from caffe.proto import caffe_pb2


def write():
    # basic settings
    lmdb_file = 'lmdb_data'
    batch_size = 256

    # open (or create) the LMDB environment; map_size is the maximum database size in bytes
    lmdb_env = lmdb.open(lmdb_file, map_size=int(1e12))
    lmdb_txn = lmdb_env.begin(write=True)

    for x in range(batch_size):
        # dummy 3 x 64 x 64 image (C x H x W) and an integer label
        data = np.ones((3, 64, 64), np.uint8)
        label = x

        # pack the array and label into a Caffe Datum and store it under a zero-padded key
        datum = caffe.io.array_to_datum(data, label)
        keystr = "{:0>8d}".format(x)
        lmdb_txn.put(keystr.encode("ascii"), datum.SerializeToString())

    lmdb_txn.commit()


def read():
    lmdb_env = lmdb.open('lmdb_data')
    lmdb_txn = lmdb_env.begin()

    datum = caffe_pb2.Datum()

    for key, value in lmdb_txn.cursor():
        # deserialize each record back into a Datum, then into a numpy array
        datum.ParseFromString(value)
        label = datum.label
        data = caffe.io.datum_to_array(datum)
        print(label)
        print(data)


if __name__ == '__main__':
    write()
    read()
```
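The write() above stores dummy all-ones arrays. To build an LMDB from real images, each image has to be read, resized, and transposed to Caffe's C x H x W layout before being packed into a Datum. A minimal sketch, assuming a hypothetical list of (image path, label) pairs that is not part of the original repo:

```python
import lmdb
import cv2
import caffe

# hypothetical (image_path, integer_label) pairs; replace with your own file list
samples = [("images/cat.jpg", 0), ("images/dog.jpg", 1)]

env = lmdb.open("image_lmdb", map_size=int(1e12))
with env.begin(write=True) as txn:          # the transaction commits when the block exits
    for i, (path, label) in enumerate(samples):
        img = cv2.imread(path)              # H x W x C, BGR, uint8
        img = cv2.resize(img, (64, 64))
        img = img.transpose(2, 0, 1)        # -> C x H x W, as array_to_datum expects
        datum = caffe.io.array_to_datum(img, label)
        txn.put("{:0>8d}".format(i).encode("ascii"), datum.SerializeToString())
```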
demo_create_solver.py: generate the solver configuration file

```python
from caffe.proto import caffe_pb2

s = caffe_pb2.SolverParameter()

# training and test network definitions
s.train_net = "train.prototxt"
s.test_net.append("test.prototxt")
s.test_interval = 100        # run the test net every 100 training iterations
s.test_iter.append(10)       # number of test batches per test pass

s.max_iter = 1000            # total number of training iterations

s.base_lr = 0.1              # initial learning rate
s.weight_decay = 5e-4        # L2 regularization strength
s.lr_policy = "step"         # learning-rate decay policy

s.display = 10               # print training status every 10 iterations
s.snapshot = 10              # save a model snapshot every 10 iterations
s.snapshot_prefix = "model"
s.type = "SGD"               # solver type
s.solver_mode = caffe_pb2.SolverParameter.GPU

with open("net/s.prototxt", "w") as f:
    f.write(str(s))
```
The generated file looks like this (taken from the author's own run, so the paths and some values differ from the minimal script above):
```
train_net: "/home/kuan/PycharmProjects/demo_cnn_net/net/train.prototxt"
test_net: "/home/kuan/PycharmProjects/demo_cnn_net/net/test.prototxt"
test_iter: 1000
test_interval: 100
base_lr: 0.10000000149
display: 100
max_iter: 100000
lr_policy: "step"
weight_decay: 0.000500000023749
snapshot: 100
snapshot_prefix: "/home/kuan/PycharmProjects/demo_cnn_net/cnn_model/mnist/lenet/"
solver_mode: GPU
type: "SGD"
```
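Note that the "step" learning-rate policy normally also needs gamma and stepsize, which the script above does not set; with them, Caffe decays the learning rate as base_lr * gamma^floor(iter / stepsize). A sketch of the extra fields, with purely illustrative values:

```python
# illustrative values only; the original script leaves these at their defaults
s.gamma = 0.1        # multiply the learning rate by 0.1 ...
s.stepsize = 1000    # ... every 1000 iterations
# effective learning rate at iteration it: s.base_lr * s.gamma ** (it // s.stepsize)
```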
demo_creat_net.py: create the network

```python
import caffe


def create_net():
    net = caffe.NetSpec()

    # data layer reading from LMDB; ntop=2 means the layer produces two tops: data and label
    net.data, net.label = caffe.layers.Data(source="data.lmdb",
                                            backend=caffe.params.Data.LMDB,
                                            batch_size=32,
                                            ntop=2,
                                            transform_param=dict(crop_size=40, mirror=True))

    # first convolution block: conv + ReLU + max pooling
    net.conv1 = caffe.layers.Convolution(net.data, num_output=20, kernel_size=5,
                                         weight_filler={"type": "xavier"},
                                         bias_filler={"type": "xavier"})
    net.relu1 = caffe.layers.ReLU(net.conv1, in_place=True)
    net.pool1 = caffe.layers.Pooling(net.relu1, pool=caffe.params.Pooling.MAX,
                                     kernel_size=3, stride=2)

    # second convolution block
    net.conv2 = caffe.layers.Convolution(net.pool1, num_output=32, kernel_size=3,
                                         pad=1,
                                         weight_filler={"type": "xavier"},
                                         bias_filler={"type": "xavier"})
    net.relu2 = caffe.layers.ReLU(net.conv2, in_place=True)
    net.pool2 = caffe.layers.Pooling(net.relu2, pool=caffe.params.Pooling.MAX,
                                     kernel_size=3, stride=2)

    # fully connected layers
    net.fc3 = caffe.layers.InnerProduct(net.pool2, num_output=1024,
                                        weight_filler=dict(type='xavier'))
    net.relu3 = caffe.layers.ReLU(net.fc3, in_place=True)

    # dropout for regularization
    net.drop = caffe.layers.Dropout(net.relu3, dropout_param=dict(dropout_ratio=0.5))

    net.fc4 = caffe.layers.InnerProduct(net.drop, num_output=10,
                                        weight_filler=dict(type='xavier'))

    # softmax loss over the 10 classes
    net.loss = caffe.layers.SoftmaxWithLoss(net.fc4, net.label)

    with open("net/tt.prototxt", 'w') as f:
        f.write(str(net.to_proto()))


if __name__ == '__main__':
    create_net()
```
The generated prototxt is as follows (again from the author's run, so the LMDB path differs from the minimal script above):
```
layer {
  name: "data"
  type: "Data"
  top: "data"
  top: "label"
  transform_param {
    mirror: true
    crop_size: 40
  }
  data_param {
    source: "/home/kuan/PycharmProjects/demo_cnn_net/lmdb_data"
    batch_size: 32
    backend: LMDB
  }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 20
    kernel_size: 5
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  convolution_param {
    num_output: 32
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "conv2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "fc3"
  type: "InnerProduct"
  bottom: "pool2"
  top: "fc3"
  inner_product_param {
    num_output: 1024
    weight_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "fc3"
  top: "fc3"
}
layer {
  name: "drop"
  type: "Dropout"
  bottom: "fc3"
  top: "drop"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc4"
  type: "InnerProduct"
  bottom: "drop"
  top: "fc4"
  inner_product_param {
    num_output: 10
    weight_filler {
      type: "xavier"
    }
  }
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "fc4"
  bottom: "label"
  top: "loss"
}
```
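demo_test.py below loads a deploy.prototxt, i.e. the same architecture with the LMDB Data layer replaced by an Input layer and the loss replaced by a plain Softmax that outputs class probabilities. The repo's actual deploy file is not shown here; the following is only a sketch of how one could be generated with NetSpec, assuming the 1 x 3 x 32 x 32 input shape used in demo_test.py (the input size must match whatever the caffemodel was actually trained with):

```python
import caffe


def create_deploy_net():
    net = caffe.NetSpec()

    # Input layer instead of the LMDB Data layer; shape is N x C x H x W
    net.data = caffe.layers.Input(input_param=dict(shape=dict(dim=[1, 3, 32, 32])))

    # same trainable layers as the training net; fillers are omitted because
    # the weights are loaded from the .caffemodel by layer name
    net.conv1 = caffe.layers.Convolution(net.data, num_output=20, kernel_size=5)
    net.relu1 = caffe.layers.ReLU(net.conv1, in_place=True)
    net.pool1 = caffe.layers.Pooling(net.relu1, pool=caffe.params.Pooling.MAX,
                                     kernel_size=3, stride=2)
    net.conv2 = caffe.layers.Convolution(net.pool1, num_output=32, kernel_size=3, pad=1)
    net.relu2 = caffe.layers.ReLU(net.conv2, in_place=True)
    net.pool2 = caffe.layers.Pooling(net.relu2, pool=caffe.params.Pooling.MAX,
                                     kernel_size=3, stride=2)
    net.fc3 = caffe.layers.InnerProduct(net.pool2, num_output=1024)
    net.relu3 = caffe.layers.ReLU(net.fc3, in_place=True)
    # Dropout is omitted: it is inactive at test time and has no weights to load
    net.fc4 = caffe.layers.InnerProduct(net.relu3, num_output=10)

    # Softmax instead of SoftmaxWithLoss: no label bottom, outputs probabilities
    net.prob = caffe.layers.Softmax(net.fc4)

    with open("net/deploy.prototxt", "w") as f:
        f.write(str(net.to_proto()))
```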
demo_train.py: train the network

```python
import sys
sys.path.append('/home/kuan/AM-softmax_caffe/python')  # path to the local pycaffe build
import caffe

# load the solver configuration and run the optimization until max_iter
solver = caffe.SGDSolver("/home/kuan/PycharmProjects/demo_cnn_net/cnn_net/alexnet/solver.prototxt")
solver.solve()
```
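solve() runs straight through to max_iter. The solver can also be stepped manually, which makes it easy to inspect the loss between updates. A minimal sketch using the same solver file; it assumes the train net exposes a blob named "loss", as the network generated above does:

```python
import sys
sys.path.append('/home/kuan/AM-softmax_caffe/python')  # path to the local pycaffe build
import caffe

caffe.set_mode_gpu()   # or caffe.set_mode_cpu()

solver = caffe.SGDSolver("/home/kuan/PycharmProjects/demo_cnn_net/cnn_net/alexnet/solver.prototxt")

for it in range(100):
    solver.step(1)     # one forward/backward pass plus a parameter update
    if it % 10 == 0:
        # read the current training loss from the train net (assumes a blob named "loss")
        print(it, float(solver.net.blobs["loss"].data))
```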
demo_test.py: test the network

```python
import sys
sys.path.append('/home/kuan/AM-softmax_caffe/python')  # path to the local pycaffe build
import caffe
import numpy as np

# deploy definition and trained weights (.caffemodel)
deploy = "/home/kuan/PycharmProjects/demo_cnn_net/cnn_net/alexnet/deploy.prototxt"
model = "/home/kuan/PycharmProjects/demo_cnn_net/cnn_model/cifar/alexnet/alexnet_iter_110.caffemodel"

net = caffe.Net(deploy, model, caffe.TEST)

# fill the input blob with a dummy 3 x 32 x 32 image (broadcast over the batch dimension)
net.blobs["data"].data[...] = np.ones((3, 32, 32), np.uint8)

net.forward()

# class probabilities for the first image in the batch
prob = net.blobs["prob"].data[0]
print(prob)
```
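To test on a real image instead of an all-ones array, the input has to be resized, transposed to C x H x W, and scaled the same way the training data was. A sketch using caffe.io on the net loaded above; the image path is hypothetical, and mean subtraction is omitted because the pipeline above does not use a mean file:

```python
# reshape the input blob for a single image, then build a preprocessing transformer
net.blobs["data"].reshape(1, 3, 32, 32)
transformer = caffe.io.Transformer({"data": net.blobs["data"].data.shape})
transformer.set_transpose("data", (2, 0, 1))      # H x W x C -> C x H x W
transformer.set_raw_scale("data", 255)            # load_image returns [0, 1]; rescale to [0, 255]
transformer.set_channel_swap("data", (2, 1, 0))   # RGB -> BGR, if the model was trained on BGR data

img = caffe.io.load_image("test.jpg")             # hypothetical image path
net.blobs["data"].data[...] = transformer.preprocess("data", img)
net.forward()
print(net.blobs["prob"].data[0].argmax())         # index of the predicted class
```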