• AlexNet Convolutional Neural Network [Forward Pass]


    1. Code Implementation

      # -*- coding: utf-8 -*-
      """
      Created on Wed Nov 14 17:13:05 2018

      @author: zhen
      """

      from datetime import datetime
      import math
      import time
      import tensorflow as tf

      batch_size = 32
      num_batches = 100

      # Print a layer's name and output shape
      def print_activations(t):
          print(t.op.name, " ", t.get_shape().as_list())

      # Build the (simplified) AlexNet convolutional layers; return the final
      # pooled feature map and the list of trainable parameters
      def inference(images):
          parameters = []
          # conv1: 11x11 kernel, stride 4, 3 -> 64 channels
          with tf.name_scope('conv1') as scope:
              kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64], dtype=tf.float32, stddev=1e-1), name='weights')
              conv = tf.nn.conv2d(images, kernel, [1, 4, 4, 1], padding='SAME')
              biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32), trainable=True, name='biases')
              bias = tf.nn.bias_add(conv, biases)
              conv1 = tf.nn.relu(bias, name=scope)
              print_activations(conv1)
              parameters += [kernel, biases]
          lrn1 = tf.nn.lrn(conv1, depth_radius=4, bias=1.0, alpha=0.001/9, beta=0.75, name='lrn1')
          pool1 = tf.nn.max_pool(lrn1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool1')
          print_activations(pool1)

          # conv2: 5x5 kernel, stride 1, 64 -> 128 channels
          with tf.name_scope('conv2') as scope:
              kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 128], dtype=tf.float32, stddev=1e-1), name='weights')
              conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
              biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32), trainable=True, name='biases')
              bias = tf.nn.bias_add(conv, biases)
              conv2 = tf.nn.relu(bias, name=scope)
              parameters += [kernel, biases]
              print_activations(conv2)

          lrn2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001/9, beta=0.75, name='lrn2')
          pool2 = tf.nn.max_pool(lrn2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool2')
          print_activations(pool2)

          # conv3: 3x3 kernel, 128 -> 256 channels
          with tf.name_scope('conv3') as scope:
              kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 256], dtype=tf.float32, stddev=1e-1), name='weights')
              conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
              biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name='biases')
              bias = tf.nn.bias_add(conv, biases)
              conv3 = tf.nn.relu(bias, name=scope)
              parameters += [kernel, biases]
              print_activations(conv3)

          # conv4: 3x3 kernel, 256 -> 128 channels
          with tf.name_scope('conv4') as scope:
              kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 128], dtype=tf.float32, stddev=1e-1), name='weights')
              conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
              biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32), trainable=True, name='biases')
              bias = tf.nn.bias_add(conv, biases)
              conv4 = tf.nn.relu(bias, name=scope)
              parameters += [kernel, biases]
              print_activations(conv4)

          # conv5: 3x3 kernel, 128 -> 128 channels
          with tf.name_scope('conv5') as scope:
              kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 128], dtype=tf.float32, stddev=1e-1), name='weights')
              conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
              biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32), trainable=True, name='biases')
              bias = tf.nn.bias_add(conv, biases)
              conv5 = tf.nn.relu(bias, name=scope)
              parameters += [kernel, biases]
              print_activations(conv5)

          pool5 = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool5')
          print_activations(pool5)

          return pool5, parameters

      # Measure the per-batch computation time of AlexNet and report mean/stddev
      def time_run(session, target, info_string):
          num_steps_burn_in = 10  # warm-up iterations, excluded from the statistics
          total_duration = 0.0
          total_duration_squared = 0.0

          for i in range(num_batches + num_steps_burn_in):
              start_time = time.time()
              session.run(target)
              duration = time.time() - start_time
              if i >= num_steps_burn_in:
                  if not i % 10:
                      print('%s:step %d, duration=%.3f'%(datetime.now(), i - num_steps_burn_in, duration))
                  total_duration += duration
                  total_duration_squared += duration * duration
          # mean and standard deviation of the per-batch time: Var[x] = E[x^2] - E[x]^2
          mn = total_duration / num_batches
          vr = total_duration_squared / num_batches - mn * mn
          sd = math.sqrt(vr)
          print('%s:%s across %d steps,%.3f +/- %.3f sec / batch'%(datetime.now(), info_string, num_batches, mn, sd))

      # Build the graph on random input and time the forward and forward-backward passes
      def fit_benchmark():
          with tf.Graph().as_default():
              image_size = 224
              images = tf.Variable(tf.random_normal([batch_size, image_size, image_size, 3], dtype=tf.float32, stddev=1e-1))
              pool5, parameters = inference(images)
              init = tf.global_variables_initializer()
              sess = tf.Session()
              sess.run(init)

              # forward pass only
              time_run(sess, pool5, "Forward")
              # forward + backward: gradients of an L2 loss on pool5 w.r.t. the parameters
              objective = tf.nn.l2_loss(pool5)
              grad = tf.gradients(objective, parameters)
              time_run(sess, grad, "Forward-backward")

      fit_benchmark()
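
      Note that this script is written against the TensorFlow 1.x graph API (tf.Session, tf.global_variables_initializer, tf.truncated_normal). A minimal compatibility shim, assuming a TensorFlow 2.x installation, is to import the v1 API and disable eager execution before the code above runs:

      import tensorflow.compat.v1 as tf
      tf.disable_v2_behavior()  # restores graph mode and the 1.x symbols used above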

    2. Results

    conv1   [32, 56, 56, 64]
    pool1   [32, 27, 27, 64]
    conv2   [32, 27, 27, 128]
    pool2   [32, 13, 13, 128]
    conv3   [32, 13, 13, 256]
    conv4   [32, 13, 13, 128]
    conv5   [32, 13, 13, 128]
    pool5   [32, 6, 6, 128]
    2019-01-27 10:51:37.551617:step 0, duration=1.625
    2019-01-27 10:51:54.082824:step 10, duration=1.766
    2019-01-27 10:52:10.582787:step 20, duration=1.641
    2019-01-27 10:52:27.051502:step 30, duration=1.672
    2019-01-27 10:52:43.507558:step 40, duration=1.625
    2019-01-27 10:52:59.913772:step 50, duration=1.625
    2019-01-27 10:53:16.245750:step 60, duration=1.672
    2019-01-27 10:53:32.511337:step 70, duration=1.625
    2019-01-27 10:53:48.901938:step 80, duration=1.609
    2019-01-27 10:54:05.183145:step 90, duration=1.625
    2019-01-27 10:54:19.917492:Forward across 100 steps,1.640 +/- 0.031 sec / batch
    2019-01-27 10:55:47.146016:step 0, duration=7.719
    2019-01-27 10:57:04.602639:step 10, duration=7.766
    2019-01-27 10:58:26.594245:step 20, duration=9.842
    2019-01-27 11:00:01.957195:step 30, duration=8.391
    2019-01-27 11:01:35.103007:step 40, duration=10.073
    2019-01-27 11:03:07.656318:step 50, duration=8.988
    2019-01-27 11:04:31.844207:step 60, duration=8.590
    2019-01-27 11:06:01.173490:step 70, duration=9.422
    2019-01-27 11:07:28.737373:step 80, duration=10.635
    2019-01-27 11:09:03.830375:step 90, duration=8.653
    2019-01-27 11:10:19.836018:Forward-backward across 100 steps,8.804 +/- 0.817 sec / batch
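
      The printed spatial sizes can be checked by hand: with 'SAME' padding TensorFlow computes ceil(input / stride), and with 'VALID' pooling ceil((input - ksize + 1) / stride). A quick plain-Python sanity check of the sizes above (the helper names are mine):

      import math

      def same_out(size, stride):          # 'SAME' padding: ceil(size / stride)
          return math.ceil(size / stride)

      def valid_out(size, ksize, stride):  # 'VALID' padding: ceil((size - ksize + 1) / stride)
          return math.ceil((size - ksize + 1) / stride)

      s = same_out(224, 4)    # conv1     -> 56
      s = valid_out(s, 3, 2)  # pool1     -> 27
      s = same_out(s, 1)      # conv2     -> 27
      s = valid_out(s, 3, 2)  # pool2     -> 13
      s = same_out(s, 1)      # conv3/4/5 -> 13
      s = valid_out(s, 3, 2)  # pool5     -> 6
      print(s)                # 6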

    3. Analysis

      1. AlexNet won the 2012 ILSVRC image-classification competition with a top-5 error rate of 16.4%; the network has 8 learned layers (5 convolutional, 3 fully connected).

      2. AlexNet brought together several techniques that were new at the time, and was the first CNN to successfully apply tricks such as ReLU, Dropout, and LRN.

      3. It uses ReLU as the activation function, which avoids the vanishing-gradient problem that Sigmoid suffers from when the network gets deep.

      4. During training, Dropout randomly drops some neurons, which helps avoid overfitting (see the first sketch after this list).

      5. It uses overlapping max pooling. CNNs before it commonly used average pooling; max pooling avoids the blurring effect of average pooling, and setting the stride (2) smaller than the pooling window (3*3), as in the code above, makes adjacent windows overlap.

      6. It proposed the LRN layer, which creates a competition mechanism among the activities of local neurons: relatively large responses become even more dominant while neurons with smaller responses are suppressed, which improves generalization (see the second sketch after this list).

      7. Data augmentation: 224*224 regions are randomly cropped from the 256*256 source images, along with their horizontal mirror reflections, which is equivalent to increasing the amount of data by a factor of [(256-224)^2]*2 = 2048 (see the third sketch after this list).

      Note: without data augmentation, relying only on the original amount of data, a CNN with this many parameters would fall into overfitting.
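
      For point 4, a minimal sketch of how Dropout is typically applied in TF 1.x. The benchmark above omits the fully connected layers where AlexNet uses Dropout, so the fc tensor here is only a stand-in:

      import tensorflow as tf  # TF 1.x, as in the code above

      fc = tf.random_normal([32, 4096])       # stand-in for a fully connected layer's output
      keep_prob = tf.placeholder(tf.float32)  # e.g. 0.5 during training, 1.0 at test time
      fc_drop = tf.nn.dropout(fc, keep_prob)  # zeroes units at random, scales survivors by 1/keep_prob

      with tf.Session() as sess:
          print(sess.run(fc_drop, feed_dict={keep_prob: 0.5}).shape)  # (32, 4096)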
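
      For point 6, the LRN used in the code above (tf.nn.lrn with depth_radius=4, bias=1.0, alpha=0.001/9, beta=0.75) divides each activation by a power of the summed squares of its neighbours along the channel axis. A NumPy sketch of the same computation for one pixel's channel vector (lrn_1d is a name of my choosing):

      import numpy as np

      # out[i] = a[i] / (bias + alpha * sum_{j=i-r..i+r} a[j]^2) ** beta
      def lrn_1d(a, depth_radius=4, bias=1.0, alpha=0.001/9, beta=0.75):
          out = np.empty_like(a)
          for i in range(len(a)):
              lo, hi = max(0, i - depth_radius), min(len(a), i + depth_radius + 1)
              out[i] = a[i] / (bias + alpha * np.sum(a[lo:hi] ** 2)) ** beta
          return out

      a = np.array([1.0, 4.0, 2.0, 0.5])  # one pixel's activations across 4 channels
      print(lrn_1d(a))                    # the strongest response keeps the largest share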
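
      For point 7, a sketch of that augmentation in TF 1.x terms: random 224*224 crops give the (256-224)^2 = 1024 offsets counted above, and horizontal mirroring doubles this to 2048:

      import tensorflow as tf  # TF 1.x

      image = tf.random_uniform([256, 256, 3])      # stand-in for a 256x256 training image
      crop = tf.random_crop(image, [224, 224, 3])   # one random crop offset per run
      crop = tf.image.random_flip_left_right(crop)  # x2 via horizontal mirroring

      with tf.Session() as sess:
          print(sess.run(crop).shape)  # (224, 224, 3)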
