• 3、tensorflow构建网络


    # Build an AlexNet-style CNN with TF-Slim. The arg_scope sets shared
    # defaults (ReLU activation, Glorot/Xavier weight init, zero bias init)
    # for every conv2d and fully_connected layer created inside it.
    # NOTE(review): `inputs`, `is_training` and `num_classes` are assumed to
    # be defined by the enclosing function — not visible in this fragment.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                             activation_fn=tf.nn.relu,
                             weights_initializer=tf.glorot_uniform_initializer(),
                    #weights_initializer=tf.contrib.layers.xavier_initializer_conv2d()  # Xavier init for conv layers; drop the "_conv2d" suffix to initialize FC layers
                  #weights_initializer= tf.contrib.layers.variance_scaling_initializer( ),  # alternative: He/variance-scaling init
                             biases_initializer=tf.constant_initializer(0)):
            
            net = slim.conv2d(inputs, 64, [11, 11], 4)  # stride defaults to 1, changed to 4 here; 64 = number of output feature maps
            net = slim.max_pool2d(net, [3, 3])
            net = slim.conv2d(net, 192, [5, 5])
            net = slim.max_pool2d(net, [3, 3])
            net = slim.conv2d(net, 384, [3, 3])
            net = slim.conv2d(net, 384, [3, 3])
            net = slim.conv2d(net, 256, [3, 3])
            net = slim.max_pool2d(net, [3, 3])
            
            # Flatten the feature maps into a vector for the FC head.
            net = slim.flatten(net)
            net = slim.fully_connected(net, 1024)
            net = slim.dropout(net, is_training=is_training)
            
            # Four parallel softmax heads of `num_classes` each —
            # presumably one per character position (e.g. captcha); verify against caller.
            net0 = slim.fully_connected(net, num_classes, activation_fn=tf.nn.softmax)
            net1 = slim.fully_connected(net, num_classes, activation_fn=tf.nn.softmax)
            net2 = slim.fully_connected(net, num_classes, activation_fn=tf.nn.softmax)
            net3 = slim.fully_connected(net, num_classes, activation_fn=tf.nn.softmax)

    BN层:
    net = tf.layers.batch_normalization(net,
                                        trainable=True,  # if True, the normalized output goes through a learned affine (scale/shift) transform
                            training=is_training)  # training vs. inference mode; inference uses the moving mean/variance accumulated during training

     L2正则化

    方法一

    weight_decay = 0.001

    # Data term: mean cross-entropy between integer labels and logits.
    base_loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
    # Penalty term: sum of squared weights over every trainable variable,
    # scaled by the decay coefficient.
    penalties = [tf.nn.l2_loss(tf.cast(v, tf.float32)) for v in tf.trainable_variables()]
    l2_loss = weight_decay * tf.add_n(penalties)
    loss = base_loss + l2_loss

    方法二
    # Method 2: attach an L2 regularizer to each layer via kernel_regularizer;
    # TensorFlow accumulates the per-layer penalties in the
    # GraphKeys.REGULARIZATION_LOSSES collection, which we sum into the loss.
    x = tf.layers.conv2d(
        x, 512, (3, 3),
        padding='same',
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))  # fixed: closing paren was missing in the original

    base_loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
    # get_collection returns a LIST of per-layer regularization tensors.
    l2_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    loss = tf.add_n([base_loss] + l2_loss, name="loss")
  • 相关阅读:
    CF703D Mishka and Interesting sum
    CF697D Puzzles
    SCOI2017酱油记
    [BZOJ4730][清华集训2016][UOJ266] Alice和Bob又在玩游戏
    BZOJ4311:向量
    BZOJ4520: [Cqoi2016]K远点对
    BZOJ4555: [Tjoi2016&Heoi2016]求和
    [Codechef November Challenge 2012] Arithmetic Progressions
    agc040
    补题
  • 原文地址:https://www.cnblogs.com/yunshangyue71/p/13611286.html
Copyright © 2020-2023  润新知