• TensorFlow 2.0: a network that applies convolution first, then LSTM


    import tensorflow as tf


    class MyRnn(tf.keras.Model):
        def __init__(self, units):
            super(MyRnn, self).__init__()
            self.mycnn = tf.keras.Sequential([
                tf.keras.layers.Conv2D(12, kernel_size=[3, 3], activation=tf.nn.relu),
                tf.keras.layers.BatchNormalization(),
                tf.keras.layers.MaxPool2D(pool_size=[3, 3], strides=3, padding='same'),
                tf.keras.layers.Conv2D(24, kernel_size=[4, 4], activation=tf.nn.relu),
                tf.keras.layers.BatchNormalization(),
                tf.keras.layers.MaxPool2D(pool_size=[3, 3], strides=3, padding='same'),
                tf.keras.layers.Conv2D(48, kernel_size=[3, 3], activation=tf.nn.relu),
                tf.keras.layers.BatchNormalization(),
                tf.keras.layers.AveragePooling2D(pool_size=[8, 8], strides=8, padding='same'),
                tf.keras.layers.Flatten()
            ])
            # wrap the CNN so it runs on every frame: [b, T, H, W, C] -> [b, T, feat]
            self.td_cnn = tf.keras.layers.TimeDistributed(self.mycnn)
    
            # per-frame features -> hidden dim: units (e.g. 64)
            self.units = units
            # alternative: build the recurrent part from LSTMCell instead of LSTM layers
            #     self.rnn_cell0 = tf.keras.layers.LSTMCell(units, dropout=0.5)
            #     self.rnn_cell1 = tf.keras.layers.LSTMCell(units, dropout=0.5)
            # three stacked LSTMs; only the last one drops the time dimension: [b, T, feat] -> [b, units]
            self.myrnn = tf.keras.Sequential([
                tf.keras.layers.LSTM(units, return_sequences=True, unroll=True),
                tf.keras.layers.LSTM(units, return_sequences=True, unroll=True),
                tf.keras.layers.LSTM(units, unroll=True),
            ])
    
            # fully connected head: [b, units] => [b, 128] => [b, 128] => [b, 1]
            self.myDense = tf.keras.Sequential([
                tf.keras.layers.Dense(128, activation='relu'),
                tf.keras.layers.Dense(128, activation='relu'),
                tf.keras.layers.Dense(1, bias_initializer=tf.keras.initializers.constant(0.))
            ])
    
    
        def call(self, inputs, training=None):
            """
            :param inputs: [b, T, H, W, C] - a batch of frame sequences
            :param training: forwarded to the BatchNormalization / dropout layers
            """
            # [b, T, H, W, C]
            x = tf.cast(inputs, dtype=tf.float32)

            # print('x in:', x.shape)
            # apply the CNN to every frame: [b, T, H, W, C] -> [b, T, feat]
            out = self.td_cnn(x, training=training)
            # print('cnnout:', out.shape)
    
            # stacked LSTMs over the time dimension: [b, T, feat] -> [b, units]
            out = self.myrnn(out, training=training)
            # print('out:', out.shape)
            # out = tf.squeeze(out)
    
            # dense head: [b, units] -> [b, 1]
            x = self.myDense(out, training=training)
            return x
    
    
    # define the optimizer
    learn_rate = 1e-3  # example value; tune for your task
    opt = tf.keras.optimizers.Adam(learning_rate=learn_rate)
    sum_model = MyRnn(128)
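
Below is a minimal smoke test of the model above. The input shape (2 sequences of 5 RGB frames at 80x80), the random targets, and the MSE loss are assumptions added for illustration only; the original post does not specify the data or the loss.

    # dummy batch (assumed shapes): 2 sequences x 5 frames of 80x80x3
    x = tf.random.normal([2, 5, 80, 80, 3])
    y = tf.random.normal([2, 1])

    # one training step with the Adam optimizer defined above
    with tf.GradientTape() as tape:
        pred = sum_model(x, training=True)                    # [2, 1]
        loss = tf.reduce_mean(tf.keras.losses.MSE(y, pred))
    grads = tape.gradient(loss, sum_model.trainable_variables)
    opt.apply_gradients(zip(grads, sum_model.trainable_variables))
    print(pred.shape, float(loss))

Note that `unroll=True` in the LSTM layers requires a statically known sequence length, so the number of frames per sample (5 here) must be fixed at graph-build time.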
• Original post: https://www.cnblogs.com/cxhzy/p/13968503.html