• tflearn中一些CNN RNN的例子


    lstm.py

    # -*- coding: utf-8 -*-
    """
    Simple example using LSTM recurrent neural network to classify IMDB
    sentiment dataset.
    References:
        - Long Short Term Memory, Sepp Hochreiter & Jurgen Schmidhuber, Neural
        Computation 9(8): 1735-1780, 1997.
        - Andrew L. Maas, Raymond E. Daly, Peter T. Pham, Dan Huang, Andrew Y. Ng,
        and Christopher Potts. (2011). Learning Word Vectors for Sentiment
        Analysis. The 49th Annual Meeting of the Association for Computational
        Linguistics (ACL 2011).
    Links:
        - http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
        - http://ai.stanford.edu/~amaas/data/sentiment/
    """
    from __future__ import division, print_function, absolute_import

    import tflearn
    from tflearn.data_utils import to_categorical, pad_sequences
    from tflearn.datasets import imdb

    # IMDB Dataset loading: keep the 10,000 most frequent words and hold out
    # 10% of the training split for validation.
    train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000,
                                    valid_portion=0.1)
    trainX, trainY = train
    testX, testY = test

    # Data preprocessing
    # Sequence padding: truncate/pad every review to exactly 100 word ids.
    trainX = pad_sequences(trainX, maxlen=100, value=0.)
    testX = pad_sequences(testX, maxlen=100, value=0.)
    # Converting labels to one-hot binary vectors. nb_classes is passed
    # explicitly: some tflearn versions require it, and inferring the class
    # count from the data is fragile if a split contains a single class.
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Network building
    net = tflearn.input_data([None, 100])
    # input_dim matches n_words above so every word id has an embedding row.
    net = tflearn.embedding(net, input_dim=10000, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=32)

    dynamic_lstm.py

    # -*- coding: utf-8 -*-
    """
    Simple example using a Dynamic RNN (LSTM) to classify IMDB sentiment dataset.
    Dynamic computation are performed over sequences with variable length.
    References:
        - Long Short Term Memory, Sepp Hochreiter & Jurgen Schmidhuber, Neural
        Computation 9(8): 1735-1780, 1997.
        - Andrew L. Maas, Raymond E. Daly, Peter T. Pham, Dan Huang, Andrew Y. Ng,
        and Christopher Potts. (2011). Learning Word Vectors for Sentiment
        Analysis. The 49th Annual Meeting of the Association for Computational
        Linguistics (ACL 2011).
    Links:
        - http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
        - http://ai.stanford.edu/~amaas/data/sentiment/
    """
    from __future__ import division, print_function, absolute_import

    import tflearn
    from tflearn.data_utils import to_categorical, pad_sequences
    from tflearn.datasets import imdb

    # IMDB Dataset loading: keep the 10,000 most frequent words and hold out
    # 10% of the training split for validation.
    train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000,
                                    valid_portion=0.1)
    trainX, trainY = train
    testX, testY = test

    # Data preprocessing
    # NOTE: Padding is required for dimension consistency. This will pad sequences
    # with 0 at the end, until it reaches the max sequence length. 0 is used as a
    # masking value by dynamic RNNs in TFLearn; a sequence length will be
    # retrieved by counting non zero elements in a sequence. Then dynamic RNN step
    # computation is performed according to that length.
    trainX = pad_sequences(trainX, maxlen=100, value=0.)
    testX = pad_sequences(testX, maxlen=100, value=0.)
    # Converting labels to one-hot binary vectors. nb_classes is passed
    # explicitly: some tflearn versions require it, and inferring the class
    # count from the data is fragile if a split contains a single class.
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Network building
    net = tflearn.input_data([None, 100])
    # Masking is not required for embedding, sequence length is computed prior to
    # the embedding op and assigned as 'seq_length' attribute to the returned Tensor.
    net = tflearn.embedding(net, input_dim=10000, output_dim=128)
    # dynamic=True: unroll only up to each sample's actual (non-zero) length.
    net = tflearn.lstm(net, 128, dropout=0.8, dynamic=True)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=32)

    bidirectional_lstm.py

    # -*- coding: utf-8 -*-
    """
    Simple example using a bidirectional LSTM recurrent neural network to
    classify IMDB sentiment dataset.
    References:
        - Long Short Term Memory, Sepp Hochreiter & Jurgen Schmidhuber, Neural
        Computation 9(8): 1735-1780, 1997.
        - Andrew L. Maas, Raymond E. Daly, Peter T. Pham, Dan Huang, Andrew Y. Ng,
        and Christopher Potts. (2011). Learning Word Vectors for Sentiment
        Analysis. The 49th Annual Meeting of the Association for Computational
        Linguistics (ACL 2011).
    Links:
        - http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
        - http://ai.stanford.edu/~amaas/data/sentiment/
    """

    from __future__ import division, print_function, absolute_import

    import tflearn
    from tflearn.data_utils import to_categorical, pad_sequences
    from tflearn.datasets import imdb
    from tflearn.layers.core import input_data, dropout, fully_connected
    from tflearn.layers.embedding_ops import embedding
    from tflearn.layers.recurrent import bidirectional_rnn, BasicLSTMCell
    from tflearn.layers.estimator import regression

    # IMDB Dataset loading: keep the 10,000 most frequent words and hold out
    # 10% of the training split for validation.
    train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000,
                                    valid_portion=0.1)
    trainX, trainY = train
    testX, testY = test

    # Data preprocessing
    # Sequence padding: truncate/pad every review to exactly 200 word ids.
    trainX = pad_sequences(trainX, maxlen=200, value=0.)
    testX = pad_sequences(testX, maxlen=200, value=0.)
    # Converting labels to one-hot binary vectors. nb_classes is passed
    # explicitly: some tflearn versions require it, and inferring the class
    # count from the data is fragile if a split contains a single class.
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Network building
    net = input_data(shape=[None, 200])
    # input_dim matches n_words in imdb.load_data above; the original example
    # used 20000, wasting half the embedding rows on word ids that can never
    # occur with a 10000-word vocabulary.
    net = embedding(net, input_dim=10000, output_dim=128)
    # Forward and backward LSTM cells; their outputs are concatenated.
    net = bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(128))
    net = dropout(net, 0.5)
    net = fully_connected(net, 2, activation='softmax')
    net = regression(net, optimizer='adam', loss='categorical_crossentropy')

    # Training. clip_gradients=0. disables gradient clipping.
    model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
    model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=64)

    cnn_sentence_classification.py

    # -*- coding: utf-8 -*-
    """
    Simple example using convolutional neural network to classify IMDB
    sentiment dataset.
    References:
        - Andrew L. Maas, Raymond E. Daly, Peter T. Pham, Dan Huang, Andrew Y. Ng,
        and Christopher Potts. (2011). Learning Word Vectors for Sentiment
        Analysis. The 49th Annual Meeting of the Association for Computational
        Linguistics (ACL 2011).
        - Kim Y. Convolutional Neural Networks for Sentence Classification[C].
        Empirical Methods in Natural Language Processing, 2014.
    Links:
        - http://ai.stanford.edu/~amaas/data/sentiment/
        - http://emnlp2014.org/papers/pdf/EMNLP2014181.pdf
    """
    from __future__ import division, print_function, absolute_import

    import tensorflow as tf
    import tflearn
    from tflearn.layers.core import input_data, dropout, fully_connected
    from tflearn.layers.conv import conv_1d, global_max_pool
    from tflearn.layers.merge_ops import merge
    from tflearn.layers.estimator import regression
    from tflearn.data_utils import to_categorical, pad_sequences
    from tflearn.datasets import imdb

    # IMDB Dataset loading: keep the 10,000 most frequent words and hold out
    # 10% of the training split for validation.
    train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000,
                                    valid_portion=0.1)
    trainX, trainY = train
    testX, testY = test

    # Data preprocessing
    # Sequence padding: truncate/pad every review to exactly 100 word ids.
    trainX = pad_sequences(trainX, maxlen=100, value=0.)
    testX = pad_sequences(testX, maxlen=100, value=0.)
    # Converting labels to one-hot binary vectors. nb_classes is passed
    # explicitly: some tflearn versions require it, and inferring the class
    # count from the data is fragile if a split contains a single class.
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network (Kim 2014): three parallel 1-D
    # convolutions with filter widths 3, 4 and 5 over the word embeddings,
    # concatenated and max-pooled over time.
    network = input_data(shape=[None, 100], name='input')
    # input_dim matches n_words above so every word id has an embedding row.
    network = tflearn.embedding(network, input_dim=10000, output_dim=128)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    # global_max_pool expects a 4-D tensor; add a dummy spatial dimension.
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY, n_epoch=5, shuffle=True,
              validation_set=(testX, testY), show_metric=True, batch_size=32)
  • 相关阅读:
    Coding styles, code review
    Some links haven't take a look(C++, JS, IE9)
    前端学习,找到一下一些问题的答案
    Browser judgement
    Theme of Google
    Browser Time Line
    迷茫在10点左右……
    WebPageTest 检测web站点性能网站测试工具
    Invoke IFrame/window in cross domain in IE&FF notes
    [解决]多线程中出现由于代码已经过优化或者本机框架位于调用堆栈之上,无法计算表达
  • 原文地址:https://www.cnblogs.com/bonelee/p/8026216.html
Copyright © 2020-2023  润新知