• 二、LSTM处理不定长句子


    参考文献:
    https://zhuanlan.zhihu.com/p/59772104
    https://blog.csdn.net/kejizuiqianfang/article/details/100835528
    https://www.cnblogs.com/picassooo/p/13577527.html
    https://www.jianshu.com/p/043083d114d4
    https://blog.csdn.net/yangyang_yangqi/article/details/84585998

    一、nn.LSTM参数讲解

    import torch
    import torch.nn as nn
    from torch.autograd import Variable
    
    #构建网络模型---输入矩阵特征数input_size、输出矩阵特征数hidden_size、层数num_layers
    inputs = torch.randn(5, 3, 10)   # (seq_len, batch_size, input_size)
    rnn = nn.LSTM(10, 20, 2)         # (input_size, hidden_size, num_layers)
    h0 = torch.randn(2, 3, 20)       # (num_layers * num_directions, batch_size, hidden_size)
    c0 = torch.randn(2, 3, 20)       # (num_layers * num_directions, batch_size, hidden_size)
    # num_directions = 1,因为是单向 LSTM
    '''
    Outputs: output, (h_n, c_n)
    '''
    output,(hn,cn) = rnn(inputs,(h0,c0))

    二、LSTM中不定长句子处理

    import torch
    from torch import nn
    import torch.nn.utils.rnn as rnn_utils
    from torch.utils.data import DataLoader
    import torch.utils.data as data
    
    # Toy batch: two sequences of different lengths (feature dim = 2)
    # and their binary class labels, kept in parallel lists.
    x1 = [torch.tensor(v).float()
          for v in ([[6, 6], [6, 6], [6, 6]],   # length-3 sequence
                    [[7, 7]])]                   # length-1 sequence
    y = [torch.tensor([1]), torch.tensor([0])]
    
    
    
    class MyData(data.Dataset):
        """Map-style dataset over parallel lists of sequences and labels."""

        def __init__(self, data_seq, y):
            # Store references only; the caller's tensors are shared, not copied.
            self.data_seq = data_seq
            self.y = y

        def __len__(self):
            # One example per sequence.
            return len(self.data_seq)

        def __getitem__(self, idx):
            # (sequence, label) pair for sample `idx`.
            return (self.data_seq[idx], self.y[idx])
    def collate_fn(data_tuple):
        """Collate a batch of (sequence, label) pairs for an LSTM.

        Sorts the batch longest-first (required by pack_padded_sequence with
        enforce_sorted=True), zero-pads sequences and labels to a rectangular
        tensor, and returns the original lengths for later packing.
        """
        data_tuple.sort(key=lambda pair: len(pair[0]), reverse=True)
        seqs = [pair[0] for pair in data_tuple]
        labels = [pair[1] for pair in data_tuple]
        lengths = [len(s) for s in seqs]
        padded = rnn_utils.pad_sequence(seqs, batch_first=True, padding_value=0.0)
        labels = rnn_utils.pad_sequence(labels, batch_first=True, padding_value=0.0)
        return padded, labels, lengths
    
    if __name__ == '__main__':
        # Demo: build a DataLoader over variable-length sequences, pad them in
        # collate_fn, then pack the padded batch and feed it through an LSTM.
        learning_rate = 0.001
        # Renamed from `data` so the `torch.utils.data` module alias is not shadowed.
        dataset = MyData(x1, y)
        data_loader = DataLoader(dataset, batch_size=2, shuffle=True,
                                 collate_fn=collate_fn)
        # BUG FIX: `iter(loader).next()` is Python 2 only — the .next() method was
        # removed in Python 3; use the next() builtin on the iterator instead.
        batch_x, y, batch_x_len = next(iter(data_loader))
        print(batch_x)
        print(batch_x.shape)
        print(batch_x_len)
        print(y)
        print(y.shape)
        # collate_fn already sorted the batch longest-first, which
        # pack_padded_sequence requires by default (enforce_sorted=True).
        batch_x_pack = rnn_utils.pack_padded_sequence(batch_x,
                                                      batch_x_len, batch_first=True)
        net = nn.LSTM(input_size=2, hidden_size=10, num_layers=4, batch_first=True)
        criteria = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
        print(batch_x_pack)
        out, (h1, c1) = net(batch_x_pack)

     三、孪生LSTM

    import torch
    from torch import nn
    from torch.utils.data import DataLoader
    import torch.utils.data as data
    
    
    # Paired toy inputs for the siamese network: x1[i] and x2[i] are the two
    # branches of pair i (variable lengths, feature dim = 2); y[i] is its label.
    x1 = [torch.tensor(v).float()
          for v in ([[7, 7]],
                    [[6, 6], [6, 6], [6, 6]])]

    x2 = [torch.tensor(v).float()
          for v in ([[6, 3]],
                    [[6, 3], [3, 6], [6, 6]])]

    y = [torch.tensor([1]), torch.tensor([0])]
    
    class MyData(data.Dataset):
        """Map-style dataset yielding (data1[i], data2[i], y[i]) triples
        for siamese training."""

        def __init__(self, data1, data2, y):
            self.data1 = data1
            self.data2 = data2
            self.y = y

        def __len__(self):
            # Parallel lists: the first branch defines the dataset size.
            return len(self.data1)

        def __getitem__(self, idx):
            return (self.data1[idx], self.data2[idx], self.y[idx])
    
    class SiameseLSTM(nn.Module):
        def __init__(self, input_size):
            super(SiameseLSTM, self).__init__()
            self.lstm = nn.LSTM(input_size=input_size, hidden_size=10, num_layers=4, batch_first=True)
            self.fc = nn.Linear(10, 1)
        def forward(self, data1, data2):
            out1, (h1, c1) = self.lstm(data1)
            out2, (h2, c2) = self.lstm(data2)
            pre1 = out1[:, -1, :]
            pre2 = out2[:, -1, :]
            dis = torch.abs(pre1 - pre2)
            out = self.fc(dis)
            return out
    
    if __name__ == '__main__':
        # Train the siamese LSTM on the toy pairs with a logits-based BCE loss.
        lr = 0.001
        dataset = MyData(x1, x2, y)
        loader = DataLoader(dataset, batch_size=1, shuffle=True)
        net = SiameseLSTM(2)
        criterion = nn.BCEWithLogitsLoss()
        optimizer = torch.optim.Adam(net.parameters(), lr=lr)
        for epoch in range(100):
            for batch_id, (left, right, label) in enumerate(loader):
                score = net(left, right)
                print(score)
                print(label)
                # BCEWithLogitsLoss applies sigmoid internally; target must be float.
                loss = criterion(score, label.float())
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            print(loss)
         
  • 相关阅读:
    vue中Axios的封装和API接口的管理
    如何配置Webpack/Vue-CLI实现前端跨域(附跨域大全)
    前端面试几个重要知识点
    js常用函数
    JS中的枚举和不可枚举
    可配置性属性和不可配置性属性
    Object.create()和深拷贝
    JavaScript 中 call()、apply()、bind() 的用法
    从深入到通俗:Object.prototype.toString.call()
    js原生实现三级联动下拉菜单
  • 原文地址:https://www.cnblogs.com/zhangxianrong/p/14124893.html
Copyright © 2020-2023  润新知