Text sentiment classification:
Text sentiment classification uses the LSTM's final hidden state from the last layer, not the per-timestep outputs.
For example, with a two-layer bidirectional LSTM, concatenate the last layer's forward hidden state and backward hidden state to form the sentence representation.
def forward(self, input):
    '''
    :param input: [batch_size, seq_len]
    :return: log-probabilities over the sentiment classes
    '''
    input_embeded = self.embedding(input)            # [batch_size, seq_len, 200]
    output, (h_n, c_n) = self.lstm(input_embeded)
    # concatenate the last forward hidden state and the last backward hidden state
    out = torch.cat([h_n[-1, :, :], h_n[-2, :, :]], dim=-1)
    # fully connected layer
    out_fc1 = self.fc1(out)
    # ReLU
    out_fc1_relu = F.relu(out_fc1)
    # fully connected layer
    out = self.fc2(out_fc1_relu)
    return F.log_softmax(out, dim=-1)
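For context, here is a minimal sketch of the module definition this forward assumes. The vocabulary size, hidden size, and number of classes are placeholder values; only the embedding dimension (200) comes from the shape comment above.

import torch
import torch.nn as nn
import torch.nn.functional as F

class SentimentModel(nn.Module):
    def __init__(self, vocab_size=10000, embed_dim=200, hidden_size=128, num_classes=2):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        # two-layer bidirectional LSTM; h_n has shape [num_layers * 2, batch_size, hidden_size]
        self.lstm = nn.LSTM(embed_dim, hidden_size, num_layers=2,
                            batch_first=True, bidirectional=True)
        # the concatenated forward/backward states have size hidden_size * 2
        self.fc1 = nn.Linear(hidden_size * 2, hidden_size)
        self.fc2 = nn.Linear(hidden_size, num_classes)

Because h_n is laid out as [num_layers * num_directions, batch_size, hidden_size], h_n[-2] and h_n[-1] are the top layer's forward and backward final states, which is exactly what the forward above concatenates.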
Named entity recognition (NER):
NER needs the full per-timestep output of the LSTM, i.e. the output tensor at every position, not just the final hidden state.
If there is a CRF layer, the entire sequence of outputs is passed to the CRF layer.
def forward(self, words, emdlabel, speechtag, labels, seq_lengths):
    # full per-timestep BiLSTM output: one feature vector per token
    output, _ = self.bilstm(words, emdlabel, speechtag, seq_lengths)
    # map the features at every position to per-tag scores for the CRF
    output = self.logistic(output)
    # CRF negative log-likelihood: partition score minus gold-path score
    pre_score = self.crf(output)
    label_score = self.crf._score_sentence(output, labels)
    return (pre_score - label_score).mean(), None

def predict(self, word, emdlabel, speechtag, seq_lengths):
    lstm_out, _ = self.bilstm(word, emdlabel, speechtag, seq_lengths)
    out = self.logistic(lstm_out)
    # Viterbi decoding over the full sequence of tag scores
    return self.crf.viterbi_decode(out)
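To make the contrast with the sentiment model concrete, below is a minimal sketch of the kind of encoder self.bilstm might wrap. It is not the author's actual module: the embedding sizes, the concatenation of word, emdlabel, and speechtag embeddings, and the use of pack_padded_sequence with seq_lengths are assumptions for illustration. The key point it shows is that the full per-timestep output is returned, rather than only h_n.

import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

class BiLSTMEncoder(nn.Module):
    """Hypothetical encoder returning the full per-timestep output needed for NER."""
    def __init__(self, vocab_size=10000, label_size=50, pos_size=50,
                 embed_dim=100, hidden_size=128):
        super().__init__()
        self.word_emb = nn.Embedding(vocab_size, embed_dim)
        self.label_emb = nn.Embedding(label_size, embed_dim)
        self.pos_emb = nn.Embedding(pos_size, embed_dim)
        self.lstm = nn.LSTM(embed_dim * 3, hidden_size, num_layers=1,
                            batch_first=True, bidirectional=True)

    def forward(self, words, emdlabel, speechtag, seq_lengths):
        # concatenate the three embeddings for each token
        x = torch.cat([self.word_emb(words),
                       self.label_emb(emdlabel),
                       self.pos_emb(speechtag)], dim=-1)
        # seq_lengths: per-sequence lengths (kept on CPU for packing)
        packed = pack_padded_sequence(x, seq_lengths, batch_first=True,
                                      enforce_sorted=False)
        packed_out, (h_n, c_n) = self.lstm(packed)
        # unlike the sentiment model, keep the output at every time step
        output, _ = pad_packed_sequence(packed_out, batch_first=True)
        return output, (h_n, c_n)   # output: [batch_size, seq_len, hidden_size * 2]

A linear projection (self.logistic above) then turns each position's features into per-tag emission scores, and the CRF layer consumes the whole sequence of scores for training and Viterbi decoding.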