• PyTorch 1.0 implementation of RNN for Regression


    import torch
    from torch import nn
    import numpy as np
    import matplotlib.pyplot as plt
    
    # Hyper Parameters
    TIME_STEP = 10  # rnn time step
    INPUT_SIZE = 1  # rnn input size
    LR = 0.02  # learning rate
    
    # generate and show the data
    steps = np.linspace(0, np.pi * 2, 100, dtype=np.float32)  # float32 so it converts to a torch FloatTensor
    x_np = np.sin(steps)  # input
    y_np = np.cos(steps)  # target
    plt.plot(steps, y_np, 'r-', label='target (cos)')
    plt.plot(steps, x_np, 'b-', label='input (sin)')
    plt.legend(loc='best')
    plt.show()
    
    # Define the neural network
    # Every r_out has to go through the Linear layer to get the predicted output, so a for loop can be used to compute it at each time step.
    class RNN(nn.Module):
        def __init__(self):
            super(RNN, self).__init__()  # inherit __init__ from nn.Module
    
            self.rnn = nn.RNN(               # a plain (vanilla) RNN
                input_size=INPUT_SIZE,
                hidden_size=32,  # rnn hidden units (32 neurons)
                num_layers=1,  # number of stacked RNN layers
                batch_first=True,  # input & output tensors have batch size as the first dimension, e.g. (batch, time_step, input_size)
            )
            self.out = nn.Linear(32, 1)
    
        def forward(self, x, h_state):  # the hidden state is carried across calls, so it is passed in and returned on every forward pass
            # x (batch, time_step, input_size)
            # h_state (n_layers, batch, hidden_size)
            # r_out (batch, time_step, hidden_size)
            r_out, h_state = self.rnn(x, h_state)  # h_state is also an input to the RNN
    
            outs = []  # save the prediction at every time step
            for time_step in range(r_out.size(1)):  # compute the output for each time step
                outs.append(self.out(r_out[:, time_step, :]))
            return torch.stack(outs, dim=1), h_state
    
            # alternatively, for simplicity, the loop above can be replaced with the following
            # r_out = r_out.view(-1, 32)
            # outs = self.out(r_out)
            # outs = outs.view(-1, TIME_STEP, 1)
            # return outs, h_state

            # or even simpler: nn.Linear accepts inputs with any number of leading dimensions
            # and only transforms the last one
            # outs = self.out(r_out)
            # return outs, h_state
    
    rnn = RNN()
    print(rnn)
    # choose the optimizer
    optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)  # optimize all rnn parameters
    # choose the loss function
    loss_func = nn.MSELoss()
    
    h_state = None  # initial hidden state; None makes the RNN start from zeros
    
    plt.figure(1, figsize=(12, 5))
    plt.ion()  # continuously plot
    
    for step in range(100):
        start, end = step * np.pi, (step + 1) * np.pi  # time range
        # use sin to predict cos
        steps = np.linspace(start, end, TIME_STEP, dtype=np.float32,
                            endpoint=False)  # float32 so it converts to a torch FloatTensor
        x_np = np.sin(steps)
        y_np = np.cos(steps)
    
        x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis])  # shape (batch, time_step, input_size)
        y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])
    
        prediction, h_state = rnn(x, h_state)  # rnn output
        # !! next step is important !!
        h_state = h_state.data  # repack (detach) the hidden state so gradients do not flow back into previous iterations
    
        loss = loss_func(prediction, y)  # calculate loss
        optimizer.zero_grad()  # clear gradients for this training step
        loss.backward()  # backpropagation, compute gradients
        optimizer.step()  # apply gradients
    
        # plotting
        plt.plot(steps, y_np.flatten(), 'r-')
        plt.plot(steps, prediction.data.numpy().flatten(), 'b-')
        plt.draw()
        plt.pause(0.05)
    
    plt.ioff()
    plt.show()
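
• Shape sanity check (a minimal sketch, not part of the original post): exercising the forward pass once with a dummy batch confirms the shapes noted in the comments: input (batch, time_step, input_size), prediction (batch, time_step, 1), hidden state (num_layers, batch, hidden_size). The names dummy_x, dummy_pred, and dummy_h below are illustrative only.

    dummy_x = torch.zeros(1, TIME_STEP, INPUT_SIZE)  # (batch, time_step, input_size)
    dummy_pred, dummy_h = rnn(dummy_x, None)  # None -> zero initial hidden state
    print(dummy_pred.shape)  # torch.Size([1, 10, 1])  -> (batch, time_step, 1)
    print(dummy_h.shape)  # torch.Size([1, 1, 32])  -> (num_layers, batch, hidden_size)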
  • Original article: https://www.cnblogs.com/jeshy/p/11364755.html