Problem description:
Select the two features Close and Low, feed the model a sliding window of time_steps steps of both features, and predict the next day's value of the single feature Close.
When batch_first=True, the LSTM expects inputs of shape (batch_size, time_steps, input_size), where (see the shape check below):
batch_size = len(data) - time_steps
time_steps = the sliding-window length, called lookback in this project
input_size = 2 (because the two features Close and Low are selected)
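A minimal shape check of the batch_first=True convention (the row count 252 is made up purely for illustration; hidden size 32 and 2 layers match the model defined below):

import torch
import torch.nn as nn

time_steps, input_size, n_rows = 5, 2, 252
batch_size = n_rows - time_steps                      # one window per starting index
x = torch.randn(batch_size, time_steps, input_size)   # (batch_size, time_steps, input_size)
demo_lstm = nn.LSTM(input_size=input_size, hidden_size=32, num_layers=2, batch_first=True)
out, (hn, cn) = demo_lstm(x)
print(out.shape)    # torch.Size([247, 5, 32]); out[:, -1, :] is what the fully connected head consumes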
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
import torch
import torch.nn as nn
import math, time
from sklearn.metrics import mean_squared_error
filepath = './data/rlData.csv'
data = pd.read_csv(filepath)
data = data.sort_values('Date')
data.head()
data.shape
sns.set_style("darkgrid")
plt.figure(figsize = (15,9))
plt.plot(data[['Close']])
plt.xticks(range(0,data.shape[0],20), data['Date'].loc[::20], rotation=45)
plt.title("****** Stock Price",fontsize=18, fontweight='bold')
plt.xlabel('Date',fontsize=18)
plt.ylabel('Close Price (USD)',fontsize=18)
plt.show()
# 1. Feature engineering: select the two features Close and Low
price = data[['Close', 'Low']].copy()
# Fit one scaler per column; `scaler` is fit on Close only, so the later
# scaler.inverse_transform calls recover prices on the Close scale
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler_low = MinMaxScaler(feature_range=(-1, 1))
price['Close'] = scaler.fit_transform(price['Close'].values.reshape(-1, 1))
price['Low'] = scaler_low.fit_transform(price['Low'].values.reshape(-1, 1))
# 2. Build the dataset with a sliding window
def split_data(stock, lookback):
    data_raw = stock.to_numpy()
    data = []
    # each window holds `lookback` consecutive rows of both features
    for index in range(len(data_raw) - lookback):
        data.append(data_raw[index: index + lookback])
    data = np.array(data)
    test_set_size = int(np.round(0.2 * data.shape[0]))
    train_set_size = data.shape[0] - test_set_size
    x_train = data[:train_set_size, :-1, :]    # x_train.shape = (198, 4, 2)
    y_train = data[:train_set_size, -1, 0:1]   # y_train.shape = (198, 1): next-day Close only
    x_test = data[train_set_size:, :-1, :]     # x_test.shape = (49, 4, 2)
    y_test = data[train_set_size:, -1, 0:1]    # y_test.shape = (49, 1)
    return [torch.Tensor(x_train), torch.Tensor(y_train), torch.Tensor(x_test), torch.Tensor(y_test)]
lookback = 5
x_train, y_train, x_test, y_test = split_data(price, lookback)
print('x_train.shape = ',x_train.shape)
print('y_train.shape = ',y_train.shape)
print('x_test.shape = ',x_test.shape)
print('y_test.shape = ',y_test.shape)
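To make the windowing concrete, here is a small self-contained check of split_data on a made-up 7-row, 2-feature series (the numbers are purely illustrative):

toy = pd.DataFrame({'Close': [1., 2., 3., 4., 5., 6., 7.],
                    'Low':   [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5]})
tx_train, ty_train, tx_test, ty_test = split_data(toy, lookback=3)
print(tx_train.shape, ty_train.shape)   # torch.Size([3, 2, 2]) torch.Size([3, 1])
print(tx_train[0])                      # rows 0-1 of both features
print(ty_train[0])                      # Close of row 2, the value being predicted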
class LSTM(nn.Module):
    def __init__(self, input_dim, hidden_dim, num_layers, output_dim):
        super(LSTM, self).__init__()
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # fresh zero states for every batch; detach so the graph does not grow across calls
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim).requires_grad_()
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim).requires_grad_()
        out, (hn, cn) = self.lstm(x, (h0.detach(), c0.detach()))
        # use only the last time step's hidden state to predict the next-day Close
        out = self.fc(out[:, -1, :])
        return out
input_dim = 2
hidden_dim = 32
num_layers = 2
output_dim = 1
num_epochs = 100
model = LSTM(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim, num_layers=num_layers)
criterion = torch.nn.MSELoss()
optimiser = torch.optim.Adam(model.parameters(), lr=0.01)
hist = np.zeros(num_epochs)
lstm = []
start_time = time.time()
for t in range(num_epochs):
    y_train_pred = model(x_train)
    loss = criterion(y_train_pred, y_train)
    hist[t] = loss.item()
    # print("Epoch ", t, "MSE: ", loss.item())
    optimiser.zero_grad()
    loss.backward()
    optimiser.step()
training_time = time.time() - start_time
print("Training time: {}".format(training_time))
predict = pd.DataFrame(scaler.inverse_transform(y_train_pred.detach().numpy()))
original = pd.DataFrame(scaler.inverse_transform(y_train.detach().numpy()))
sns.set_style("darkgrid")
fig = plt.figure()
fig.subplots_adjust(hspace=0.2, wspace=0.2)
plt.subplot(1, 2, 1)
ax = sns.lineplot(x = original.index, y = original[0], label="Data", color='royalblue')
ax = sns.lineplot(x = predict.index, y = predict[0], label="Training Prediction (LSTM)", color='tomato')
ax.set_title('Stock price', size = 14, fontweight='bold')
ax.set_xlabel("Days", size = 14)
ax.set_ylabel("Cost (USD)", size = 14)
ax.set_xticklabels('', size=10)
plt.subplot(1, 2, 2)
ax = sns.lineplot(data=hist, color='royalblue')
ax.set_xlabel("Epoch", size = 14)
ax.set_ylabel("Loss", size = 14)
ax.set_title("Training Loss", size = 14, fontweight='bold')
fig.set_figheight(6)
fig.set_figwidth(16)
# make predictions
y_test_pred = model(x_test)
# invert predictions
y_train_pred = scaler.inverse_transform(y_train_pred.detach().numpy())
y_train = scaler.inverse_transform(y_train.detach().numpy())
y_test_pred = scaler.inverse_transform(y_test_pred.detach().numpy())
y_test = scaler.inverse_transform(y_test.detach().numpy())
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(y_train[:,0], y_train_pred[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(y_test[:,0], y_test_pred[:,0]))
print('Test Score: %.2f RMSE' % (testScore))
lstm.append(trainScore)
lstm.append(testScore)
lstm.append(training_time)
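As a closing sketch (not part of the reference code), the trained model can also be asked for the first truly unseen day by feeding it the most recent lookback-1 rows of the scaled features; this reuses the price, scaler, lookback, and model objects defined above:

last_window = torch.Tensor(price.values[-(lookback - 1):]).unsqueeze(0)  # shape (1, lookback-1, 2)
with torch.no_grad():
    next_scaled = model(last_window)                                     # (1, 1), Close on the scaled range
next_close = scaler.inverse_transform(next_scaled.numpy())[0, 0]
print('Predicted next-day Close: %.2f' % next_close)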
Reference: https://gitee.com/qiangchen_sh/stock-prediction/blob/master/%E4%BB%A3%E7%A0%81/LSTM%E4%BB%8E%E7%90%86%E8%AE%BA%E5%9F%BA%E7%A1%80%E5%88%B0%E4%BB%A3%E7%A0%81%E5%AE%9E%E6%88%98%204%20%E5%A4%9A%E7%BB%B4%E7%89%B9%E5%BE%81%E8%82%A1%E7%A5%A8%E4%BB%B7%E6%A0%BC%E9%A2%84%E6%B5%8B_Pytorch.ipynb