Author: 明天依旧可好
QQ group: 807041986
Original post: https://mtyjkh.blog.csdn.net/article/details/115612319
Series: Deep Learning
My environment: Windows 10, Jupyter Notebook, TensorFlow 2
0. Import packages and set parameters
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from numpy import array
from sklearn import metrics
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM, Bidirectional

# Data smoothing
from scipy.ndimage import gaussian_filter1d
from scipy.signal import medfilt

# Make the results reproducible
from numpy.random import seed
seed(1)
tf.random.set_seed(1)

# Set the parameters
n_timestamp = 24     # number of time steps in each input window
train_days = 3500    # number of days used for training
testing_days = 1500  # number of days used for testing/prediction
n_epochs = 25        # number of training epochs
filter_on = 1        # enable data filtering
# ====================================
# Choose the model:
# 1: single-layer LSTM
# 2: stacked (multi-layer) LSTM
# 3: bidirectional LSTM
# ====================================
model_type = 1
1. Read the data
The raw data is recorded at hourly intervals.
url = "./data/AEP_hourly.csv"
dataset = pd.read_csv(url)
dataset.head()
2. Aggregate the data by day
Using a space as the delimiter, split the Datetime field and drop the time-of-day part.
dataset_new = dataset
for index, row in dataset.iterrows():
    dataset_new["Datetime"][index] = row["Datetime"].split(" ")[0]

dataset_new.head()
After this split, each day has 24 records (one per hour). Next, merge these 24 records into a single daily value.
dataset_new_2 = dataset_new.groupby(by='Datetime')['AEP_MW'].sum() * 0.00001
dict_dataset = {'Datetime': dataset_new_2.index, 'AEP_MW': dataset_new_2.values}
dataset_new_3 = pd.DataFrame(dict_dataset)
dataset_new_3.head()
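As a side note, iterating with iterrows() is slow on an hourly dataset of this size. A vectorized version of the same split-and-aggregate step (a minimal sketch, assuming it is run on the freshly loaded frame and that Datetime is a string like 'YYYY-MM-DD HH:MM:SS'; dataset_daily is a name introduced here) could look like this:

# Vectorized alternative to the iterrows() loop above: strip the time of day,
# then sum the 24 hourly values per day and apply the same 1e-5 scaling.
dataset_daily = (
    dataset.assign(Datetime=dataset["Datetime"].str.split(" ").str[0])
           .groupby("Datetime", as_index=False)["AEP_MW"].sum()
)
dataset_daily["AEP_MW"] = dataset_daily["AEP_MW"] * 0.00001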
3. Data preprocessing
Gaussian filtering and median filtering are applied. For more on median filtering, see my post on median filtering (【中值滤波】).
dataset = dataset_new_3

if filter_on == 1:  # filter the dataset
    dataset['AEP_MW'] = medfilt(dataset['AEP_MW'], 3)              # median filter
    dataset['AEP_MW'] = gaussian_filter1d(dataset['AEP_MW'], 1.2)  # Gaussian filter

dataset
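To get a feel for what the two filters do, here is a tiny self-contained example (the values are made up purely for illustration): medfilt with a kernel of 3 replaces the isolated spike with a local median, and gaussian_filter1d with sigma 1.2 then smooths the remaining curve.

# Toy illustration of the two filters used above (values are arbitrary).
demo = np.array([10., 11., 50., 12., 13., 12., 11.])    # 50. is an isolated spike
demo_med = medfilt(demo, 3)                     # the spike is replaced by a neighbourhood median
demo_smooth = gaussian_filter1d(demo_med, 1.2)  # gentle Gaussian smoothing of the result
print(demo_med)
print(demo_smooth)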
4. Split the data
# Set up the training and test datasets
train_set = dataset[0:train_days].reset_index(drop=True)
test_set = dataset[train_days: train_days+testing_days].reset_index(drop=True)
training_set = train_set.iloc[:, 1:2].values
testing_set = test_set.iloc[:, 1:2].values

# Scale the data to the range 0 to 1
sc = MinMaxScaler(feature_range=(0, 1))
training_set_scaled = sc.fit_transform(training_set)
testing_set_scaled = sc.transform(testing_set)  # reuse the scaler fitted on the training set to avoid leakage
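A tiny illustration of why the scaler is fitted on the training split only (made-up numbers; sc_demo, train_demo, and test_demo are throwaway names): the min/max learned from the training data are reused to transform the test data, so both splits live in the same 0-1 coordinate system and can later be inverted with the same scaler.

# Fit the scaler on "training" values, then reuse it for "test" values.
sc_demo = MinMaxScaler(feature_range=(0, 1))
train_demo = np.array([[10.], [20.], [30.]])
test_demo = np.array([[25.], [35.]])
print(sc_demo.fit_transform(train_demo).ravel())  # [0.  0.5 1. ]
print(sc_demo.transform(test_demo).ravel())       # [0.75 1.25]  (may fall outside 0-1)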
5. Time-stamp (sliding-window) function
# Take the previous n_timestamp days as X and the value on day n_timestamp+1 as Y.
def data_split(sequence, n_timestamp):
    X = []
    y = []
    for i in range(len(sequence)):
        end_ix = i + n_timestamp
        if end_ix > len(sequence) - 1:
            break
        seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
        X.append(seq_x)
        y.append(seq_y)
    return array(X), array(y)

X_train, y_train = data_split(training_set_scaled, n_timestamp)
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
X_test, y_test = data_split(testing_set_scaled, n_timestamp)
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)
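A quick sanity check of the shapes produced by data_split (a toy sequence of 30 scaled values; demo_seq, demo_X and demo_y are names introduced only for this check):

# With a sequence of length 30 and n_timestamp = 24 there are 30 - 24 = 6 windows.
# Each window is already (24, 1) because every scaled value is a length-1 vector,
# so the reshape above is effectively a no-op.
demo_seq = np.arange(30, dtype="float32").reshape(-1, 1)
demo_X, demo_y = data_split(demo_seq, n_timestamp)
print(demo_X.shape, demo_y.shape)   # (6, 24, 1) (6, 1)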
6. Build the model
# Build the LSTM model with Keras
if model_type == 1:
    # Single-layer LSTM
    model = Sequential()
    model.add(LSTM(units=50, activation='relu', input_shape=(X_train.shape[1], 1)))
    model.add(Dense(units=1))
if model_type == 2:
    # Stacked (multi-layer) LSTM
    model = Sequential()
    model.add(LSTM(units=50, activation='relu', return_sequences=True, input_shape=(X_train.shape[1], 1)))
    model.add(LSTM(units=50, activation='relu'))
    model.add(Dense(1))
if model_type == 3:
    # Bidirectional LSTM
    model = Sequential()
    model.add(Bidirectional(LSTM(50, activation='relu'), input_shape=(X_train.shape[1], 1)))
    model.add(Dense(1))

model.summary()  # print the model structure
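One caveat: the imports in step 0 pull layers from the standalone keras package. Under TensorFlow 2 it is usually safer to build the same model from tf.keras so that the backend and the layers come from the same install. A sketch of the single-layer variant, identical architecture, only the import path changes:

# Same single-layer LSTM, built from tf.keras instead of standalone keras.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM

model = Sequential()
model.add(LSTM(units=50, activation='relu', input_shape=(X_train.shape[1], 1)))
model.add(Dense(units=1))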
7. Train the model
# Train the model. A larger batch_size gives a smoother gradient estimate per update, but each step costs more memory and compute.
model.compile(optimizer='adam', loss='mean_squared_error')
history = model.fit(X_train, y_train, epochs=n_epochs, batch_size=32)
loss = history.history['loss']
epochs = range(len(loss))

# Get predictions on the test set
y_predicted = model.predict(X_test)

# Undo the scaling (back to the original units)
y_predicted_descaled = sc.inverse_transform(y_predicted)
y_train_descaled = sc.inverse_transform(y_train)
y_test_descaled = sc.inverse_transform(y_test)
y_pred = y_predicted.ravel()            # flatten the array to 1-D
y_pred = [round(i, 2) for i in y_pred]  # keep two decimal places
y_tested = y_test.ravel()               # flatten the array to 1-D
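The fit call above trains on all of X_train with no validation. If you want to monitor overfitting, one option (a sketch, not part of the original post) is to hold out a slice of the training windows and stop early on the validation loss:

# Optional variant of the model.fit call above: keep 10% of the windows for
# validation and stop once the validation loss has not improved for 5 epochs.
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5,
                                              restore_best_weights=True)
history = model.fit(X_train, y_train, epochs=n_epochs, batch_size=32,
                    validation_split=0.1, callbacks=[early_stop])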
8. Visualize the results
# Run model prediction
y_predicted = model.predict(X_test)

# Plot the prediction results
plt.figure(figsize=(8, 7))

plt.subplot(3, 2, 3)
plt.plot(y_test_descaled, color='black', linewidth=1, label='True value')
plt.plot(y_predicted_descaled, color='red', linewidth=1, label='Predicted')
plt.legend(frameon=False)
plt.ylabel("AEP_MW")
plt.xlabel("Day")
plt.title("Predicted data (n days)")plt.subplot(3, 2, 4)
plt.plot(y_test_descaled[:60], color='black', linewidth=1, label='True value')
plt.plot(y_predicted_descaled[:60], color='red', label='Predicted')
plt.legend(frameon=False)
plt.ylabel("AEP_MW")
plt.xlabel("Day")
plt.title("Predicted data (first 60 days)")plt.subplot(3, 3, 7)
plt.plot(epochs, loss, color='black')
plt.ylabel("Loss")
plt.xlabel("Epoch")
plt.title("Training curve")plt.subplot(3, 3, 8)
plt.plot(y_test_descaled-y_predicted_descaled, color='black')
plt.ylabel("Residual")
plt.xlabel("Day")
plt.title("Residual plot")plt.subplot(3, 3, 9)
plt.scatter(y_predicted_descaled, y_test_descaled, s=2, color='black')
plt.ylabel("Y true")
plt.xlabel("Y predicted")
plt.title("Scatter plot")plt.subplots_adjust(hspace=0.5, wspace=0.3)
plt.show()# 自己定义 MAPE 函数
def mape(y_true, y_pred):return np.mean(np.abs((y_pred - y_true) / y_true)) * 100MSE = metrics.mean_squared_error(y_test_descaled, y_predicted_descaled) # MSE均方误差
RMSE = metrics.mean_squared_error(y_test_descaled, y_predicted_descaled)**0.5
MAE = metrics.mean_absolute_error(y_test_descaled, y_predicted_descaled)  # MAE: mean absolute error
MAPE = mape(y_test_descaled, y_predicted_descaled)
r2 = metrics.r2_score(y_test_descaled, y_predicted_descaled)  # R2: coefficient of determination (goodness of fit); closer to 1 is better

print("MSE = " + str(round(MSE, 5)))
print("RMSE = " + str(round(RMSE, 5)))
print("MAE = " + str(round(MAE, 5)))
print("MAPE = " + str(round(MAPE, 5)))
print("R2 = " + str(round(r2, 5)))
Code and data download: https://download.csdn.net/download/qq_38251616/16651321