In [ ]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_percentage_error
import tensorflow as tf
from keras import Model
from keras.layers import Input, Dense, Dropout
from keras.layers import LSTM
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('Gold Price (2013-2022).csv')
df.head()
Out[ ]:
|   | Date | Price | Open | High | Low | Vol. | Change % |
|---|---|---|---|---|---|---|---|
| 0 | 12/30/2022 | 1,826.20 | 1,821.80 | 1,832.40 | 1,819.80 | 107.50K | 0.01% |
| 1 | 12/29/2022 | 1,826.00 | 1,812.30 | 1,827.30 | 1,811.20 | 105.99K | 0.56% |
| 2 | 12/28/2022 | 1,815.80 | 1,822.40 | 1,822.80 | 1,804.20 | 118.08K | -0.40% |
| 3 | 12/27/2022 | 1,823.10 | 1,808.20 | 1,841.90 | 1,808.00 | 159.62K | 0.74% |
| 4 | 12/26/2022 | 1,809.70 | 1,805.80 | 1,811.95 | 1,805.55 | NaN | 0.30% |
In [2]:
df.shape
df.info()
df.describe()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 2583 entries, 0 to 2582
Data columns (total 7 columns):
 #   Column    Non-Null Count  Dtype
---  ------    --------------  -----
 0   Date      2583 non-null   object
 1   Price     2583 non-null   object
 2   Open      2583 non-null   object
 3   High      2583 non-null   object
 4   Low       2583 non-null   object
 5   Vol.      2578 non-null   object
 6   Change %  2583 non-null   object
dtypes: object(7)
memory usage: 141.4+ KB
Out[2]:
|   | Date | Price | Open | High | Low | Vol. | Change % |
|---|---|---|---|---|---|---|---|
| count | 2583 | 2583 | 2583 | 2583 | 2583 | 2578 | 2583 |
| unique | 2583 | 2072 | 2061 | 2044 | 2019 | 1550 | 474 |
| top | 12/30/2022 | 1,294.30 | 1,284.00 | 1,220.00 | 1,314.00 | 0.02K | 0.00% |
| freq | 1 | 5 | 5 | 6 | 5 | 48 | 29 |
In [3]:
# Feature subset selection
# Since we will not use the Vol. and Change % features to predict the price, we drop both columns:
df.drop(['Vol.', 'Change %'], axis=1, inplace=True)
In [4]:
# The Date feature is stored in the dataframe as an object dtype. To speed up date-based operations, we convert it to datetime, then sort the rows in ascending chronological order:
df['Date'] = pd.to_datetime(df['Date'])
df.sort_values(by='Date', ascending=True, inplace=True)
df.reset_index(drop=True, inplace=True)
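A small caveat: these dates are in MM/DD/YYYY order, and pd.to_datetime infers the format from the strings. Passing the format explicitly gives the same result here but removes any day/month ambiguity; a minimal sketch, assuming the column layout shown above:

# Hypothetical explicit-format parse; equivalent outcome, but unambiguous:
df['Date'] = pd.to_datetime(df['Date'], format='%m/%d/%Y')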
In [5]:
# The ',' thousands separator is redundant in this dataset. First we strip it from all numeric columns, then cast those columns to float:
NumCols = df.columns.drop(['Date'])
df[NumCols] = df[NumCols].replace({',': ''}, regex=True)
df[NumCols] = df[NumCols].astype('float64')
df.head()
# Count duplicated rows
df.duplicated().sum()
# Count missing values
df.isnull().sum().sum()
Out[5]:
0
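As an aside, the separator could also be handled at load time with read_csv's thousands parameter, which would make the replace/astype step unnecessary; a sketch under that assumption:

# Alternative: strip the ',' separators while reading the file
df = pd.read_csv('Gold Price (2013-2022).csv', thousands=',')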
In [6]:
# Visualize the gold price history
# Interactive gold price chart:
fig = px.line(y=df.Price, x=df.Date)
fig.update_traces(line_color='black')
fig.update_layout(xaxis_title="Date",
                  yaxis_title="Price",
                  title={'text': "Gold Price History Data", 'y': 0.95, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'},
                  plot_bgcolor='rgba(255,223,0,0.8)')
In [7]:
# Split the data into training and test sets
# Because we cannot train on data from the future, a time series must not be split randomly.
# In a time-series split, the test set always comes after the training set. We reserve the last year (2022) for testing and use the rest for training:
test_size = df[df.Date.dt.year==2022].shape[0]
print(test_size)
# Plot the gold price training and test sets
plt.figure(figsize=(15, 6), dpi=150)
plt.rcParams['axes.facecolor'] = 'yellow'
plt.rc('axes',edgecolor='white')
plt.plot(df.Date[:-test_size], df.Price[:-test_size], color='black', lw=2)
plt.plot(df.Date[-test_size:], df.Price[-test_size:], color='blue', lw=2)
plt.title('Gold Price Training and Test Sets', fontsize=15)
plt.xlabel('Date', fontsize=12)
plt.ylabel('Price', fontsize=12)
plt.legend(['Training set', 'Test set'], loc='upper left', prop={'size': 15})
plt.grid(color='white')
plt.show()
260
In [8]:
# Data scaling
# Since the goal is to predict the price purely from its own history, we scale the Price column with MinMaxScaler to keep the network's inputs in a small range and avoid numerically heavy computation:
scaler = MinMaxScaler()
scaler.fit(df.Price.values.reshape(-1,1))
Out[8]:
MinMaxScaler()
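Note that the scaler above is fit on the full Price history, so the 2022 test year influences the scaling range. A stricter variant fits on the training rows only to avoid that leakage; a minimal sketch, assuming test_size as computed in the previous cell (not part of the original run):

# Leakage-free alternative: fit the scaler on the training portion only
scaler = MinMaxScaler()
scaler.fit(df.Price[:-test_size].values.reshape(-1, 1))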
In [9]:
# Restructure the data and create sliding windows
# Using a fixed number of previous time steps to predict the next one is known as a sliding window.
# It lets us reframe the time series as a supervised learning problem:
# the previous time steps become the input variables and the next time step becomes the output variable.
# The number of previous time steps is called the window width; here we set it to 60.
# X_train and X_test will therefore be nested lists in which each inner list holds 60 consecutive prices,
# and y_train and y_test hold the next day's gold price corresponding to each window in X_train and X_test:
window_size = 60
# Training set:
train_data = df.Price[:-test_size]
train_data = scaler.transform(train_data.values.reshape(-1,1))
X_train = []
y_train = []
for i in range(window_size, len(train_data)):
    X_train.append(train_data[i-window_size:i, 0])
    y_train.append(train_data[i, 0])
# Test set:
test_data = df.Price[-test_size-window_size:]
test_data = scaler.transform(test_data.values.reshape(-1,1))
X_test = []
y_test = []
for i in range(window_size, len(test_data)):
    X_test.append(test_data[i-window_size:i, 0])
    y_test.append(test_data[i, 0])
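The two loops above repeat the same window logic, so they can be factored into a single helper; a minimal sketch, where make_windows is a hypothetical name, not part of the original code:

def make_windows(series, window_size):
    # series: 2-D array of scaled prices, shape (n, 1)
    X, y = [], []
    for i in range(window_size, len(series)):
        X.append(series[i-window_size:i, 0])  # the previous window_size prices
        y.append(series[i, 0])                # the next day's price
    return X, y

# X_train, y_train = make_windows(train_data, window_size)
# X_test, y_test = make_windows(test_data, window_size)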
In [10]:
# Convert the data to NumPy arrays
# X_train and X_test are currently nested (2-D) lists and y_train and y_test are 1-D lists. We convert them to higher-dimensional NumPy arrays, the format TensorFlow expects when training a neural network:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_train = np.array(y_train)
y_test = np.array(y_test)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
y_train = np.reshape(y_train, (-1,1))
y_test = np.reshape(y_test, (-1,1))
print('X_train Shape: ', X_train.shape)
print('y_train Shape: ', y_train.shape)
print('X_test Shape: ', X_test.shape)
print('y_test Shape: ', y_test.shape)
X_train Shape:  (2263, 60, 1)
y_train Shape:  (2263, 1)
X_test Shape:  (260, 60, 1)
y_test Shape:  (260, 1)
In [11]:
# Build the LSTM network
# We build an LSTM recurrent neural network, an architecture designed to mitigate the vanishing-gradient problem:
# Model definition:
def define_model():
    input1 = Input(shape=(window_size, 1))
    x = LSTM(units=64, return_sequences=True)(input1)
    x = Dropout(0.2)(x)
    x = LSTM(units=64, return_sequences=True)(x)
    x = Dropout(0.2)(x)
    x = LSTM(units=64)(x)
    x = Dropout(0.2)(x)
    x = Dense(32, activation='softmax')(x)
    dnn_output = Dense(1)(x)

    model = Model(inputs=input1, outputs=[dnn_output])
    model.compile(loss='mean_squared_error', optimizer='Nadam')
    model.summary()
    return model
# Model training:
model = define_model()
history = model.fit(X_train, y_train, epochs=150, batch_size=32, validation_split=0.1, verbose=1)
# Model evaluation: next, we assess the time-series forecasts with the MAPE (mean absolute percentage error) metric:
result = model.evaluate(X_test, y_test)
y_pred = model.predict(X_test)
MAPE = mean_absolute_percentage_error(y_test, y_pred)
Accuracy = 1 - MAPE
print("Test Loss:", result)
print("Test MAPE:", MAPE)
print("Test Accuracy:", Accuracy)
Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 60, 1)] 0
lstm (LSTM) (None, 60, 64) 16896
dropout (Dropout) (None, 60, 64) 0
lstm_1 (LSTM) (None, 60, 64) 33024
dropout_1 (Dropout) (None, 60, 64) 0
lstm_2 (LSTM) (None, 64) 33024
dropout_2 (Dropout) (None, 64) 0
dense (Dense) (None, 32) 2080
dense_1 (Dense) (None, 1) 33
=================================================================
Total params: 85057 (332.25 KB)
Trainable params: 85057 (332.25 KB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
Epoch 1/150
64/64 [==============================] - 8s 53ms/step - loss: 0.0321 - val_loss: 0.0640
Epoch 2/150
64/64 [==============================] - 3s 42ms/step - loss: 0.0100 - val_loss: 0.0292
Epoch 3/150
64/64 [==============================] - 3s 42ms/step - loss: 0.0061 - val_loss: 0.0118
Epoch 4/150
64/64 [==============================] - 3s 41ms/step - loss: 0.0041 - val_loss: 0.0045
Epoch 5/150
64/64 [==============================] - 3s 44ms/step - loss: 0.0028 - val_loss: 0.0022
Epoch 6/150
64/64 [==============================] - 3s 44ms/step - loss: 0.0021 - val_loss: 0.0024
Epoch 7/150
64/64 [==============================] - 3s 45ms/step - loss: 0.0017 - val_loss: 0.0034
Epoch 8/150
64/64 [==============================] - 3s 42ms/step - loss: 0.0014 - val_loss: 0.0050
Epoch 9/150
64/64 [==============================] - 3s 41ms/step - loss: 0.0012 - val_loss: 0.0024
Epoch 10/150
64/64 [==============================] - 3s 41ms/step - loss: 0.0012 - val_loss: 0.0092
Epoch 11/150
64/64 [==============================] - 3s 42ms/step - loss: 0.0011 - val_loss: 0.0096
Epoch 12/150
64/64 [==============================] - 3s 41ms/step - loss: 0.0011 - val_loss: 0.0026
Epoch 13/150
64/64 [==============================] - 3s 43ms/step - loss: 9.6190e-04 - val_loss: 0.0026
Epoch 14/150
64/64 [==============================] - 3s 43ms/step - loss: 9.7260e-04 - val_loss: 0.0012
Epoch 15/150
64/64 [==============================] - 3s 41ms/step - loss: 9.3294e-04 - val_loss: 0.0024
Epoch 16/150
64/64 [==============================] - 3s 42ms/step - loss: 8.6953e-04 - val_loss: 0.0025
Epoch 17/150
64/64 [==============================] - 3s 42ms/step - loss: 8.4747e-04 - val_loss: 9.2853e-04
Epoch 18/150
64/64 [==============================] - 3s 42ms/step - loss: 8.1358e-04 - val_loss: 0.0016
Epoch 19/150
64/64 [==============================] - 3s 43ms/step - loss: 7.5372e-04 - val_loss: 0.0011
Epoch 20/150
64/64 [==============================] - 3s 42ms/step - loss: 7.4762e-04 - val_loss: 0.0047
Epoch 21/150
64/64 [==============================] - 3s 43ms/step - loss: 7.0025e-04 - val_loss: 0.0013
Epoch 22/150
64/64 [==============================] - 3s 43ms/step - loss: 6.6522e-04 - val_loss: 0.0016
Epoch 23/150
64/64 [==============================] - 3s 44ms/step - loss: 6.4477e-04 - val_loss: 0.0014
Epoch 24/150
64/64 [==============================] - 3s 43ms/step - loss: 6.8105e-04 - val_loss: 9.0749e-04
Epoch 25/150
64/64 [==============================] - 3s 41ms/step - loss: 6.1246e-04 - val_loss: 0.0044
Epoch 26/150
64/64 [==============================] - 3s 42ms/step - loss: 6.3749e-04 - val_loss: 8.5409e-04
Epoch 27/150
64/64 [==============================] - 3s 42ms/step - loss: 6.2825e-04 - val_loss: 8.9216e-04
Epoch 28/150
64/64 [==============================] - 3s 41ms/step - loss: 6.0927e-04 - val_loss: 0.0012
Epoch 29/150
64/64 [==============================] - 3s 41ms/step - loss: 6.0065e-04 - val_loss: 8.8506e-04
Epoch 30/150
64/64 [==============================] - 3s 43ms/step - loss: 5.5441e-04 - val_loss: 8.9730e-04
Epoch 31/150
64/64 [==============================] - 3s 44ms/step - loss: 5.0762e-04 - val_loss: 0.0015
Epoch 32/150
64/64 [==============================] - 3s 44ms/step - loss: 5.1099e-04 - val_loss: 6.6454e-04
Epoch 33/150
64/64 [==============================] - 3s 42ms/step - loss: 5.4684e-04 - val_loss: 5.1916e-04
Epoch 34/150
64/64 [==============================] - 3s 41ms/step - loss: 5.2745e-04 - val_loss: 0.0017
Epoch 35/150
64/64 [==============================] - 3s 41ms/step - loss: 5.0746e-04 - val_loss: 0.0021
Epoch 36/150
64/64 [==============================] - 3s 41ms/step - loss: 5.1558e-04 - val_loss: 6.7546e-04
Epoch 37/150
64/64 [==============================] - 3s 41ms/step - loss: 4.9545e-04 - val_loss: 4.6143e-04
Epoch 38/150
64/64 [==============================] - 3s 41ms/step - loss: 4.8045e-04 - val_loss: 0.0012
Epoch 39/150
64/64 [==============================] - 3s 41ms/step - loss: 4.8094e-04 - val_loss: 5.3234e-04
Epoch 40/150
64/64 [==============================] - 3s 43ms/step - loss: 4.6742e-04 - val_loss: 7.1383e-04
Epoch 41/150
64/64 [==============================] - 3s 42ms/step - loss: 4.9598e-04 - val_loss: 7.0685e-04
Epoch 42/150
64/64 [==============================] - 3s 41ms/step - loss: 4.9236e-04 - val_loss: 7.0716e-04
Epoch 43/150
64/64 [==============================] - 3s 41ms/step - loss: 4.8243e-04 - val_loss: 6.7091e-04
Epoch 44/150
64/64 [==============================] - 3s 42ms/step - loss: 4.3031e-04 - val_loss: 5.3525e-04
Epoch 45/150
64/64 [==============================] - 3s 41ms/step - loss: 4.5661e-04 - val_loss: 0.0011
Epoch 46/150
64/64 [==============================] - 3s 41ms/step - loss: 4.6853e-04 - val_loss: 4.6639e-04
Epoch 47/150
64/64 [==============================] - 3s 43ms/step - loss: 4.6719e-04 - val_loss: 0.0013
Epoch 48/150
64/64 [==============================] - 3s 43ms/step - loss: 4.2529e-04 - val_loss: 0.0010
Epoch 49/150
64/64 [==============================] - 3s 42ms/step - loss: 4.2058e-04 - val_loss: 5.8498e-04
Epoch 50/150
64/64 [==============================] - 3s 41ms/step - loss: 4.4022e-04 - val_loss: 6.7118e-04
Epoch 51/150
64/64 [==============================] - 3s 41ms/step - loss: 4.2780e-04 - val_loss: 4.0248e-04
Epoch 52/150
64/64 [==============================] - 3s 41ms/step - loss: 4.1235e-04 - val_loss: 8.6338e-04
Epoch 53/150
64/64 [==============================] - 3s 41ms/step - loss: 4.1155e-04 - val_loss: 0.0016
Epoch 54/150
64/64 [==============================] - 3s 41ms/step - loss: 4.2123e-04 - val_loss: 0.0013
Epoch 55/150
64/64 [==============================] - 3s 43ms/step - loss: 4.2052e-04 - val_loss: 0.0017
Epoch 56/150
64/64 [==============================] - 3s 42ms/step - loss: 4.1417e-04 - val_loss: 4.4881e-04
Epoch 57/150
64/64 [==============================] - 3s 42ms/step - loss: 4.2485e-04 - val_loss: 0.0011
Epoch 58/150
64/64 [==============================] - 3s 41ms/step - loss: 4.2969e-04 - val_loss: 5.1153e-04
Epoch 59/150
64/64 [==============================] - 3s 41ms/step - loss: 3.8521e-04 - val_loss: 9.0119e-04
Epoch 60/150
64/64 [==============================] - 3s 43ms/step - loss: 4.1602e-04 - val_loss: 6.4123e-04
Epoch 61/150
64/64 [==============================] - 3s 41ms/step - loss: 4.0514e-04 - val_loss: 4.6810e-04
Epoch 62/150
64/64 [==============================] - 3s 40ms/step - loss: 3.9204e-04 - val_loss: 4.6806e-04
Epoch 63/150
64/64 [==============================] - 3s 41ms/step - loss: 3.9732e-04 - val_loss: 3.9156e-04
Epoch 64/150
64/64 [==============================] - 3s 41ms/step - loss: 4.0976e-04 - val_loss: 4.0244e-04
Epoch 65/150
64/64 [==============================] - 3s 41ms/step - loss: 3.7852e-04 - val_loss: 0.0023
Epoch 66/150
64/64 [==============================] - 3s 40ms/step - loss: 4.0495e-04 - val_loss: 4.5557e-04
Epoch 67/150
64/64 [==============================] - 3s 43ms/step - loss: 3.8202e-04 - val_loss: 4.8514e-04
Epoch 68/150
64/64 [==============================] - 3s 42ms/step - loss: 3.8470e-04 - val_loss: 0.0028
Epoch 69/150
64/64 [==============================] - 3s 42ms/step - loss: 3.6568e-04 - val_loss: 7.6983e-04
Epoch 70/150
64/64 [==============================] - 3s 41ms/step - loss: 3.6573e-04 - val_loss: 0.0030
Epoch 71/150
64/64 [==============================] - 3s 43ms/step - loss: 3.6053e-04 - val_loss: 0.0015
Epoch 72/150
64/64 [==============================] - 3s 43ms/step - loss: 3.5913e-04 - val_loss: 3.9100e-04
Epoch 73/150
64/64 [==============================] - 3s 42ms/step - loss: 3.8915e-04 - val_loss: 4.4214e-04
Epoch 74/150
64/64 [==============================] - 3s 41ms/step - loss: 3.7659e-04 - val_loss: 0.0013
Epoch 75/150
64/64 [==============================] - 3s 41ms/step - loss: 3.7555e-04 - val_loss: 6.8412e-04
Epoch 76/150
64/64 [==============================] - 3s 41ms/step - loss: 3.5001e-04 - val_loss: 6.8944e-04
Epoch 77/150
64/64 [==============================] - 3s 41ms/step - loss: 3.5941e-04 - val_loss: 0.0012
Epoch 78/150
64/64 [==============================] - 3s 41ms/step - loss: 3.4940e-04 - val_loss: 4.4683e-04
Epoch 79/150
64/64 [==============================] - 3s 42ms/step - loss: 3.6179e-04 - val_loss: 5.6628e-04
Epoch 80/150
64/64 [==============================] - 3s 43ms/step - loss: 3.4766e-04 - val_loss: 0.0013
Epoch 81/150
64/64 [==============================] - 3s 43ms/step - loss: 3.6868e-04 - val_loss: 6.6750e-04
Epoch 82/150
64/64 [==============================] - 3s 42ms/step - loss: 3.9199e-04 - val_loss: 4.7699e-04
Epoch 83/150
64/64 [==============================] - 3s 43ms/step - loss: 3.2747e-04 - val_loss: 4.8085e-04
Epoch 84/150
64/64 [==============================] - 3s 42ms/step - loss: 3.9831e-04 - val_loss: 3.7725e-04
Epoch 85/150
64/64 [==============================] - 3s 42ms/step - loss: 3.3774e-04 - val_loss: 9.9461e-04
Epoch 86/150
64/64 [==============================] - 3s 41ms/step - loss: 3.6334e-04 - val_loss: 6.1154e-04
Epoch 87/150
64/64 [==============================] - 3s 41ms/step - loss: 3.5918e-04 - val_loss: 5.7359e-04
Epoch 88/150
64/64 [==============================] - 3s 41ms/step - loss: 3.4191e-04 - val_loss: 0.0015
Epoch 89/150
64/64 [==============================] - 3s 40ms/step - loss: 3.6416e-04 - val_loss: 0.0010
Epoch 90/150
64/64 [==============================] - 3s 40ms/step - loss: 3.6271e-04 - val_loss: 0.0011
Epoch 91/150
64/64 [==============================] - 3s 43ms/step - loss: 3.3302e-04 - val_loss: 7.3398e-04
Epoch 92/150
64/64 [==============================] - 3s 42ms/step - loss: 3.3337e-04 - val_loss: 6.0961e-04
Epoch 93/150
64/64 [==============================] - 3s 40ms/step - loss: 3.5242e-04 - val_loss: 3.8243e-04
Epoch 94/150
64/64 [==============================] - 3s 40ms/step - loss: 3.4051e-04 - val_loss: 8.4704e-04
Epoch 95/150
64/64 [==============================] - 3s 41ms/step - loss: 3.3330e-04 - val_loss: 6.3142e-04
Epoch 96/150
64/64 [==============================] - 3s 41ms/step - loss: 3.6388e-04 - val_loss: 6.3372e-04
Epoch 97/150
64/64 [==============================] - 3s 41ms/step - loss: 3.4811e-04 - val_loss: 0.0012
Epoch 98/150
64/64 [==============================] - 3s 40ms/step - loss: 3.4982e-04 - val_loss: 5.6578e-04
Epoch 99/150
64/64 [==============================] - 3s 41ms/step - loss: 3.3289e-04 - val_loss: 5.2191e-04
Epoch 100/150
64/64 [==============================] - 3s 42ms/step - loss: 3.6167e-04 - val_loss: 0.0018
Epoch 101/150
64/64 [==============================] - 3s 43ms/step - loss: 3.4215e-04 - val_loss: 6.3912e-04
Epoch 102/150
64/64 [==============================] - 3s 42ms/step - loss: 3.4477e-04 - val_loss: 0.0025
Epoch 103/150
64/64 [==============================] - 3s 41ms/step - loss: 3.3719e-04 - val_loss: 0.0011
Epoch 104/150
64/64 [==============================] - 3s 41ms/step - loss: 3.5603e-04 - val_loss: 5.1924e-04
Epoch 105/150
64/64 [==============================] - 3s 41ms/step - loss: 3.2249e-04 - val_loss: 4.8991e-04
Epoch 106/150
64/64 [==============================] - 3s 42ms/step - loss: 3.1695e-04 - val_loss: 6.0637e-04
Epoch 107/150
64/64 [==============================] - 3s 41ms/step - loss: 3.0839e-04 - val_loss: 5.6497e-04
Epoch 108/150
64/64 [==============================] - 3s 41ms/step - loss: 3.4659e-04 - val_loss: 6.7425e-04
Epoch 109/150
64/64 [==============================] - 3s 41ms/step - loss: 3.1914e-04 - val_loss: 0.0016
Epoch 110/150
64/64 [==============================] - 3s 41ms/step - loss: 3.4871e-04 - val_loss: 5.9434e-04
Epoch 111/150
64/64 [==============================] - 3s 41ms/step - loss: 3.1861e-04 - val_loss: 6.1605e-04
Epoch 112/150
64/64 [==============================] - 3s 40ms/step - loss: 3.0933e-04 - val_loss: 4.4084e-04
Epoch 113/150
64/64 [==============================] - 3s 41ms/step - loss: 3.4101e-04 - val_loss: 8.0315e-04
Epoch 114/150
64/64 [==============================] - 3s 41ms/step - loss: 3.1974e-04 - val_loss: 8.3539e-04
Epoch 115/150
64/64 [==============================] - 3s 41ms/step - loss: 3.1868e-04 - val_loss: 0.0025
Epoch 116/150
64/64 [==============================] - 3s 41ms/step - loss: 3.2427e-04 - val_loss: 0.0012
Epoch 117/150
64/64 [==============================] - 3s 41ms/step - loss: 3.4272e-04 - val_loss: 6.0519e-04
Epoch 118/150
64/64 [==============================] - 3s 41ms/step - loss: 3.0579e-04 - val_loss: 8.5936e-04
Epoch 119/150
64/64 [==============================] - 3s 41ms/step - loss: 3.0158e-04 - val_loss: 7.0885e-04
Epoch 120/150
64/64 [==============================] - 3s 41ms/step - loss: 3.2929e-04 - val_loss: 6.2783e-04
Epoch 121/150
64/64 [==============================] - 3s 41ms/step - loss: 3.0826e-04 - val_loss: 0.0015
Epoch 122/150
64/64 [==============================] - 3s 41ms/step - loss: 3.2006e-04 - val_loss: 8.0327e-04
Epoch 123/150
64/64 [==============================] - 3s 41ms/step - loss: 3.1442e-04 - val_loss: 7.2547e-04
Epoch 124/150
64/64 [==============================] - 3s 41ms/step - loss: 3.0459e-04 - val_loss: 0.0020
Epoch 125/150
64/64 [==============================] - 3s 40ms/step - loss: 3.1910e-04 - val_loss: 0.0016
Epoch 126/150
64/64 [==============================] - 3s 40ms/step - loss: 3.3021e-04 - val_loss: 7.8316e-04
Epoch 127/150
64/64 [==============================] - 3s 41ms/step - loss: 3.2082e-04 - val_loss: 6.8615e-04
Epoch 128/150
64/64 [==============================] - 3s 40ms/step - loss: 3.0147e-04 - val_loss: 5.9937e-04
Epoch 129/150
64/64 [==============================] - 3s 40ms/step - loss: 3.1647e-04 - val_loss: 8.4360e-04
Epoch 130/150
64/64 [==============================] - 3s 40ms/step - loss: 3.2435e-04 - val_loss: 6.4242e-04
Epoch 131/150
64/64 [==============================] - 3s 41ms/step - loss: 3.1356e-04 - val_loss: 0.0016
Epoch 132/150
64/64 [==============================] - 3s 40ms/step - loss: 3.2585e-04 - val_loss: 0.0011
Epoch 133/150
64/64 [==============================] - 3s 41ms/step - loss: 3.1214e-04 - val_loss: 0.0010
Epoch 134/150
64/64 [==============================] - 3s 41ms/step - loss: 3.0725e-04 - val_loss: 8.7106e-04
Epoch 135/150
64/64 [==============================] - 3s 41ms/step - loss: 3.2175e-04 - val_loss: 0.0014
Epoch 136/150
64/64 [==============================] - 3s 41ms/step - loss: 3.2994e-04 - val_loss: 0.0012
Epoch 137/150
64/64 [==============================] - 3s 41ms/step - loss: 3.0606e-04 - val_loss: 0.0012
Epoch 138/150
64/64 [==============================] - 3s 40ms/step - loss: 2.9687e-04 - val_loss: 8.2415e-04
Epoch 139/150
64/64 [==============================] - 3s 42ms/step - loss: 2.9647e-04 - val_loss: 6.2688e-04
Epoch 140/150
64/64 [==============================] - 3s 41ms/step - loss: 3.2118e-04 - val_loss: 0.0010
Epoch 141/150
64/64 [==============================] - 3s 41ms/step - loss: 3.1725e-04 - val_loss: 6.5988e-04
Epoch 142/150
64/64 [==============================] - 3s 40ms/step - loss: 3.0632e-04 - val_loss: 9.2993e-04
Epoch 143/150
64/64 [==============================] - 3s 41ms/step - loss: 2.9337e-04 - val_loss: 7.8898e-04
Epoch 144/150
64/64 [==============================] - 3s 41ms/step - loss: 3.1445e-04 - val_loss: 6.9149e-04
Epoch 145/150
64/64 [==============================] - 3s 40ms/step - loss: 3.0692e-04 - val_loss: 0.0012
Epoch 146/150
64/64 [==============================] - 3s 40ms/step - loss: 2.9822e-04 - val_loss: 8.4767e-04
Epoch 147/150
64/64 [==============================] - 3s 40ms/step - loss: 2.9466e-04 - val_loss: 0.0015
Epoch 148/150
64/64 [==============================] - 3s 41ms/step - loss: 2.9781e-04 - val_loss: 0.0016
Epoch 149/150
64/64 [==============================] - 3s 41ms/step - loss: 3.1984e-04 - val_loss: 6.5720e-04
Epoch 150/150
64/64 [==============================] - 3s 41ms/step - loss: 3.0368e-04 - val_loss: 0.0010
9/9 [==============================] - 0s 15ms/step - loss: 8.8753e-04
9/9 [==============================] - 1s 15ms/step
Test Loss: 0.0008875320199877024
Test MAPE: 0.033008416793448715
Test Accuracy: 0.9669915832065513
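The validation loss above fluctuates noticeably from epoch to epoch, so the final weights are not necessarily the best ones seen during training. If you want to guard against that, Keras callbacks such as EarlyStopping and ModelCheckpoint can keep the best weights; a sketch, not part of the original run:

from keras.callbacks import EarlyStopping, ModelCheckpoint

# Stop once val_loss hasn't improved for 15 epochs, restoring the best weights seen
callbacks = [
    EarlyStopping(monitor='val_loss', patience=15, restore_best_weights=True),
    ModelCheckpoint('best_model.keras', monitor='val_loss', save_best_only=True),
]
# history = model.fit(X_train, y_train, epochs=150, batch_size=32,
#                     validation_split=0.1, callbacks=callbacks, verbose=1)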
In [15]:
# Visualize the results
# Bring the actual and predicted Price values back to their original scale:
y_test_true = scaler.inverse_transform(y_test)
y_test_pred = scaler.inverse_transform(y_pred)
# Examine how closely the prices predicted by the model track the actual prices:
plt.figure(figsize=(15, 6), dpi=150)
plt.rcParams['axes.facecolor'] = 'yellow'
plt.rc('axes',edgecolor='white')
plt.plot(df['Date'].iloc[:-test_size], scaler.inverse_transform(train_data), color='black', lw=2)
plt.plot(df['Date'].iloc[-test_size:], y_test_true, color='blue', lw=2)
plt.plot(df['Date'].iloc[-test_size:], y_test_pred, color='red', lw=2)
plt.title('Model Performance on Gold Price Prediction', fontsize=15)
plt.xlabel('Date', fontsize=12)
plt.ylabel('Price', fontsize=12)
plt.legend(['Training Data', 'Actual Test Data', 'Predicted Test Data'], loc='upper left', prop={'size': 15})
plt.grid(color='white')
plt.show()
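Two follow-ups worth noting. First, the MAPE above was computed on min-max-scaled values, where denominators near 0 can distort percentages; recomputing it on the original price scale is more interpretable. Second, the fitted model can produce a one-step-ahead forecast from the most recent window. A sketch of both, assuming the variables defined in the cells above:

# MAPE on the original price scale (more interpretable than on scaled values)
mape_orig = mean_absolute_percentage_error(y_test_true, y_test_pred)
print("Test MAPE (original scale):", mape_orig)

# One-step-ahead forecast from the last window_size observed prices
last_window = scaler.transform(df.Price[-window_size:].values.reshape(-1, 1))
next_scaled = model.predict(last_window.reshape(1, window_size, 1))
next_price = scaler.inverse_transform(next_scaled)[0, 0]
print("Predicted next-day price:", next_price)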
In [ ]: