In [2]:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dropout, BatchNormalization, Dense
# To read the CSVs, please download the files here:
# Training data
# https://drive.google.com/file/d/1l2JEaxxpmYXxWjKxsEYRn4zbJhc0EZpf/view?usp=sharing
# Testing data
# https://drive.google.com/file/d/1ajUGJ1X338kOjm4ZNwRGV60xiZ9bgMq2/view?usp=sharing
# If you are using Colab, after downloading them, upload both files to the working directory on the left.
# Then you can run the read_csv calls below.
# If you are using a local computer, modify the file paths below accordingly.
dataset_train = pd.read_csv('Google_Stock_Price_Train.csv')
dataset_train.head()
Out[2]:
| | Date | Open | High | Low | Close | Volume |
|---|---|---|---|---|---|---|
| 0 | 01/03/2012 | 325.25 | 332.83 | 324.97 | 663.59 | 7,380,500 |
| 1 | 01/04/2012 | 331.27 | 333.87 | 329.08 | 666.45 | 5,749,400 |
| 2 | 01/05/2012 | 329.83 | 330.75 | 326.89 | 657.21 | 6,590,300 |
| 3 | 01/06/2012 | 328.34 | 328.77 | 323.68 | 648.24 | 5,405,900 |
| 4 | 01/09/2012 | 322.04 | 322.29 | 309.46 | 620.76 | 11,688,800 |
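Note that Volume is parsed as strings because of the thousands separators, and Date is a plain string. Only the Open column is used below, so this is optional, but a minimal cleaning sketch (assuming the format shown above) would be:

dataset_train['Volume'] = dataset_train['Volume'].str.replace(',', '').astype(float)
dataset_train['Date'] = pd.to_datetime(dataset_train['Date'], format='%m/%d/%Y')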
In [13]:
#!pip install --upgrade tensorflow
In [3]:
# Keras only accepts NumPy arrays; iloc[:, 1:2] keeps the 2-D (n, 1) shape
training_set = dataset_train.iloc[:, 1:2].values
training_set.shape
plt.figure(figsize=(6,3))
plt.plot(dataset_train['Open'])
plt.title("Google Stock Open Prices")
plt.xlabel("Time (oldest -> latest)")
plt.ylabel("Stock Open Price")
plt.show()
In [4]:
sc = MinMaxScaler(feature_range=(0, 1))
# fit learns the min/max of the training data; transform applies the scaling
training_set_scaled = sc.fit_transform(training_set)
print(training_set.shape)
print(training_set_scaled.shape)
(1509, 1)
(1509, 1)
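For reference, with feature_range=(0, 1) the scaler computes x_scaled = (x - min) / (max - min), where min and max come from fit on the training data. A quick sanity check (an illustrative addition, not in the original cell):

lo, hi = sc.data_min_[0], sc.data_max_[0]   # min/max learned by fit
manual = (training_set - lo) / (hi - lo)
print(np.allclose(manual, training_set_scaled))  # expect True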
In [5]:
# 60 timesteps per sample, 1 output (the next day's price)
steps = 60
X_train = []
y_train = []
for i in range(steps, len(training_set_scaled)):
    X_train.append(training_set_scaled[i-steps: i, 0])
    y_train.append(training_set_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
# reshape to (samples, timesteps, features) as expected by Keras RNN layers
X_train = np.reshape(X_train, newshape=(X_train.shape[0], X_train.shape[1], 1))
X_train.shape
X_train.shape
Out[5]:
(1449, 60, 1)
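The loop builds overlapping windows: X_train[i] holds 60 consecutive scaled prices and y_train[i] is the price immediately after them, so 1509 points yield 1509 - 60 = 1449 samples. A toy sketch of the same windowing (names here are illustrative only):

toy = np.arange(8)    # pretend these are 8 scaled prices
w = 3                 # window length (60 in the real code)
X_toy = np.array([toy[i - w:i] for i in range(w, len(toy))])
y_toy = toy[w:]
print(X_toy.shape, y_toy.shape)  # (5, 3) (5,)
print(X_toy[0], y_toy[0])        # [0 1 2] 3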
In [6]:
dataset_test = pd.read_csv('Google_Stock_Price_Test.csv')
dataset_test.head()
# Keras only accepts NumPy arrays
real_stock_price = dataset_test.iloc[:, 1:2].values
real_stock_price.shape
# axis=0 concatenates vertically (stacks rows); axis=1 would concatenate horizontally
dataset_total = pd.concat((dataset_train['Open'], dataset_test['Open']),
                          axis=0)
# use .values to get a NumPy array
inputs = dataset_total[len(dataset_total) - len(dataset_test) - steps:].values
# reshape the data to a single column
inputs = inputs.reshape(-1, 1)
# scale the inputs with the scaler fitted on the training data (transform only, no re-fit)
inputs = sc.transform(inputs)
X_test = []
for i in range(steps, len(inputs)):
    X_test.append(inputs[i-steps:i, 0])
X_test = np.array(X_test)
# add the feature (indicator) dimension
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
print(X_test.shape)
(125, 60, 1)
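Because each test window needs the 60 days before it, inputs starts 60 rows before the test period. A hedged sanity check (an addition, assuming the shapes printed above) that the first test window is exactly the last 60 training days:

last_train = dataset_train['Open'].values[-steps:].reshape(-1, 1)
assert np.allclose(X_test[0, :, 0], sc.transform(last_train)[:, 0])
print(X_test.shape[0] == len(dataset_test))  # expect True: one window per test day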
In [7]:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dropout, Dense
# Initialize the Sequential model
model = Sequential()
# Add 1st LSTM layer (return_sequences=True so the next LSTM layer sees the whole sequence)
model.add(LSTM(units=52, return_sequences=True, input_shape=(X_train.shape[1], 1),
               activation='relu',                # activation for the cell candidate and output
               recurrent_activation='sigmoid'))  # activation for the gates
#model.add(Dropout(rate=0.3))
# Add 2nd LSTM layer (return_sequences=False, since it's the last LSTM layer)
model.add(LSTM(units=16, return_sequences=False,
               activation='relu',
               recurrent_activation='sigmoid'))
#model.add(Dropout(rate=0.3))
# Add output layer (Dense layer with one output for regression)
model.add(Dense(units=1, activation='relu'))
# Compile the model
model.compile(optimizer='adam', loss='mean_squared_error')
# Summary of the model
print(model.summary())
model.fit(x=X_train, y=y_train, batch_size=32, epochs=30, validation_split=0.2)
predicted_stock_price = model.predict(X_test)
# invert the scaling to get predictions back in price units
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
/usr/local/lib/python3.10/dist-packages/keras/src/layers/rnn/rnn.py:204: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(**kwargs)
Model: "sequential"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃ Layer (type)                 ┃ Output Shape       ┃    Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ lstm (LSTM)                  │ (None, 60, 52)     │     11,232 │
├──────────────────────────────┼────────────────────┼────────────┤
│ lstm_1 (LSTM)                │ (None, 16)         │      4,416 │
├──────────────────────────────┼────────────────────┼────────────┤
│ dense (Dense)                │ (None, 1)          │         17 │
└──────────────────────────────┴────────────────────┴────────────┘
Total params: 15,665 (61.19 KB)
Trainable params: 15,665 (61.19 KB)
Non-trainable params: 0 (0.00 B)
None
Epoch 1/30  37/37 ━━━━━━━━━━━━━━━━━━━━ 6s 73ms/step - loss: 0.0750 - val_loss: 0.0018
Epoch 2/30  37/37 ━━━━━━━━━━━━━━━━━━━━ 4s 58ms/step - loss: 0.0014 - val_loss: 0.0083
Epoch 3/30  37/37 ━━━━━━━━━━━━━━━━━━━━ 3s 85ms/step - loss: 6.4948e-04 - val_loss: 0.0042
Epoch 4/30  37/37 ━━━━━━━━━━━━━━━━━━━━ 4s 51ms/step - loss: 6.4922e-04 - val_loss: 0.0037
Epoch 5/30  37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 54ms/step - loss: 5.3546e-04 - val_loss: 0.0029
Epoch 6/30  37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 52ms/step - loss: 5.1008e-04 - val_loss: 0.0055
Epoch 7/30  37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 61ms/step - loss: 5.2052e-04 - val_loss: 0.0019
Epoch 8/30  37/37 ━━━━━━━━━━━━━━━━━━━━ 3s 81ms/step - loss: 5.4289e-04 - val_loss: 0.0017
Epoch 9/30  37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 50ms/step - loss: 5.3451e-04 - val_loss: 0.0029
Epoch 10/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 49ms/step - loss: 4.9438e-04 - val_loss: 0.0047
Epoch 11/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 49ms/step - loss: 5.5105e-04 - val_loss: 0.0011
Epoch 12/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 49ms/step - loss: 3.8230e-04 - val_loss: 0.0014
Epoch 13/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 50ms/step - loss: 4.8923e-04 - val_loss: 8.4947e-04
Epoch 14/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 3s 79ms/step - loss: 4.6211e-04 - val_loss: 0.0012
Epoch 15/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 4s 59ms/step - loss: 3.9790e-04 - val_loss: 9.5497e-04
Epoch 16/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 56ms/step - loss: 5.8368e-04 - val_loss: 0.0011
Epoch 17/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 48ms/step - loss: 3.8443e-04 - val_loss: 0.0015
Epoch 18/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 51ms/step - loss: 3.5271e-04 - val_loss: 0.0042
Epoch 19/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 3s 79ms/step - loss: 5.1585e-04 - val_loss: 7.0276e-04
Epoch 20/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 63ms/step - loss: 4.1841e-04 - val_loss: 6.4166e-04
Epoch 21/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 50ms/step - loss: 4.5032e-04 - val_loss: 9.9880e-04
Epoch 22/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 52ms/step - loss: 3.9691e-04 - val_loss: 0.0017
Epoch 23/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 49ms/step - loss: 3.9491e-04 - val_loss: 6.4021e-04
Epoch 24/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 3s 49ms/step - loss: 3.1938e-04 - val_loss: 0.0011
Epoch 25/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 3s 78ms/step - loss: 3.4534e-04 - val_loss: 0.0011
Epoch 26/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 4s 49ms/step - loss: 3.7845e-04 - val_loss: 5.1618e-04
Epoch 27/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 3s 48ms/step - loss: 3.4205e-04 - val_loss: 5.2175e-04
Epoch 28/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 3s 48ms/step - loss: 3.4234e-04 - val_loss: 4.3003e-04
Epoch 29/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 3s 68ms/step - loss: 3.3639e-04 - val_loss: 4.4719e-04
Epoch 30/30 37/37 ━━━━━━━━━━━━━━━━━━━━ 3s 73ms/step - loss: 3.1749e-04 - val_loss: 0.0012
4/4 ━━━━━━━━━━━━━━━━━━━━ 1s 212ms/step
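model.fit returns a History object with the per-epoch losses shown above; a small sketch for keeping and plotting them (the history variable is an addition, not in the original cell):

history = model.fit(x=X_train, y=y_train, batch_size=32, epochs=30, validation_split=0.2)
plt.figure(figsize=(6, 3))
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.yscale('log')  # the losses span several orders of magnitude
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.legend()
plt.show()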
In [8]:
fig = plt.figure(figsize=(12, 5))
ax1 = plt.subplot2grid((1, 2), (0, 0)) # 1 row, 2 columns, position (0, 0)
ax1.plot(real_stock_price, color='red', label='Real Price')
ax1.plot(predicted_stock_price, color='blue', label='Predicted Price')
ax1.set_title('Google Price Prediction (Time Series)')
ax1.set_xlabel('Time')
ax1.set_ylabel('Price')
ax1.legend()
ax1.grid(True)
x = np.linspace(900, 1200, 100)
ax2 = plt.subplot2grid((1, 2), (0, 1)) # 1 row, 2 columns, position (0, 1)
ax2.plot(x, x, color='red', linestyle='-', label='1:1 Line')
ax2.plot(real_stock_price, predicted_stock_price, 'b.', label='Pred. vs. Obs.')
ax2.set_title('Google Price Prediction (Scatter)')
ax2.set_xlabel('Observed')
ax2.set_ylabel('Predicted')
ax2.set_aspect('equal')
ax2.set_xlim([900, 1200])
ax2.set_ylim([900, 1200])
ax2.grid(True)
ax2.legend()
plt.tight_layout()
plt.show()
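The plots are qualitative; to quantify the fit you can compute errors on the original price scale, e.g. with sklearn.metrics (a sketch, not part of the original notebook):

from sklearn.metrics import mean_squared_error, mean_absolute_error
rmse = np.sqrt(mean_squared_error(real_stock_price, predicted_stock_price))
mae = mean_absolute_error(real_stock_price, predicted_stock_price)
print(f"LSTM RMSE: {rmse:.2f}  MAE: {mae:.2f}")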
In [9]:
from tensorflow.keras.layers import SimpleRNN, BatchNormalization
# Create RNN model
model_RNN = Sequential([
    SimpleRNN(52, return_sequences=True, input_shape=(X_train.shape[1], 1),
              activation='relu'),           # RNN layer 1
    #BatchNormalization(),
    SimpleRNN(16, return_sequences=False),  # RNN layer 2
    #BatchNormalization(),
    Dense(1, activation='relu')             # output layer
])
# Compile the model
model_RNN.compile(optimizer='adam', loss='mse')
print(model_RNN.summary())
model_RNN.fit(x=X_train, y=y_train, batch_size=32, epochs=50, validation_split=0.2)
/usr/local/lib/python3.10/dist-packages/keras/src/layers/rnn/rnn.py:204: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(**kwargs)
Model: "sequential_1"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃ Layer (type)                 ┃ Output Shape       ┃    Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ simple_rnn (SimpleRNN)       │ (None, 60, 52)     │      2,808 │
├──────────────────────────────┼────────────────────┼────────────┤
│ simple_rnn_1 (SimpleRNN)     │ (None, 16)         │      1,104 │
├──────────────────────────────┼────────────────────┼────────────┤
│ dense_1 (Dense)              │ (None, 1)          │         17 │
└──────────────────────────────┴────────────────────┴────────────┘
Total params: 3,929 (15.35 KB)
Trainable params: 3,929 (15.35 KB)
Non-trainable params: 0 (0.00 B)
None
Epoch 1/50  37/37 ━━━━━━━━━━━━━━━━━━━━ 4s 41ms/step - loss: 0.0170 - val_loss: 0.0176
Epoch 2/50  37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 27ms/step - loss: 9.6354e-04 - val_loss: 0.0066
Epoch 3/50  37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 28ms/step - loss: 3.7462e-04 - val_loss: 0.0061
Epoch 4/50  37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 28ms/step - loss: 3.5291e-04 - val_loss: 0.0051
Epoch 5/50  37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 27ms/step - loss: 3.7763e-04 - val_loss: 0.0040
Epoch 6/50  37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 30ms/step - loss: 2.9151e-04 - val_loss: 0.0030
Epoch 7/50  37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 46ms/step - loss: 2.4055e-04 - val_loss: 0.0025
Epoch 8/50  37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 47ms/step - loss: 3.2479e-04 - val_loss: 0.0035
Epoch 9/50  37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 26ms/step - loss: 2.0474e-04 - val_loss: 0.0017
Epoch 10/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 27ms/step - loss: 1.7320e-04 - val_loss: 0.0043
Epoch 11/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 27ms/step - loss: 2.6413e-04 - val_loss: 0.0027
Epoch 12/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 28ms/step - loss: 2.1953e-04 - val_loss: 0.0024
Epoch 13/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 26ms/step - loss: 2.2870e-04 - val_loss: 0.0019
Epoch 14/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 27ms/step - loss: 1.7741e-04 - val_loss: 0.0020
Epoch 15/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 27ms/step - loss: 1.6928e-04 - val_loss: 0.0018
Epoch 16/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 27ms/step - loss: 1.8081e-04 - val_loss: 0.0020
Epoch 17/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 33ms/step - loss: 1.5466e-04 - val_loss: 0.0015
Epoch 18/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 49ms/step - loss: 1.9036e-04 - val_loss: 0.0036
Epoch 19/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 47ms/step - loss: 1.8925e-04 - val_loss: 0.0021
Epoch 20/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 27ms/step - loss: 1.5765e-04 - val_loss: 0.0021
Epoch 21/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 27ms/step - loss: 1.8670e-04 - val_loss: 0.0014
Epoch 22/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 26ms/step - loss: 1.6140e-04 - val_loss: 0.0019
Epoch 23/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 28ms/step - loss: 1.3598e-04 - val_loss: 0.0028
Epoch 24/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 27ms/step - loss: 1.6182e-04 - val_loss: 0.0013
Epoch 25/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 27ms/step - loss: 1.3415e-04 - val_loss: 9.3002e-04
Epoch 26/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 28ms/step - loss: 1.6832e-04 - val_loss: 9.4082e-04
Epoch 27/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 37ms/step - loss: 1.3766e-04 - val_loss: 0.0016
Epoch 28/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 3s 38ms/step - loss: 1.1091e-04 - val_loss: 0.0022
Epoch 29/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 27ms/step - loss: 1.5975e-04 - val_loss: 9.3129e-04
Epoch 30/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 27ms/step - loss: 1.5195e-04 - val_loss: 0.0024
Epoch 31/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 26ms/step - loss: 1.5389e-04 - val_loss: 0.0017
Epoch 32/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 26ms/step - loss: 1.4026e-04 - val_loss: 0.0020
Epoch 33/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 28ms/step - loss: 1.3248e-04 - val_loss: 0.0015
Epoch 34/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 27ms/step - loss: 1.9398e-04 - val_loss: 8.9390e-04
Epoch 35/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 25ms/step - loss: 1.1298e-04 - val_loss: 0.0014
Epoch 36/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 26ms/step - loss: 1.2752e-04 - val_loss: 7.7109e-04
Epoch 37/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 35ms/step - loss: 1.4149e-04 - val_loss: 8.7161e-04
Epoch 38/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 3s 38ms/step - loss: 1.4476e-04 - val_loss: 0.0015
Epoch 39/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 27ms/step - loss: 1.6538e-04 - val_loss: 9.1392e-04
Epoch 40/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 27ms/step - loss: 1.3523e-04 - val_loss: 5.8130e-04
Epoch 41/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 26ms/step - loss: 1.5506e-04 - val_loss: 6.1110e-04
Epoch 42/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 27ms/step - loss: 1.5385e-04 - val_loss: 0.0011
Epoch 43/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 26ms/step - loss: 1.3727e-04 - val_loss: 0.0014
Epoch 44/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 26ms/step - loss: 1.3262e-04 - val_loss: 7.9966e-04
Epoch 45/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 26ms/step - loss: 1.0460e-04 - val_loss: 9.3541e-04
Epoch 46/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 34ms/step - loss: 1.2252e-04 - val_loss: 0.0010
Epoch 47/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 3s 39ms/step - loss: 1.1470e-04 - val_loss: 0.0012
Epoch 48/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 27ms/step - loss: 1.1387e-04 - val_loss: 5.4329e-04
Epoch 49/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 27ms/step - loss: 1.2072e-04 - val_loss: 0.0014
Epoch 50/50 37/37 ━━━━━━━━━━━━━━━━━━━━ 1s 27ms/step - loss: 1.5341e-04 - val_loss: 0.0011
Out[9]:
<keras.src.callbacks.history.History at 0x78774de83040>
In [10]:
predicted_stock_price_RNN = model_RNN.predict(X_test)
predicted_stock_price_RNN = sc.inverse_transform(predicted_stock_price_RNN)
fig = plt.figure(figsize=(12, 5))
ax1 = plt.subplot2grid((1, 2), (0, 0)) # 1 row, 2 columns, position (0, 0)
ax1.plot(real_stock_price, color='red', label='Real Price')
ax1.plot(predicted_stock_price_RNN, color='blue', label='Predicted Price')
ax1.set_title('Google Price Prediction (Time Series)')
ax1.set_xlabel('Time')
ax1.set_ylabel('Price')
ax1.legend()
ax1.grid(True)
x = np.linspace(900, 1200, 100)
ax2 = plt.subplot2grid((1, 2), (0, 1)) # 1 row, 2 columns, position (0, 1)
ax2.plot(x, x, color='red', linestyle='-', label='1:1 Line')
ax2.plot(real_stock_price, predicted_stock_price_RNN, 'b.', label='Pred. vs. Obs.')
ax2.set_title('Google Price Prediction (Scatter)')
ax2.set_xlabel('Observed')
ax2.set_ylabel('Predicted')
ax2.set_aspect('equal')
ax2.set_xlim([900, 1200])
ax2.set_ylim([900, 1200])
ax2.grid(True)
ax2.legend()
plt.tight_layout()
plt.show()
4/4 ━━━━━━━━━━━━━━━━━━━━ 1s 92ms/step
GRU
We didn't introduce the GRU, but it is an improvement on the LSTM and the simple RNN.
For more about the GRU, please refer to:
Cho et al. (2014): https://arxiv.org/pdf/1406.1078
Chung et al. (2014): https://arxiv.org/pdf/1412.3555
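One way to see how the three recurrent layers compare in size: with n units and d input features, Keras reports n*(n + d + 1) parameters for SimpleRNN, 4*n*(n + d + 1) for LSTM, and 3*(n*n + n*d + 2*n) for GRU (the GRU formula assumes Keras's default reset_after=True). A quick check against the model summaries in this notebook:

def rnn_params(n, d):
    return n * (n + d + 1)
def lstm_params(n, d):
    return 4 * n * (n + d + 1)
def gru_params(n, d):  # reset_after=True adds a second bias per gate
    return 3 * (n * n + n * d + 2 * n)
print(lstm_params(52, 1), lstm_params(16, 52))  # 11232, 4416
print(rnn_params(52, 1), rnn_params(16, 52))    # 2808, 1104
print(gru_params(32, 1), gru_params(16, 32))    # 3360, 2400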
In [11]:
# Example with GRU
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import GRU
model_GRU = Sequential([
    GRU(32, return_sequences=True, input_shape=(X_train.shape[1], 1)),
    #BatchNormalization(),
    GRU(16),
    #BatchNormalization(),
    Dense(1, activation='relu')
])
# Compile the model
model_GRU.compile(optimizer='adam', loss='mse')
print(model_GRU.summary())
model_GRU.fit(x=X_train, y=y_train, batch_size=32, epochs=20, validation_split=0.2)
/usr/local/lib/python3.10/dist-packages/keras/src/layers/rnn/rnn.py:204: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(**kwargs)
Model: "sequential_2"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃ Layer (type)                 ┃ Output Shape       ┃    Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ gru (GRU)                    │ (None, 60, 32)     │      3,360 │
├──────────────────────────────┼────────────────────┼────────────┤
│ gru_1 (GRU)                  │ (None, 16)         │      2,400 │
├──────────────────────────────┼────────────────────┼────────────┤
│ dense_2 (Dense)              │ (None, 1)          │         17 │
└──────────────────────────────┴────────────────────┴────────────┘
Total params: 5,777 (22.57 KB)
Trainable params: 5,777 (22.57 KB)
Non-trainable params: 0 (0.00 B)
None
Epoch 1/20  37/37 ━━━━━━━━━━━━━━━━━━━━ 8s 96ms/step - loss: 0.0205 - val_loss: 0.0071
Epoch 2/20  37/37 ━━━━━━━━━━━━━━━━━━━━ 5s 89ms/step - loss: 8.4116e-04 - val_loss: 4.1760e-04
Epoch 3/20  37/37 ━━━━━━━━━━━━━━━━━━━━ 4s 62ms/step - loss: 2.1220e-04 - val_loss: 3.4530e-04
Epoch 4/20  37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 65ms/step - loss: 2.3977e-04 - val_loss: 4.0680e-04
Epoch 5/20  37/37 ━━━━━━━━━━━━━━━━━━━━ 3s 65ms/step - loss: 2.4798e-04 - val_loss: 3.5080e-04
Epoch 6/20  37/37 ━━━━━━━━━━━━━━━━━━━━ 4s 113ms/step - loss: 2.4314e-04 - val_loss: 3.2878e-04
Epoch 7/20  37/37 ━━━━━━━━━━━━━━━━━━━━ 3s 62ms/step - loss: 2.4331e-04 - val_loss: 4.7494e-04
Epoch 8/20  37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 62ms/step - loss: 1.7080e-04 - val_loss: 2.8601e-04
Epoch 9/20  37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 63ms/step - loss: 1.7629e-04 - val_loss: 5.0249e-04
Epoch 10/20 37/37 ━━━━━━━━━━━━━━━━━━━━ 3s 67ms/step - loss: 1.5528e-04 - val_loss: 2.4207e-04
Epoch 11/20 37/37 ━━━━━━━━━━━━━━━━━━━━ 3s 92ms/step - loss: 2.2187e-04 - val_loss: 2.5356e-04
Epoch 12/20 37/37 ━━━━━━━━━━━━━━━━━━━━ 3s 67ms/step - loss: 2.1749e-04 - val_loss: 3.3062e-04
Epoch 13/20 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 61ms/step - loss: 1.7155e-04 - val_loss: 4.3853e-04
Epoch 14/20 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 62ms/step - loss: 1.5625e-04 - val_loss: 4.0721e-04
Epoch 15/20 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 61ms/step - loss: 1.9203e-04 - val_loss: 3.7330e-04
Epoch 16/20 37/37 ━━━━━━━━━━━━━━━━━━━━ 3s 81ms/step - loss: 1.3395e-04 - val_loss: 2.6560e-04
Epoch 17/20 37/37 ━━━━━━━━━━━━━━━━━━━━ 3s 79ms/step - loss: 1.4167e-04 - val_loss: 1.8545e-04
Epoch 18/20 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 62ms/step - loss: 1.8913e-04 - val_loss: 3.2222e-04
Epoch 19/20 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 61ms/step - loss: 1.7410e-04 - val_loss: 2.6995e-04
Epoch 20/20 37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 63ms/step - loss: 1.7524e-04 - val_loss: 3.5745e-04
Out[11]:
<keras.src.callbacks.history.History at 0x78774e447400>
In [12]:
predicted_stock_price_GRU = model_GRU.predict(X_test)
predicted_stock_price_GRU = sc.inverse_transform(predicted_stock_price_GRU)
fig = plt.figure(figsize=(12, 5))
ax1 = plt.subplot2grid((1, 2), (0, 0)) # 1 row, 2 columns, position (0, 0)
ax1.plot(real_stock_price, color='red', label='Real Price')
ax1.plot(predicted_stock_price_GRU, color='blue', label='Predicted Price')
ax1.set_title('Google Price Prediction (Time Series)')
ax1.set_xlabel('Time')
ax1.set_ylabel('Price')
ax1.legend()
ax1.grid(True)
x = np.linspace(900, 1200, 100)
ax2 = plt.subplot2grid((1, 2), (0, 1)) # 1 row, 2 columns, position (0, 1)
ax2.plot(x, x, color='red', linestyle='-', label='1:1 Line')
ax2.plot(real_stock_price, predicted_stock_price_GRU, 'b.', label='Pred. vs. Obs.')
ax2.set_title('Google Price Prediction (Scatter)')
ax2.set_xlabel('Observed')
ax2.set_ylabel('Predicted')
ax2.set_aspect('equal')
ax2.set_xlim([900, 1200])
ax2.set_ylim([900, 1200])
ax2.grid(True)
ax2.legend()
plt.tight_layout()
plt.show()
WARNING:tensorflow:5 out of the last 9 calls to <function TensorFlowTrainer.make_predict_function.<locals>.one_step_on_data_distributed at 0x78774c245870> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.
1/4 ━━━━━━━━━━━━━━━━━━━━ 1s 338ms/step
WARNING:tensorflow:6 out of the last 12 calls to <function TensorFlowTrainer.make_predict_function.<locals>.one_step_on_data_distributed at 0x78774c245870> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.
4/4 ━━━━━━━━━━━━━━━━━━━━ 1s 129ms/step
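Finally, a short sketch comparing the three models on the same test set (RMSE in dollars; an addition, not original notebook output):

from sklearn.metrics import mean_squared_error
for name, pred in [('LSTM', predicted_stock_price),
                   ('SimpleRNN', predicted_stock_price_RNN),
                   ('GRU', predicted_stock_price_GRU)]:
    rmse = np.sqrt(mean_squared_error(real_stock_price, pred))
    print(f"{name:9s} RMSE: {rmse:.2f}")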