Recurrent Neural Network

Part 1 - Data Preprocessing

# Importing the libraries

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the training set

dataset_train = pd.read_csv('Google_train.csv')
training_set = dataset_train.iloc[:,0:].values
training_set
array([[1.51532028e+02, 1.54919327e+02, 1.50739990e+02, 1.53250580e+02,
        5.79510000e+06],
       [1.53723801e+02, 1.60309128e+02, 1.52179596e+02, 1.60060059e+02,
        7.24800000e+06],
       [1.59900650e+02, 1.65001541e+02, 1.56911850e+02, 1.63412491e+02,
        9.81450000e+06],
       ...,
       [1.14398999e+03, 1.14733997e+03, 1.13878003e+03, 1.14490002e+03,
        8.64000000e+05],
       [1.14685999e+03, 1.15081995e+03, 1.13940002e+03, 1.15033997e+03,
        9.03800000e+05],
       [1.14600000e+03, 1.15857996e+03, 1.14500000e+03, 1.15357996e+03,
        1.23880000e+06]])
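The columns are unlabeled in the array above; based on the plot labels later in the notebook, the five columns are assumed to be Open, High, Low, Close and Volume. A quick inspection (not part of the original notebook) can confirm the layout:

# Peek at the raw CSV; the five columns are assumed to be
# Open, High, Low, Close and Volume (inferred from the plot labels below)
print(dataset_train.head())
print(dataset_train.shape)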

Feature Scaling

  • Feature scaling is done so that all values lie in the same range; otherwise columns with much larger magnitudes than the other features, such as volume, may be given higher importance by the neural network, producing a biased model.
  • Hence it is important to bring all features to the same scale.
# Feature Scaling

from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)
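As a quick sanity check (not in the original notebook), every scaled column should now lie in the (0, 1) feature range:

# Each column should span [0, 1] after min-max scaling
print(training_set_scaled.min(axis=0))  # ~0 for every column
print(training_set_scaled.max(axis=0))  # ~1 for every column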

Creating the training set

  • Each training sample is a window of 5 features per timestep, and the network predicts all 5 scaled values for the following day (see the shape check after the code below).
  • An RNN needs sequential context, i.e. a memory of past data must be passed in. To train on the i-th value with n timesteps, the previous n values (from i-n up to i-1) must be supplied as input.
# Creating a data structure with 120 timesteps and 5 outputs

X_train = []
y_train = []
for i in range(120, 2425):  # 2425 = number of rows in the training set
    X_train.append(training_set_scaled[i-120:i])
    y_train.append(training_set_scaled[i])
X_train, y_train = np.array(X_train), np.array(y_train)

# Reshaping to the 3D (samples, timesteps, features) format expected by LSTM layers
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 5))
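A shape check (added for clarity, not in the original notebook) makes the windowing explicit: 2425 rows minus the 120-day warm-up leaves 2305 samples, each a window of 120 timesteps with 5 features, paired with a 5-value target:

print(X_train.shape)  # (2305, 120, 5) -> (samples, timesteps, features)
print(y_train.shape)  # (2305, 5)      -> one 5-value target per window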

Part 2 - Building the RNN

# Importing the Keras libraries and packages

from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
# Initialising the RNN
regressor = Sequential()

# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 75, return_sequences = True, input_shape = (X_train.shape[1], 5)))
regressor.add(Dropout(0.2))

# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 75, return_sequences = True))
regressor.add(Dropout(0.2))

# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 75, return_sequences = True))
regressor.add(Dropout(0.2))

# Adding a fourth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 75))
regressor.add(Dropout(0.2))

# Adding the output layer
regressor.add(Dense(units = 5))
# Compiling the RNN

regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
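The first three LSTM layers use return_sequences = True so that each passes its full 120-step output sequence to the next layer; the fourth returns only its final hidden state, which the Dense layer maps to the 5 output values. To verify the resulting shapes and parameter counts, you can print a model summary:

# Inspect layer output shapes and parameter counts of the stacked LSTM
regressor.summary()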
# Fitting the RNN to the Training set
regressor.fit(X_train, y_train, epochs = 100, batch_size = 32)
Epoch 1/100
73/73 [==============================] - 14s 127ms/step - loss: 0.0360
Epoch 2/100
73/73 [==============================] - 9s 127ms/step - loss: 0.0054
Epoch 3/100
73/73 [==============================] - 10s 133ms/step - loss: 0.0062
...
Epoch 99/100
73/73 [==============================] - 12s 163ms/step - loss: 0.0013
Epoch 100/100
73/73 [==============================] - 12s 165ms/step - loss: 0.0012
<tensorflow.python.keras.callbacks.History at 0x7fd80e4dd490>
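The training loss has largely plateaued by the later epochs. One possible variation (not part of the original notebook) is to hold out the last 10% of the windows for validation and stop early when the validation loss stops improving:

# Optional variation: validate on the last 10% of windows and stop early
from keras.callbacks import EarlyStopping

early_stop = EarlyStopping(monitor='val_loss', patience=10,
                           restore_best_weights=True)
regressor.fit(X_train, y_train, epochs=100, batch_size=32,
              validation_split=0.1, callbacks=[early_stop])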

Part 3 - Making the predictions and visualising the results

# Getting the real stock price from the test set

dataset_test = pd.read_csv('Google_test.csv')
real_stock_price = dataset_test.iloc[:, 0:].values

Creating the test dataset

  • Since the RNN requires sequential context (a memory of past data), the first values of the test set also need 120 days of preceding history, which is only available in the training data.
  • A combined dataset is therefore created from the training and test data; from it, the preceding 120 days can be taken for each of the initial test datapoints.
# Creating dataset for prediction

dataset_total = pd.concat((dataset_train, dataset_test), axis = 0, sort = False)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 120:].values
inputs = sc.transform(inputs)
X_test = []
for i in range(120, 160):
    X_test.append(inputs[i-120:i])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 5))
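The hard-coded range(120, 160) implies a 40-row test set, so inputs should contain those 40 rows plus the 120 days of history pulled from the training data. A quick sanity check (not in the original notebook):

print(inputs.shape)  # expected (160, 5): 40 test rows + 120 days of history
print(X_test.shape)  # expected (40, 120, 5)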
# Making predictions

predicted_stock_price = regressor.predict(X_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
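sc.inverse_transform works directly here because the network predicts all five scaled columns. For feature_range = (0, 1) it is equivalent to undoing the min-max formula by hand, as this sketch using the scaler's fitted data_min_ and data_max_ attributes shows:

# Equivalent manual inverse of MinMaxScaler with feature_range=(0, 1)
scaled_pred = regressor.predict(X_test)
manual = scaled_pred * (sc.data_max_ - sc.data_min_) + sc.data_min_
assert np.allclose(manual, sc.inverse_transform(scaled_pred))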

Visualizing predictions

plot_columns = ['Open', 'High', 'Low', 'Close']
for i, name in enumerate(plot_columns):
    plt.plot(real_stock_price[1:, i], color='red', marker='o',
             label=f'Real Google Stock {name} Price')
    plt.plot(predicted_stock_price[:, i], color='blue', marker='o',
             label=f'Predicted Google Stock {name} Price')
    plt.title('Google Stock Price Prediction')
    plt.xlabel('Day')
    plt.ylabel('Google Stock Price')
    plt.legend()
    plt.show()
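The plots give only a visual comparison; to quantify the fit you could compute an RMSE per column. A sketch (not in the original notebook), aligning the two series the same way the plots above do:

from sklearn.metrics import mean_squared_error

# RMSE of the open price, using the same one-day shift as the plots above
n = len(real_stock_price) - 1
rmse = np.sqrt(mean_squared_error(real_stock_price[1:, 0],
                                  predicted_stock_price[:n, 0]))
print(f'Open-price RMSE: {rmse:.2f}')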
# Saving Model

regressor.save("open.h5")