{ "cells": [ { "cell_type": "markdown", "metadata": { "id": "n8ibOnwUCzji" }, "source": [ "
| Price | \n", "Close | \n", "High | \n", "Low | \n", "Open | \n", "Volume | \n", "
|---|---|---|---|---|---|
| Ticker | \n", "WMT | \n", "WMT | \n", "WMT | \n", "WMT | \n", "WMT | \n", "
| Date | \n", "\n", " | \n", " | \n", " | \n", " | \n", " |
| 2022-08-25 | \n", "43.499287 | \n", "43.537706 | \n", "43.230355 | \n", "43.310396 | \n", "19414500 | \n", "
| 2022-08-26 | \n", "42.132236 | \n", "43.806639 | \n", "42.103420 | \n", "43.576127 | \n", "21331800 | \n", "
| 2022-08-29 | \n", "42.542027 | \n", "42.858980 | \n", "41.808875 | \n", "41.885714 | \n", "17381700 | \n", "
| 2022-08-30 | \n", "42.413963 | \n", "42.788543 | \n", "42.177050 | \n", "42.689296 | \n", "14468100 | \n", "
| 2022-08-31 | \n", "42.436371 | \n", "42.833362 | \n", "42.237873 | \n", "42.510004 | \n", "22460700 | \n", "
Model: \"sequential\"\n",
"\n"
],
"text/plain": [
"\u001b[1mModel: \"sequential\"\u001b[0m\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
"┃ Layer (type) ┃ Output Shape ┃ Param # ┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
"│ gru (GRU) │ (None, 30, 128) │ 50,304 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout (Dropout) │ (None, 30, 128) │ 0 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ gru_1 (GRU) │ (None, 30, 64) │ 37,248 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_1 (Dropout) │ (None, 30, 64) │ 0 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ gru_2 (GRU) │ (None, 30, 64) │ 24,960 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_2 (Dropout) │ (None, 30, 64) │ 0 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ gru_3 (GRU) │ (None, 30, 64) │ 24,960 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_3 (Dropout) │ (None, 30, 64) │ 0 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ gru_4 (GRU) │ (None, 30, 64) │ 24,960 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_4 (Dropout) │ (None, 30, 64) │ 0 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ gru_5 (GRU) │ (None, 30, 64) │ 24,960 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_5 (Dropout) │ (None, 30, 64) │ 0 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ gru_6 (GRU) │ (None, 64) │ 24,960 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_6 (Dropout) │ (None, 64) │ 0 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense (Dense) │ (None, 25) │ 1,625 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_1 (Dense) │ (None, 1) │ 26 │\n",
"└─────────────────────────────────┴────────────────────────┴───────────────┘\n",
"\n"
],
"text/plain": [
"┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
"┃\u001b[1m \u001b[0m\u001b[1mLayer (type) \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1mOutput Shape \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m Param #\u001b[0m\u001b[1m \u001b[0m┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
"│ gru (\u001b[38;5;33mGRU\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m30\u001b[0m, \u001b[38;5;34m128\u001b[0m) │ \u001b[38;5;34m50,304\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout (\u001b[38;5;33mDropout\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m30\u001b[0m, \u001b[38;5;34m128\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ gru_1 (\u001b[38;5;33mGRU\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m30\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m37,248\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_1 (\u001b[38;5;33mDropout\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m30\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ gru_2 (\u001b[38;5;33mGRU\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m30\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m24,960\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_2 (\u001b[38;5;33mDropout\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m30\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ gru_3 (\u001b[38;5;33mGRU\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m30\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m24,960\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_3 (\u001b[38;5;33mDropout\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m30\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ gru_4 (\u001b[38;5;33mGRU\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m30\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m24,960\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_4 (\u001b[38;5;33mDropout\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m30\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ gru_5 (\u001b[38;5;33mGRU\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m30\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m24,960\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_5 (\u001b[38;5;33mDropout\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m30\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ gru_6 (\u001b[38;5;33mGRU\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m24,960\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_6 (\u001b[38;5;33mDropout\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m25\u001b[0m) │ \u001b[38;5;34m1,625\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_1 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m1\u001b[0m) │ \u001b[38;5;34m26\u001b[0m │\n",
"└─────────────────────────────────┴────────────────────────┴───────────────┘\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"Total params: 214,003 (835.95 KB)\n", "\n" ], "text/plain": [ "\u001b[1m Total params: \u001b[0m\u001b[38;5;34m214,003\u001b[0m (835.95 KB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
Trainable params: 214,003 (835.95 KB)\n", "\n" ], "text/plain": [ "\u001b[1m Trainable params: \u001b[0m\u001b[38;5;34m214,003\u001b[0m (835.95 KB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
Non-trainable params: 0 (0.00 B)\n", "\n" ], "text/plain": [ "\u001b[1m Non-trainable params: \u001b[0m\u001b[38;5;34m0\u001b[0m (0.00 B)\n" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "model = keras.Sequential()\n", "model.add(layers.GRU(units=128, return_sequences=True, input_shape=(x_train.shape[1], 1)))\n", "model.add(layers.Dropout(0.5))\n", "model.add(layers.GRU(units=64, return_sequences=True))\n", "model.add(layers.Dropout(0.5))\n", "model.add(layers.GRU(units=64, return_sequences=True))\n", "model.add(layers.Dropout(0.5))\n", "model.add(layers.GRU(units=64, return_sequences=True))\n", "model.add(layers.Dropout(0.5))\n", "model.add(layers.GRU(units=64, return_sequences=True))\n", "model.add(layers.Dropout(0.5))\n", "model.add(layers.GRU(units=64, return_sequences=True))\n", "model.add(layers.Dropout(0.5))\n", "model.add(layers.GRU(units=64, return_sequences=False))\n", "model.add(layers.Dropout(0.5))\n", "\n", "model.add(layers.Dense(25))\n", "model.add(layers.Dense(1))\n", "\n", "model.compile(optimizer='adam', loss='mean_squared_error')\n", "model.summary()" ] }, { "cell_type": "markdown", "metadata": { "id": "RPsPTkcaCzjw" }, "source": [ "## 7. Train the Model" ] }, { "cell_type": "markdown", "metadata": { "id": "ZBSGMO0ACzjw" }, "source": [ "`batch_size` defines the number of samples that will be fed into the network in each training step.\n", "\n", "Suppose you have 150 training samples and you want to set up a batch_size equal to 40. The algorithm takes the first 40 samples (from 1st to 40th) from the training dataset and trains the network. Next, it takes the second 40 samples (from 41st to 80th) and trains the network again. We can keep doing this procedure until we have propagated all samples through the network. A problem might arise with the last set of samples. In our example, we will be left with 30 samples, since 150 is not divisible by 40 without remainder. 
The simplest solution is just to take the final 30 samples and train the network on them.\n", "\n", "\n", "`epochs` means how many times you go through your training set. In order to minimize the loss function (i.e., the errors of our model) via backpropagation (i.e., providing feedback to the network), we need to update those weights multiple times in order to reach the optimum loss, so the algorithm needs to pass through the entire dataset multiple times (i.e., we need multiple epochs).\n", "\n", "A training step means using one batch of training data to train the model." ] }, { "cell_type": "code", "execution_count": 14, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "_7vkTHkdY0Gt", "outputId": "5c21721c-e225-46de-f9ed-93d6c9fad7cc" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/2\n", "\u001b[1m78/78\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m27s\u001b[0m 136ms/step - loss: 0.0125\n", "Epoch 2/2\n", "\u001b[1m78/78\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 128ms/step - loss: 0.0053\n" ] }, { "data": { "text/plain": [ "