Lecture 24, momentum in neural networks
This commit is contained in:
parent d405eb9916
commit e190b86c70
@@ -1339,6 +1339,247 @@
"    optimizer.update_params(dense2)\n",
"    optimizer.post_update_params()\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Momentum in Training Neural Networks\n",
"\n",
"Momentum reuses information from past updates to the weights and biases. Because each update is a high-dimensional vector, components that keep flipping sign from step to step (the overshoot) largely cancel out across steps, while components that consistently point downhill accumulate.\n",
"\n",
"new weight update = momentum factor * previous weight update - learning rate * current gradient"
]
},
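{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before wiring momentum into the optimizer, a minimal sketch of the update rule on a toy ill-conditioned quadratic (the loss, learning rate, and step count below are assumptions for illustration, not part of the lecture model): the steep, oscillating direction largely cancels across steps, while the shallow direction gathers speed.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"# Toy ill-conditioned quadratic: loss = 0.5 * (100 * x**2 + y**2)\n",
"def grad(p):\n",
"    return np.array([100.0, 1.0]) * p\n",
"\n",
"for beta in (0.0, 0.9):  # plain SGD vs. SGD with momentum\n",
"    p = np.array([1.0, 1.0])\n",
"    update = np.zeros_like(p)\n",
"    for _ in range(100):\n",
"        # new update = momentum * previous update - learning rate * gradient\n",
"        update = beta * update - 0.015 * grad(p)\n",
"        p += update\n",
"    print(f'momentum={beta}: distance from optimum after 100 steps = {np.linalg.norm(p):.4f}')"
]
},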
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [],
"source": [
"class Optimizer_SGD:\n",
"    def __init__(self, learning_rate=0.5, decay=0.0, momentum=0.0):\n",
"        self.initial_rate = learning_rate\n",
"        self.current_learning_rate = self.initial_rate\n",
"        self.decay = decay\n",
"        self.iterations = 0\n",
"        self.momentum = momentum\n",
"\n",
"    def pre_update_params(self):\n",
"        # Decay the current_learning_rate before updating params\n",
"        if self.decay:\n",
"            self.current_learning_rate = self.initial_rate / (1 + self.decay * self.iterations)\n",
"\n",
"    def update_params(self, layer):\n",
"        if self.momentum:\n",
"            # Each layer carries its own momentum buffers\n",
"\n",
"            # First check whether the layer already has momentum buffers;\n",
"            # if not, create them filled with zeros\n",
"            if not hasattr(layer, 'weight_momentums'):\n",
"                layer.weight_momentums = np.zeros_like(layer.weights)\n",
"                layer.bias_momentums = np.zeros_like(layer.biases)\n",
"\n",
"            weight_updates = self.momentum * layer.weight_momentums - \\\n",
"                self.current_learning_rate * layer.dweights\n",
"            layer.weight_momentums = weight_updates\n",
"\n",
"            bias_updates = self.momentum * layer.bias_momentums - \\\n",
"                self.current_learning_rate * layer.dbiases\n",
"            layer.bias_momentums = bias_updates\n",
"\n",
"        # Not using momentum: plain (decayed) SGD step\n",
"        else:\n",
"            weight_updates = -self.current_learning_rate * layer.dweights\n",
"            bias_updates = -self.current_learning_rate * layer.dbiases\n",
"\n",
"        layer.weights += weight_updates\n",
"        layer.biases += bias_updates\n",
"\n",
"    def post_update_params(self):\n",
"        # Advance the iteration counter used by the decay schedule\n",
"        self.iterations += 1"
]
},
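{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check (values below are assumed, not from the lecture) of what the 1 / (1 + decay * iterations) schedule in pre_update_params does to the step size over a long run:\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Decayed learning rate at a few checkpoints (lr and decay assumed)\n",
"lr, decay = 1.0, 1e-3\n",
"for t in (0, 100, 1000, 10000):\n",
"    print(f'iteration {t}: lr = {lr / (1 + decay * t):.4f}')"
]
},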
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Testing the Gradient Optimizer with Momentum"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"epoch: 0, acc: 0.317, loss: 1.099\n",
"epoch: 100, acc: 0.437, loss: 1.033\n",
"epoch: 200, acc: 0.483, loss: 0.913\n",
"epoch: 300, acc: 0.750, loss: 0.622\n",
"epoch: 400, acc: 0.810, loss: 0.471\n",
"epoch: 500, acc: 0.850, loss: 0.403\n",
"epoch: 600, acc: 0.873, loss: 0.380\n",
"epoch: 700, acc: 0.843, loss: 0.378\n",
"epoch: 800, acc: 0.897, loss: 0.289\n",
"epoch: 900, acc: 0.910, loss: 0.264\n",
"epoch: 1000, acc: 0.883, loss: 0.293\n",
"epoch: 1100, acc: 0.923, loss: 0.236\n",
"epoch: 1200, acc: 0.927, loss: 0.227\n",
"epoch: 1300, acc: 0.903, loss: 0.238\n",
"epoch: 1400, acc: 0.927, loss: 0.215\n",
"epoch: 1500, acc: 0.927, loss: 0.204\n",
"epoch: 1600, acc: 0.930, loss: 0.198\n",
"epoch: 1700, acc: 0.927, loss: 0.195\n",
"epoch: 1800, acc: 0.927, loss: 0.191\n",
"epoch: 1900, acc: 0.930, loss: 0.190\n",
"epoch: 2000, acc: 0.930, loss: 0.185\n",
"epoch: 2100, acc: 0.930, loss: 0.183\n",
"epoch: 2200, acc: 0.930, loss: 0.181\n",
"epoch: 2300, acc: 0.930, loss: 0.179\n",
"epoch: 2400, acc: 0.933, loss: 0.177\n",
"epoch: 2500, acc: 0.930, loss: 0.175\n",
"epoch: 2600, acc: 0.930, loss: 0.174\n",
"epoch: 2700, acc: 0.930, loss: 0.172\n",
"epoch: 2800, acc: 0.930, loss: 0.171\n",
"epoch: 2900, acc: 0.930, loss: 0.170\n",
"epoch: 3000, acc: 0.933, loss: 0.168\n",
"epoch: 3100, acc: 0.930, loss: 0.167\n",
"epoch: 3200, acc: 0.933, loss: 0.167\n",
"epoch: 3300, acc: 0.937, loss: 0.166\n",
"epoch: 3400, acc: 0.937, loss: 0.165\n",
"epoch: 3500, acc: 0.940, loss: 0.164\n",
"epoch: 3600, acc: 0.940, loss: 0.163\n",
"epoch: 3700, acc: 0.940, loss: 0.162\n",
"epoch: 3800, acc: 0.943, loss: 0.161\n",
"epoch: 3900, acc: 0.940, loss: 0.161\n",
"epoch: 4000, acc: 0.940, loss: 0.160\n",
"epoch: 4100, acc: 0.940, loss: 0.159\n",
"epoch: 4200, acc: 0.940, loss: 0.159\n",
"epoch: 4300, acc: 0.940, loss: 0.158\n",
"epoch: 4400, acc: 0.940, loss: 0.158\n",
"epoch: 4500, acc: 0.940, loss: 0.157\n",
"epoch: 4600, acc: 0.940, loss: 0.157\n",
"epoch: 4700, acc: 0.940, loss: 0.156\n",
"epoch: 4800, acc: 0.943, loss: 0.156\n",
"epoch: 4900, acc: 0.940, loss: 0.155\n",
"epoch: 5000, acc: 0.940, loss: 0.155\n",
"epoch: 5100, acc: 0.940, loss: 0.154\n",
"epoch: 5200, acc: 0.940, loss: 0.154\n",
"epoch: 5300, acc: 0.940, loss: 0.154\n",
"epoch: 5400, acc: 0.940, loss: 0.153\n",
"epoch: 5500, acc: 0.940, loss: 0.153\n",
"epoch: 5600, acc: 0.940, loss: 0.153\n",
"epoch: 5700, acc: 0.940, loss: 0.152\n",
"epoch: 5800, acc: 0.940, loss: 0.152\n",
"epoch: 5900, acc: 0.940, loss: 0.152\n",
"epoch: 6000, acc: 0.940, loss: 0.151\n",
"epoch: 6100, acc: 0.940, loss: 0.151\n",
"epoch: 6200, acc: 0.940, loss: 0.151\n",
"epoch: 6300, acc: 0.940, loss: 0.151\n",
"epoch: 6400, acc: 0.940, loss: 0.150\n",
"epoch: 6500, acc: 0.940, loss: 0.150\n",
"epoch: 6600, acc: 0.940, loss: 0.150\n",
"epoch: 6700, acc: 0.940, loss: 0.150\n",
"epoch: 6800, acc: 0.940, loss: 0.150\n",
"epoch: 6900, acc: 0.940, loss: 0.149\n",
"epoch: 7000, acc: 0.940, loss: 0.149\n",
"epoch: 7100, acc: 0.940, loss: 0.149\n",
"epoch: 7200, acc: 0.940, loss: 0.149\n",
"epoch: 7300, acc: 0.940, loss: 0.149\n",
"epoch: 7400, acc: 0.940, loss: 0.148\n",
"epoch: 7500, acc: 0.940, loss: 0.148\n",
"epoch: 7600, acc: 0.943, loss: 0.148\n",
"epoch: 7700, acc: 0.943, loss: 0.148\n",
"epoch: 7800, acc: 0.940, loss: 0.147\n",
"epoch: 7900, acc: 0.943, loss: 0.147\n",
"epoch: 8000, acc: 0.943, loss: 0.147\n",
"epoch: 8100, acc: 0.943, loss: 0.147\n",
"epoch: 8200, acc: 0.940, loss: 0.147\n",
"epoch: 8300, acc: 0.943, loss: 0.147\n",
"epoch: 8400, acc: 0.943, loss: 0.146\n",
"epoch: 8500, acc: 0.943, loss: 0.146\n",
"epoch: 8600, acc: 0.943, loss: 0.146\n",
"epoch: 8700, acc: 0.943, loss: 0.146\n",
"epoch: 8800, acc: 0.943, loss: 0.146\n",
"epoch: 8900, acc: 0.943, loss: 0.146\n",
"epoch: 9000, acc: 0.943, loss: 0.146\n",
"epoch: 9100, acc: 0.943, loss: 0.145\n",
"epoch: 9200, acc: 0.943, loss: 0.145\n",
"epoch: 9300, acc: 0.943, loss: 0.145\n",
"epoch: 9400, acc: 0.943, loss: 0.145\n",
"epoch: 9500, acc: 0.943, loss: 0.145\n",
"epoch: 9600, acc: 0.943, loss: 0.145\n",
"epoch: 9700, acc: 0.943, loss: 0.145\n",
"epoch: 9800, acc: 0.943, loss: 0.145\n",
"epoch: 9900, acc: 0.943, loss: 0.145\n",
"epoch: 10000, acc: 0.943, loss: 0.144\n"
]
}
],
"source": [
"# Create dataset\n",
"X, y = spiral_data(samples=100, classes=3)\n",
"\n",
"# Create Dense layer with 2 input features and 64 output values\n",
"dense1 = Layer_Dense(2, 64)\n",
"\n",
"# Create ReLU activation (to be used with Dense layer)\n",
"activation1 = Activation_ReLU()\n",
"\n",
"# Create second Dense layer with 64 input features (as we take output\n",
"# of previous layer here) and 3 output values (one per class)\n",
"dense2 = Layer_Dense(64, 3)\n",
"\n",
"# Create Softmax classifier's combined loss and activation\n",
"loss_activation = Activation_Softmax_Loss_CategoricalCrossentropy()\n",
"\n",
"# Create optimizer\n",
"optimizer = Optimizer_SGD(learning_rate=1.0, decay=1e-3, momentum=0.9)\n",
"\n",
"# Train in loop\n",
"for epoch in range(10001):\n",
"    # Perform a forward pass of our training data through this layer\n",
"    dense1.forward(X)\n",
"\n",
"    # Perform a forward pass through activation function\n",
"    # takes the output of first dense layer here\n",
"    activation1.forward(dense1.output)\n",
"\n",
"    # Perform a forward pass through second Dense layer\n",
"    # takes outputs of activation function of first layer as inputs\n",
"    dense2.forward(activation1.output)\n",
"\n",
"    # Perform a forward pass through the activation/loss function\n",
"    # takes the output of second dense layer here and returns loss\n",
"    loss = loss_activation.forward(dense2.output, y)\n",
"\n",
"    # Calculate accuracy from output of loss_activation and targets\n",
"    # calculate values along first axis\n",
"    predictions = np.argmax(loss_activation.output, axis=1)\n",
"    if len(y.shape) == 2:\n",
"        y = np.argmax(y, axis=1)\n",
"    accuracy = np.mean(predictions == y)\n",
"\n",
"    if not epoch % 100:\n",
"        print(f'epoch: {epoch}, ' +\n",
"              f'acc: {accuracy:.3f}, ' +\n",
"              f'loss: {loss:.3f}')\n",
"\n",
"    # Backward pass\n",
"    loss_activation.backward(loss_activation.output, y)\n",
"    dense2.backward(loss_activation.dinputs)\n",
"    activation1.backward(dense2.dinputs)\n",
"    dense1.backward(activation1.dinputs)\n",
"\n",
"    # Update weights and biases\n",
"    optimizer.pre_update_params()\n",
"    optimizer.update_params(dense1)\n",
"    optimizer.update_params(dense2)\n",
"    optimizer.post_update_params()\n"
]
},
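{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a follow-up, an ablation sketch (not run in the lecture, so no recorded output): the same architecture and decay schedule with momentum=0.0, so the final accuracy and loss can be compared against the momentum run above.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Ablation: identical setup, momentum disabled\n",
"X, y = spiral_data(samples=100, classes=3)\n",
"dense1 = Layer_Dense(2, 64)\n",
"activation1 = Activation_ReLU()\n",
"dense2 = Layer_Dense(64, 3)\n",
"loss_activation = Activation_Softmax_Loss_CategoricalCrossentropy()\n",
"optimizer = Optimizer_SGD(learning_rate=1.0, decay=1e-3, momentum=0.0)\n",
"\n",
"for epoch in range(10001):\n",
"    # Forward pass\n",
"    dense1.forward(X)\n",
"    activation1.forward(dense1.output)\n",
"    dense2.forward(activation1.output)\n",
"    loss = loss_activation.forward(dense2.output, y)\n",
"\n",
"    # Backward pass and parameter update\n",
"    loss_activation.backward(loss_activation.output, y)\n",
"    dense2.backward(loss_activation.dinputs)\n",
"    activation1.backward(dense2.dinputs)\n",
"    dense1.backward(activation1.dinputs)\n",
"    optimizer.pre_update_params()\n",
"    optimizer.update_params(dense1)\n",
"    optimizer.update_params(dense2)\n",
"    optimizer.post_update_params()\n",
"\n",
"predictions = np.argmax(loss_activation.output, axis=1)\n",
"print(f'momentum=0.0: acc: {np.mean(predictions == y):.3f}, loss: {loss:.3f}')"
]
}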
],
"metadata": {