Lecture 25, AdaGrad optimizer

This commit is contained in:
judsonupchurch 2025-01-21 01:37:38 +00:00
parent 55795cc52f
commit 2192bf3050
2 changed files with 236 additions and 6 deletions

View File

@@ -256,7 +256,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# Testing the Learning Rate Decay"
"## Testing the Learning Rate Decay"
]
},
{
@@ -1403,7 +1403,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# Testing the Gradient Optimizer with Momentum"
"## Testing the Gradient Optimizer with Momentum"
]
},
{

View File

@@ -9,7 +9,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
@@ -23,7 +23,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
@@ -253,13 +253,243 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# AdaGrad Optimizer"
"# AdaGrad Optimizer\n",
"Different weights should have different learning rates. If one weight affects the loss much more strongly than the other, then consider using smaller learning rates with it. We can do this by maintaining a \"cache\" of the last gradients and normalizing based on this."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"class Optimizer_Adagrad():\n",
" def __init__(self, learning_rate=0.5, decay=0.0, epsilon=1e-7):\n",
" self.initial_learning_rate = learning_rate\n",
" self.current_learning_rate = self.initial_learning_rate\n",
" self.decay = decay\n",
" self.iterations = 0\n",
" self.epsilon = 0\n",
"\n",
" def pre_update_params(self):\n",
" if self.decay:\n",
" self.current_learning_rate = self.initial_learning_rate / (1 + self.decay * self.iterations)\n",
"\n",
" def update_params(self, layer):\n",
" if not hasattr(layer, 'weight_cache'):\n",
" layer.weight_cache = np.zeros_like(layer.weights)\n",
" layer.bias_cache = np.zeros_like(layer.biases)\n",
"\n",
" layer.weight_cache += layer.dweights**2\n",
" layer.bias_cache += layer.dbiases**2\n",
"\n",
" layer.weights += -self.current_learning_rate * layer.dweights / (np.sqrt(layer.weight_cache) + self.epsilon)\n",
" layer.biases += -self.current_learning_rate * layer.dbiases / (np.sqrt(layer.bias_cache) + self.epsilon)\n",
"\n",
" def post_update_params(self):\n",
" self.iterations += 1"
]
},
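{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before training, here is a minimal sanity-check sketch (not from the lecture) of the cache normalization on its own. The gradient values below are made up for illustration, and NumPy is assumed to be imported as `np` in the cells above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Minimal sketch: two hand-picked gradient components, one large and one small\n",
"grad = np.array([5.0, 0.1])\n",
"cache = np.zeros_like(grad)\n",
"lr, epsilon = 1.0, 1e-7\n",
"\n",
"cache += grad**2\n",
"# Effective per-parameter learning rate: much smaller for the large-gradient component\n",
"print(lr / (np.sqrt(cache) + epsilon))             # ~[0.2, 10.0]\n",
"# So the resulting steps come out the same size on the first update\n",
"print(-lr * grad / (np.sqrt(cache) + epsilon))     # ~[-1.0, -1.0]\n",
"\n",
"# A second identical gradient grows the cache and shrinks both steps\n",
"cache += grad**2\n",
"print(-lr * grad / (np.sqrt(cache) + epsilon))     # ~[-0.707, -0.707]"
]
},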
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Testing the AdagGrad Optimizer"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"epoch: 0, acc: 0.360, loss: 1.099\n",
"epoch: 100, acc: 0.477, loss: 0.992\n",
"epoch: 200, acc: 0.560, loss: 0.934\n",
"epoch: 300, acc: 0.600, loss: 0.881\n",
"epoch: 400, acc: 0.637, loss: 0.826\n",
"epoch: 500, acc: 0.617, loss: 0.793\n",
"epoch: 600, acc: 0.643, loss: 0.748\n",
"epoch: 700, acc: 0.657, loss: 0.726\n",
"epoch: 800, acc: 0.663, loss: 0.698\n",
"epoch: 900, acc: 0.680, loss: 0.681\n",
"epoch: 1000, acc: 0.683, loss: 0.664\n",
"epoch: 1100, acc: 0.693, loss: 0.653\n",
"epoch: 1200, acc: 0.693, loss: 0.642\n",
"epoch: 1300, acc: 0.707, loss: 0.632\n",
"epoch: 1400, acc: 0.720, loss: 0.625\n",
"epoch: 1500, acc: 0.717, loss: 0.615\n",
"epoch: 1600, acc: 0.723, loss: 0.608\n",
"epoch: 1700, acc: 0.730, loss: 0.600\n",
"epoch: 1800, acc: 0.740, loss: 0.588\n",
"epoch: 1900, acc: 0.740, loss: 0.581\n",
"epoch: 2000, acc: 0.743, loss: 0.576\n",
"epoch: 2100, acc: 0.740, loss: 0.570\n",
"epoch: 2200, acc: 0.750, loss: 0.563\n",
"epoch: 2300, acc: 0.747, loss: 0.562\n",
"epoch: 2400, acc: 0.757, loss: 0.557\n",
"epoch: 2500, acc: 0.760, loss: 0.554\n",
"epoch: 2600, acc: 0.770, loss: 0.550\n",
"epoch: 2700, acc: 0.777, loss: 0.546\n",
"epoch: 2800, acc: 0.777, loss: 0.543\n",
"epoch: 2900, acc: 0.780, loss: 0.540\n",
"epoch: 3000, acc: 0.777, loss: 0.537\n",
"epoch: 3100, acc: 0.773, loss: 0.533\n",
"epoch: 3200, acc: 0.783, loss: 0.531\n",
"epoch: 3300, acc: 0.777, loss: 0.528\n",
"epoch: 3400, acc: 0.777, loss: 0.526\n",
"epoch: 3500, acc: 0.773, loss: 0.523\n",
"epoch: 3600, acc: 0.780, loss: 0.522\n",
"epoch: 3700, acc: 0.773, loss: 0.520\n",
"epoch: 3800, acc: 0.777, loss: 0.518\n",
"epoch: 3900, acc: 0.780, loss: 0.516\n",
"epoch: 4000, acc: 0.780, loss: 0.515\n",
"epoch: 4100, acc: 0.773, loss: 0.513\n",
"epoch: 4200, acc: 0.770, loss: 0.511\n",
"epoch: 4300, acc: 0.773, loss: 0.510\n",
"epoch: 4400, acc: 0.777, loss: 0.509\n",
"epoch: 4500, acc: 0.777, loss: 0.508\n",
"epoch: 4600, acc: 0.780, loss: 0.507\n",
"epoch: 4700, acc: 0.777, loss: 0.506\n",
"epoch: 4800, acc: 0.777, loss: 0.504\n",
"epoch: 4900, acc: 0.783, loss: 0.503\n",
"epoch: 5000, acc: 0.783, loss: 0.503\n",
"epoch: 5100, acc: 0.787, loss: 0.502\n",
"epoch: 5200, acc: 0.790, loss: 0.501\n",
"epoch: 5300, acc: 0.787, loss: 0.500\n",
"epoch: 5400, acc: 0.787, loss: 0.498\n",
"epoch: 5500, acc: 0.783, loss: 0.497\n",
"epoch: 5600, acc: 0.787, loss: 0.496\n",
"epoch: 5700, acc: 0.780, loss: 0.495\n",
"epoch: 5800, acc: 0.780, loss: 0.495\n",
"epoch: 5900, acc: 0.783, loss: 0.494\n",
"epoch: 6000, acc: 0.790, loss: 0.494\n",
"epoch: 6100, acc: 0.777, loss: 0.493\n",
"epoch: 6200, acc: 0.783, loss: 0.492\n",
"epoch: 6300, acc: 0.783, loss: 0.491\n",
"epoch: 6400, acc: 0.790, loss: 0.490\n",
"epoch: 6500, acc: 0.780, loss: 0.488\n",
"epoch: 6600, acc: 0.780, loss: 0.487\n",
"epoch: 6700, acc: 0.777, loss: 0.485\n",
"epoch: 6800, acc: 0.780, loss: 0.483\n",
"epoch: 6900, acc: 0.783, loss: 0.482\n",
"epoch: 7000, acc: 0.790, loss: 0.480\n",
"epoch: 7100, acc: 0.790, loss: 0.479\n",
"epoch: 7200, acc: 0.797, loss: 0.477\n",
"epoch: 7300, acc: 0.803, loss: 0.476\n",
"epoch: 7400, acc: 0.813, loss: 0.475\n",
"epoch: 7500, acc: 0.813, loss: 0.474\n",
"epoch: 7600, acc: 0.813, loss: 0.472\n",
"epoch: 7700, acc: 0.813, loss: 0.471\n",
"epoch: 7800, acc: 0.810, loss: 0.470\n",
"epoch: 7900, acc: 0.810, loss: 0.469\n",
"epoch: 8000, acc: 0.810, loss: 0.468\n",
"epoch: 8100, acc: 0.810, loss: 0.465\n",
"epoch: 8200, acc: 0.807, loss: 0.463\n",
"epoch: 8300, acc: 0.803, loss: 0.462\n",
"epoch: 8400, acc: 0.803, loss: 0.461\n",
"epoch: 8500, acc: 0.807, loss: 0.459\n",
"epoch: 8600, acc: 0.810, loss: 0.458\n",
"epoch: 8700, acc: 0.813, loss: 0.458\n",
"epoch: 8800, acc: 0.810, loss: 0.456\n",
"epoch: 8900, acc: 0.810, loss: 0.455\n",
"epoch: 9000, acc: 0.813, loss: 0.452\n",
"epoch: 9100, acc: 0.813, loss: 0.450\n",
"epoch: 9200, acc: 0.817, loss: 0.448\n",
"epoch: 9300, acc: 0.810, loss: 0.447\n",
"epoch: 9400, acc: 0.810, loss: 0.446\n",
"epoch: 9500, acc: 0.813, loss: 0.444\n",
"epoch: 9600, acc: 0.813, loss: 0.441\n",
"epoch: 9700, acc: 0.817, loss: 0.440\n",
"epoch: 9800, acc: 0.817, loss: 0.438\n",
"epoch: 9900, acc: 0.813, loss: 0.436\n",
"epoch: 10000, acc: 0.813, loss: 0.435\n"
]
}
],
"source": [
"# Create dataset\n",
"X, y = spiral_data(samples=100, classes=3)\n",
"\n",
"# Create Dense layer with 2 input features and 64 output values\n",
"dense1 = Layer_Dense(2, 64)\n",
"\n",
"# Create ReLU activation (to be used with Dense layer)\n",
"activation1 = Activation_ReLU()\n",
"\n",
"# Create second Dense layer with 64 input features (as we take output\n",
"# of previous layer here) and 3 output values (output values)\n",
"dense2 = Layer_Dense(64, 3)\n",
"\n",
"# Create Softmax classifier's combined loss and activation\n",
"loss_activation = Activation_Softmax_Loss_CategoricalCrossentropy()\n",
"\n",
"# Create optimizer\n",
"optimizer = Optimizer_Adagrad(learning_rate=1.0, decay=1e-4)\n",
"\n",
"# Train in loop\n",
"for epoch in range(10001):\n",
" # Perform a forward pass of our training data through this layer\n",
" dense1.forward(X)\n",
" \n",
" # Perform a forward pass through activation function\n",
" # takes the output of first dense layer here\n",
" activation1.forward(dense1.output)\n",
" \n",
" # Perform a forward pass through second Dense layer\n",
" # takes outputs of activation function of first layer as inputs\n",
" dense2.forward(activation1.output)\n",
" \n",
" # Perform a forward pass through the activation/loss function\n",
" # takes the output of second dense layer here and returns loss\n",
" loss = loss_activation.forward(dense2.output, y)\n",
" \n",
" # Calculate accuracy from output of activation2 and targets\n",
" # calculate values along first axis\n",
" predictions = np.argmax(loss_activation.output, axis=1)\n",
" if len(y.shape) == 2:\n",
" y = np.argmax(y, axis=1)\n",
" accuracy = np.mean(predictions == y)\n",
" \n",
" if not epoch % 100:\n",
" print(f'epoch: {epoch}, ' +\n",
" f'acc: {accuracy:.3f}, ' +\n",
" f'loss: {loss:.3f}')\n",
" \n",
" # Backward pass\n",
" loss_activation.backward(loss_activation.output, y)\n",
" dense2.backward(loss_activation.dinputs)\n",
" activation1.backward(dense2.dinputs)\n",
" dense1.backward(activation1.dinputs)\n",
" \n",
" # Update weights and biases\n",
" optimizer.pre_update_params()\n",
" optimizer.update_params(dense1)\n",
" optimizer.update_params(dense2)\n",
" optimizer.post_update_params()\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"name": "python"
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
}
},
"nbformat": 4,