{
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Iteration 0, Loss: 466.56000000000006\n",
"Iteration 20, Loss: 5.32959636083938\n",
"Iteration 40, Loss: 0.41191523404899866\n",
"Iteration 60, Loss: 0.031836212079467595\n",
"Iteration 80, Loss: 0.002460565465389601\n",
"Iteration 100, Loss: 0.000190172825660145\n",
"Iteration 120, Loss: 1.4698126966451542e-05\n",
"Iteration 140, Loss: 1.1359926717815175e-06\n",
"Iteration 160, Loss: 8.779889800154524e-08\n",
"Iteration 180, Loss: 6.7858241357822796e-09\n",
"Final weights:\n",
" [[-0.00698895 -0.01397789 -0.02096684 -0.02795579]\n",
|
|
" [ 0.25975286 0.11950572 -0.02074143 -0.16098857]\n",
|
|
" [ 0.53548461 0.27096922 0.00645383 -0.25806156]]\n",
|
|
"Final biases:\n",
|
|
" [-0.00698895 -0.04024714 -0.06451539]\n"
]
}
],
"source": [
"import numpy as np\n",
"\n",
"# Input vector (held fixed throughout training)\n",
"inputs = np.array([1, 2, 3, 4])\n",
"\n",
"# Initial weights and biases\n",
"weights = np.array([\n",
"    [0.1, 0.2, 0.3, 0.4],\n",
"    [0.5, 0.6, 0.7, 0.8],\n",
"    [0.9, 1.0, 1.1, 1.2]\n",
"])\n",
"\n",
"biases = np.array([0.1, 0.2, 0.3])\n",
"\n",
"# Learning rate\n",
"learning_rate = 0.001\n",
"\n",
"# ReLU activation function and its derivative\n",
"def relu(x):\n",
"    return np.maximum(0, x)\n",
"\n",
"def relu_derivative(x):\n",
"    # 1 where the pre-activation is positive, 0 elsewhere\n",
"    return np.where(x > 0, 1, 0)\n",
"\n",
"# Training loop\n",
"for iteration in range(200):\n",
"    # Forward pass\n",
"    z = np.dot(weights, inputs) + biases\n",
"    a = relu(z)\n",
"    y = np.sum(a)\n",
"\n",
"    # Squared loss against an implicit target of 0\n",
"    loss = y ** 2\n",
"\n",
"    # Backward pass\n",
"    # Gradient of loss with respect to output y\n",
"    dL_dy = 2 * y\n",
"\n",
"    # Gradient of y with respect to a (y is a plain sum, so all ones)\n",
"    dy_da = np.ones_like(a)\n",
"\n",
"    # Gradient of loss with respect to a\n",
"    dL_da = dL_dy * dy_da\n",
"\n",
"    # Gradient of a with respect to z (ReLU derivative)\n",
"    da_dz = relu_derivative(z)\n",
"\n",
"    # Gradient of loss with respect to z\n",
"    dL_dz = dL_da * da_dz\n",
"\n",
"    # Gradient of loss with respect to weights and biases:\n",
"    # dz_i/dW_ij = inputs_j, so dL/dW is an outer product; dz_i/db_i = 1\n",
"    dL_dW = np.outer(dL_dz, inputs)\n",
"    dL_db = dL_dz\n",
"\n",
"    # Update weights and biases\n",
"    weights -= learning_rate * dL_dW\n",
"    biases -= learning_rate * dL_db\n",
"\n",
"    # Print the loss every 20 iterations\n",
"    if iteration % 20 == 0:\n",
"        print(f\"Iteration {iteration}, Loss: {loss}\")\n",
"\n",
"# Final weights and biases\n",
"print(\"Final weights:\\n\", weights)\n",
"print(\"Final biases:\\n\", biases)\n",
"\n"
]
},
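{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick numerical sanity check on the hand-derived gradients: perturb each parameter at the initial values, re-run the forward pass, and compare the central finite-difference slope against the analytic chain-rule formulas. This is a minimal sketch; `forward_loss`, `num_dW`, and `num_db` are illustrative names, not part of the training cell above.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Numerical gradient check (central differences) at the initial parameters.\n",
"# Names here (forward_loss, num_dW, num_db) are illustrative helpers.\n",
"import numpy as np\n",
"\n",
"x = np.array([1.0, 2.0, 3.0, 4.0])\n",
"W0 = np.array([\n",
"    [0.1, 0.2, 0.3, 0.4],\n",
"    [0.5, 0.6, 0.7, 0.8],\n",
"    [0.9, 1.0, 1.1, 1.2]\n",
"])\n",
"b0 = np.array([0.1, 0.2, 0.3])\n",
"\n",
"def forward_loss(W, b):\n",
"    z = W @ x + b\n",
"    a = np.maximum(0, z)\n",
"    return a.sum() ** 2\n",
"\n",
"# Analytic gradients, same chain rule as in the training loop\n",
"z0 = W0 @ x + b0\n",
"dL_dz0 = 2 * np.maximum(0, z0).sum() * (z0 > 0).astype(float)\n",
"dL_dW0 = np.outer(dL_dz0, x)\n",
"dL_db0 = dL_dz0\n",
"\n",
"# Central finite differences, one parameter at a time\n",
"eps = 1e-6\n",
"num_dW = np.zeros_like(W0)\n",
"for i in range(W0.shape[0]):\n",
"    for j in range(W0.shape[1]):\n",
"        Wp, Wm = W0.copy(), W0.copy()\n",
"        Wp[i, j] += eps\n",
"        Wm[i, j] -= eps\n",
"        num_dW[i, j] = (forward_loss(Wp, b0) - forward_loss(Wm, b0)) / (2 * eps)\n",
"\n",
"num_db = np.zeros_like(b0)\n",
"for i in range(b0.shape[0]):\n",
"    bp, bm = b0.copy(), b0.copy()\n",
"    bp[i] += eps\n",
"    bm[i] -= eps\n",
"    num_db[i] = (forward_loss(W0, bp) - forward_loss(W0, bm)) / (2 * eps)\n",
"\n",
"print(\"max |dL/dW analytic - numeric|:\", np.abs(dL_dW0 - num_dW).max())\n",
"print(\"max |dL/db analytic - numeric|:\", np.abs(dL_db0 - num_db).max())\n"
]
}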
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 2
}