Lecture 6
This commit is contained in:
parent
76a36634a6
commit
00527a1e7c
@ -647,18 +647,19 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[[0. 0. 0. ]\n",
|
||||
" [0.00013767 0. 0. ]\n",
|
||||
" [0.00022187 0. 0. ]\n",
|
||||
" [0.0004077 0. 0. ]\n",
|
||||
" [0.00054541 0. 0. ]]\n"
|
||||
"ename": "NameError",
|
||||
"evalue": "name 'np' is not defined",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
|
||||
"\u001b[0;32m/tmp/ipykernel_49133/797568168.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mspiral_data\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msamples\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m100\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mclasses\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m3\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;31m# Create Dense layer with 2 input features and 3 output values\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 15\u001b[0;31m \u001b[0mdense1\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mLayer_Dense\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m3\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 16\u001b[0m \u001b[0;31m# Create ReLU activation (to be used with Dense layer):\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[0mactivation1\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mActivation_ReLU\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0;32m/tmp/ipykernel_49133/797568168.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, n_inputs, n_neurons)\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mn_inputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mn_neurons\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0;31m# Initialize the weights and biases\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 7\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mweights\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m0.01\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrandom\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrandn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mn_inputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mn_neurons\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# Normal distribution of weights\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 8\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbiases\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mn_neurons\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[0;31mNameError\u001b[0m: name 'np' is not defined"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@ -255,7 +255,7 @@
|
||||
" self.biases = np.zeros((1, n_neurons))\n",
|
||||
"\n",
|
||||
" def forward(self, inputs):\n",
|
||||
" # Calculat ethe output values from inputs, weights, and biases\n",
|
||||
" # Calculate the output values from inputs, weights, and biases\n",
|
||||
" self.output = np.dot(inputs, self.weights) + self.biases # Weights are already transposed\n",
|
||||
"\n",
|
||||
"# Create a dataset\n",
|
||||
@ -414,7 +414,9 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Activation Function: ReLU\n",
|
||||
"Rectified Linear Unit\n"
|
||||
"Rectified Linear Unit. Only passes through positive values.\n",
|
||||
"\n",
|
||||
"$y = \\max(0, x)$"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -422,7 +424,165 @@
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"source": [
|
||||
"class Activation_ReLU:\n",
|
||||
" def forward(self, inputs):\n",
|
||||
" self.output = np.maximum(0, inputs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[[0. 0. 0. ]\n",
|
||||
" [0. 0.00011395 0. ]\n",
|
||||
" [0. 0.00031729 0. ]\n",
|
||||
" [0. 0.00052666 0. ]\n",
|
||||
" [0. 0.00071401 0. ]]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import nnfs\n",
|
||||
"from nnfs.datasets import spiral_data\n",
|
||||
"nnfs.init()\n",
|
||||
"\n",
|
||||
"class Layer_Dense:\n",
|
||||
" def __init__(self, n_inputs, n_neurons):\n",
|
||||
" # Initialize the weights and biases\n",
|
||||
" self.weights = 0.01 * np.random.randn(n_inputs, n_neurons) # Normal distribution of weights\n",
|
||||
" self.biases = np.zeros((1, n_neurons))\n",
|
||||
"\n",
|
||||
" def forward(self, inputs):\n",
|
||||
" # Calculate the output values from inputs, weights, and biases\n",
|
||||
" self.output = np.dot(inputs, self.weights) + self.biases # Weights are already transposed\n",
|
||||
"\n",
|
||||
"class Activation_ReLU:\n",
|
||||
" def forward(self, inputs):\n",
|
||||
" self.output = np.maximum(0, inputs)\n",
|
||||
"\n",
|
||||
"# Create a dataset\n",
|
||||
"X, y = spiral_data(samples=100, classes=3)\n",
|
||||
"\n",
|
||||
"# Create a dense layer with 2 inputs and 3 output values\n",
|
||||
"dense1 = Layer_Dense(2, 3)\n",
|
||||
"\n",
|
||||
"# Create ReLU activation\n",
|
||||
"activation1 = Activation_ReLU()\n",
|
||||
"\n",
|
||||
"# Perform a forward pass of the dataset through the dense layer\n",
|
||||
"dense1.forward(X)\n",
|
||||
"\n",
|
||||
"# Pass the output from the layer through the ReLU activation function\n",
|
||||
"activation1.forward(dense1.output)\n",
|
||||
"\n",
|
||||
"# Print just the first few outputs from ReLU\n",
|
||||
"print(activation1.output[:5])\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Activation Function: Softmax\n",
|
||||
"Each output falls in the interval (0, 1) and can be interpreted as a probability.\n",
|
||||
"\n",
|
||||
"Given 3 outputs $o_1, o_2, o_3$, each normalized output is $o_i = \\frac{e^{o_i}}{e^{o_1} + e^{o_2} + e^{o_3}}$\n",
|
||||
"\n",
|
||||
"Sum of the outputs is 1.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class Activation_Softmax:\n",
|
||||
" def forward(self, inputs):\n",
|
||||
" # Get the unnormalized probabilities\n",
|
||||
" # Subtract max from the row to prevent larger numbers\n",
|
||||
" exp_values = np.exp(inputs - np.max(inputs, axis=1, keepdims=True))\n",
|
||||
"\n",
|
||||
" # Normalize the probabilities\n",
|
||||
" probabilities = exp_values / np.sum(exp_values, axis=1,keepdims=True)\n",
|
||||
" self.output = probabilities"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[[0.33333334 0.33333334 0.33333334]\n",
|
||||
" [0.33333316 0.3333332 0.33333364]\n",
|
||||
" [0.33333287 0.3333329 0.33333418]\n",
|
||||
" [0.3333326 0.33333263 0.33333477]\n",
|
||||
" [0.33333233 0.3333324 0.33333528]]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import nnfs\n",
|
||||
"from nnfs.datasets import spiral_data\n",
|
||||
"nnfs.init()\n",
|
||||
"\n",
|
||||
"class Layer_Dense:\n",
|
||||
" def __init__(self, n_inputs, n_neurons):\n",
|
||||
" # Initialize the weights and biases\n",
|
||||
" self.weights = 0.01 * np.random.randn(n_inputs, n_neurons) # Normal distribution of weights\n",
|
||||
" self.biases = np.zeros((1, n_neurons))\n",
|
||||
"\n",
|
||||
" def forward(self, inputs):\n",
|
||||
" # Calculate the output values from inputs, weights, and biases\n",
|
||||
" self.output = np.dot(inputs, self.weights) + self.biases # Weights are already transposed\n",
|
||||
"\n",
|
||||
"class Activation_ReLU:\n",
|
||||
" def forward(self, inputs):\n",
|
||||
" self.output = np.maximum(0, inputs)\n",
|
||||
" \n",
|
||||
"class Activation_Softmax:\n",
|
||||
" def forward(self, inputs):\n",
|
||||
" # Get the unnormalized probabilities\n",
|
||||
" # Subtract max from the row to prevent larger numbers\n",
|
||||
" exp_values = np.exp(inputs - np.max(inputs, axis=1, keepdims=True))\n",
|
||||
"\n",
|
||||
" # Normalize the probabilities with element wise division\n",
|
||||
" probabilities = exp_values / np.sum(exp_values, axis=1,keepdims=True)\n",
|
||||
" self.output = probabilities\n",
|
||||
"\n",
|
||||
"# Create dataset\n",
|
||||
"X, y = spiral_data(samples=100, classes=3)\n",
|
||||
"\n",
|
||||
"dense1 = Layer_Dense(2, 3)\n",
|
||||
"activation1 = Activation_ReLU()\n",
|
||||
"dense2 = Layer_Dense(3, 3)\n",
|
||||
"activation2 = Activation_Softmax()\n",
|
||||
"\n",
|
||||
"# Make a forward pass of our training data through this layer\n",
|
||||
"dense1.forward(X)\n",
|
||||
"\n",
|
||||
"# Make a forward pass through activation function\n",
|
||||
"activation1.forward(dense1.output)\n",
|
||||
"# Make a forward pass through second Dense layer\n",
|
||||
"dense2.forward(activation1.output)\n",
|
||||
"# Make a forward pass through activation function\n",
|
||||
"activation2.forward(dense2.output)\n",
|
||||
"# Let's see output of the first few samples:\n",
|
||||
"print(activation2.output[:5])"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
Loading…
Reference in New Issue
Block a user