Lecture 7
117
lecture07/notes_07.ipynb
Normal file
@@ -0,0 +1,117 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Previous Class Definitions\n",
    "The previously defined Layer_Dense, Activation_ReLU, and Activation_Softmax classes, redefined below so this notebook is self-contained."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "# imports\n",
    "import numpy as np\n",
    "import nnfs\n",
    "from nnfs.datasets import spiral_data\n",
    "\n",
    "# Set the random seed and default float dtype for reproducible results\n",
    "nnfs.init()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "class Layer_Dense:\n",
    "    def __init__(self, n_inputs, n_neurons):\n",
    "        # Initialize the weights and biases\n",
    "        self.weights = 0.01 * np.random.randn(n_inputs, n_neurons)  # Small, normally distributed weights\n",
    "        self.biases = np.zeros((1, n_neurons))\n",
    "\n",
    "    def forward(self, inputs):\n",
    "        # Calculate the output values from inputs, weights, and biases\n",
    "        self.output = np.dot(inputs, self.weights) + self.biases  # Weights are (n_inputs, n_neurons), so no transpose is needed\n",
    "\n",
    "class Activation_ReLU:\n",
    "    def forward(self, inputs):\n",
    "        # Zero out negative values, pass positive values through unchanged\n",
    "        self.output = np.maximum(0, inputs)\n",
    "\n",
    "class Activation_Softmax:\n",
    "    def forward(self, inputs):\n",
    "        # Get the unnormalized probabilities\n",
    "        # Subtract each row's max first so exp() cannot overflow\n",
    "        exp_values = np.exp(inputs - np.max(inputs, axis=1, keepdims=True))\n",
    "\n",
    "        # Normalize so each row sums to 1 (element-wise division)\n",
    "        probabilities = exp_values / np.sum(exp_values, axis=1, keepdims=True)\n",
    "        self.output = probabilities"
   ]
  },
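  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick sanity check of Activation_Softmax, added here as a minimal sketch (not part of the original lecture code): subtracting the row max should leave the result unchanged, since softmax is invariant to a constant shift, and every output row should sum to 1."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "# Added check (not in the lecture): softmax on a small hand-made batch\n",
    "softmax = Activation_Softmax()\n",
    "softmax.forward(np.array([[1.0, 2.0, 3.0], [1000.0, 1001.0, 1002.0]]))\n",
    "\n",
    "# The two rows differ only by a constant shift, so they yield identical\n",
    "# probabilities, and the large values do not overflow exp() thanks to\n",
    "# the max subtraction\n",
    "print(softmax.output)\n",
    "print(np.sum(softmax.output, axis=1))  # each row sums to 1"
   ]
  },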
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Forward Pass with No Loss Consideration\n",
    "A 2-input neural network with two dense layers of 3 neurons each: ReLU activation after the first layer, and Softmax after the second to normalize the outputs into a probability distribution."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "# Create dataset\n",
    "X, y = spiral_data(samples=100, classes=3)\n",
    "\n",
    "# Create Dense layer with 2 input features and 3 output values\n",
    "dense1 = Layer_Dense(2, 3)\n",
    "# Create ReLU activation (to be used with the Dense layer)\n",
    "activation1 = Activation_ReLU()\n",
    "# Create second Dense layer with 3 input features (matching the\n",
    "# previous layer's output) and 3 output values\n",
    "dense2 = Layer_Dense(3, 3)\n",
    "# Create Softmax activation (to be used with the Dense layer)\n",
    "activation2 = Activation_Softmax()\n",
    "\n",
    "# Make a forward pass of the training data through the first layer\n",
    "dense1.forward(X)\n",
    "# Forward pass through the ReLU activation,\n",
    "# which takes the output of the first dense layer\n",
    "activation1.forward(dense1.output)\n",
    "# Forward pass through the second Dense layer,\n",
    "# which takes the first activation's outputs as inputs\n",
    "dense2.forward(activation1.output)\n",
    "# Forward pass through the Softmax activation,\n",
    "# which takes the output of the second dense layer\n",
    "activation2.forward(dense2.output)\n",
    "\n",
    "# Look at the output of the first few samples\n",
    "print(activation2.output[:5])"
   ]
  },
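  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As a follow-up check (a minimal sketch added alongside the lecture code, not from the original notes): with weights initialized at 0.01 * randn, the pre-softmax outputs sit close to zero, so every row of activation2.output should be near-uniform, roughly 1/3 per class. Taking an argmax over each row gives the untrained network's current class predictions."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "python"
    }
   },
   "outputs": [],
   "source": [
    "# Added check (not in the lecture): each row is a probability\n",
    "# distribution over the 3 classes\n",
    "print(np.sum(activation2.output, axis=1)[:5])  # should all be 1.0\n",
    "\n",
    "# Predicted class per sample = index of the highest probability\n",
    "predictions = np.argmax(activation2.output, axis=1)\n",
    "print(predictions[:5])"
   ]
  }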
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}