Lecture 3 + HW
parent 2a457caa2b
commit f7d8e7524f
Binary file not shown.
@@ -79,4 +79,50 @@ bias = 2
output = np.dot(inputs, weights) + bias
print(output)


# %% [markdown]
# # Layer of Neurons Using Numpy

# %%
import numpy as np

inputs = [1.0, 2.0, 3.0, 2.5]
weights = np.array([
    [0.2, 0.8, -0.5, 1],
    [0.5, -0.91, 0.26, -0.5],
    [-0.26, -0.27, 0.17, 0.87]
])
biases = [2.0, 3.0, 0.5]

layer_outputs = np.dot(weights, inputs) + biases
# Must be dot(weights, inputs), not dot(inputs, weights):
# this takes the dot product of each row of weights with the inputs vector
# (remember the second argument is treated as a column vector).
print(layer_outputs)

layer_outputs_2 = np.dot(inputs, weights.T) + biases
# This multiplies the input vector by each transposed weight row. Also correct.
print(layer_outputs_2)


# %% [markdown]
# # Layer of Neurons and Batch of Data Using Numpy
# A batch of data is simply a set of inputs.

# %%
import numpy as np

inputs = [  # Batch of inputs
    [1.0, 2.0, 3.0, 2.5],
    [2.0, 5.0, -1.0, 2.0],
    [-1.5, 2.7, 3.3, -0.8]
]
weights = np.array([
    [0.2, 0.8, -0.5, 1],
    [0.5, -0.91, 0.26, -0.5],
    [-0.26, -0.27, 0.17, 0.87]
])
biases = [2.0, 3.0, 0.5]

outputs = np.dot(inputs, weights.T) + biases
# For every row of inputs, compute its dot product with each row of weights
print(outputs)
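A quick aside on the dot-product ordering noted above: a minimal sketch (NumPy only, variable names my own) confirming that np.dot(weights, inputs) and np.dot(inputs, weights.T) agree for a single input vector, and that only the second form generalizes to a batch of inputs.

import numpy as np

inputs = np.array([1.0, 2.0, 3.0, 2.5])            # shape (4,)
weights = np.array([[0.2, 0.8, -0.5, 1],
                    [0.5, -0.91, 0.26, -0.5],
                    [-0.26, -0.27, 0.17, 0.87]])   # shape (3, 4)
biases = np.array([2.0, 3.0, 0.5])                 # shape (3,)

a = np.dot(weights, inputs) + biases               # (3, 4) @ (4,)  -> (3,)
b = np.dot(inputs, weights.T) + biases             # (4,) @ (4, 3)  -> (3,)
print(np.allclose(a, b))                           # True

batch = np.array([[1.0, 2.0, 3.0, 2.5],
                  [2.0, 5.0, -1.0, 2.0]])          # shape (2, 4)
print(np.dot(batch, weights.T) + biases)           # (2, 4) @ (4, 3) -> (2, 3)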
205  lecture03/notes_03.ipynb  Normal file
@@ -0,0 +1,205 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Layer of Neurons and Batch of Data Using Numpy\n",
    "From lecture 1: one layer of 3 neurons."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[ 4.8 1.21 2.385]\n",
      " [ 8.9 -1.81 0.2 ]\n",
      " [ 1.41 1.051 0.026]]\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "\n",
    "inputs = [  # Batch of inputs\n",
    "    [1.0, 2.0, 3.0, 2.5],\n",
    "    [2.0, 5.0, -1.0, 2.0],\n",
    "    [-1.5, 2.7, 3.3, -0.8]\n",
    "]\n",
    "weights = np.array([\n",
    "    [0.2, 0.8, -0.5, 1],\n",
    "    [0.5, -0.91, 0.26, -0.5],\n",
    "    [-0.26, -0.27, 0.17, 0.87]\n",
    "])\n",
    "biases = [2.0, 3.0, 0.5]\n",
    "\n",
    "outputs = np.dot(inputs, weights.T) + biases\n",
    "# For every row of inputs, compute its dot product with each row of weights\n",
    "print(outputs)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 2 Layers and Batch of Data Using Numpy\n",
    "2 layers: layer 1 and layer 2 each have 3 neurons, so there are 3 final output values per sample.\n",
    "\n",
    "The batch holds 3 input samples, so the network produces 3 output rows."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[ 0.5031 -1.04185 -2.03875]\n",
      " [ 0.2434 -2.7332 -5.7633 ]\n",
      " [-0.99314 1.41254 -0.35655]]\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "\n",
    "inputs = [[1, 2, 3, 2.5],\n",
    "          [2., 5., -1., 2],\n",
    "          [-1.5, 2.7, 3.3, -0.8]]\n",
    "\n",
    "weights = [[0.2, 0.8, -0.5, 1],\n",
    "           [0.5, -0.91, 0.26, -0.5],\n",
    "           [-0.26, -0.27, 0.17, 0.87]]\n",
    "\n",
    "biases = [2, 3, 0.5]\n",
    "\n",
    "weights2 = [[0.1, -0.14, 0.5],\n",
    "            [-0.5, 0.12, -0.33],\n",
    "            [-0.44, 0.73, -0.13]]\n",
    "\n",
    "biases2 = [-1, 2, -0.5]\n",
    "\n",
    "# Make the lists np.arrays so we can transpose them\n",
    "inputs_array = np.array(inputs)\n",
    "weights_array = np.array(weights)\n",
    "biases_array = np.array(biases)\n",
    "weights2_array = np.array(weights2)\n",
    "biases2_array = np.array(biases2)\n",
    "\n",
    "# Get the output of the first layer\n",
    "layer1_outputs = np.dot(inputs_array, weights_array.T) + biases_array\n",
    "\n",
    "# Feed the output of the first layer into the second layer with its own weights\n",
    "layer2_outputs = np.dot(layer1_outputs, weights2_array.T) + biases2_array\n",
    "\n",
    "print(layer2_outputs)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 4 Layers and Batch of Data Using Numpy\n",
    "Cascading neural network with 4 layers. Layer 1 has 4 neurons, layer 2 has 3, layer 3 has 2, and layer 4 has a single output."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[0.47467235]\n",
      " [0.7219189 ]\n",
      " [0.0541053 ]]\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "\n",
    "# Batch of inputs\n",
    "inputs = [\n",
    "    [1, 2, 3, 2.5],\n",
    "    [2., 5., -1., 2],\n",
    "    [-1.5, 2.7, 3.3, -0.8]\n",
    "]\n",
    "\n",
    "# Weights are stored in a list of np.array\n",
    "# index 0 = layer 1 weights and so on\n",
    "weights = [\n",
    "    np.array([  # Layer 1\n",
    "        [0.2, 0.8, -0.5, 1],          # Neuron 1.1\n",
    "        [0.5, -0.91, 0.26, -0.5],     # Neuron 1.2\n",
    "        [-0.26, -0.27, 0.17, 0.87],   # Neuron 1.3\n",
    "        [-0.27, 0.87, 0.2, 0.8],      # Neuron 1.4\n",
    "    ]),\n",
    "    np.array([  # Layer 2\n",
    "        [0.1, 0.65, -0.24, 1.2],      # Neuron 2.1\n",
    "        [0.51, -0.21, 0.206, -0.05],  # Neuron 2.2\n",
    "        [-0.46, -0.67, 0.14, 0.37],   # Neuron 2.3\n",
    "    ]),\n",
    "    np.array([  # Layer 3\n",
    "        [0.25, 0.4, -0.2],            # Neuron 3.1\n",
    "        [0.58, -0.25, 0.26],          # Neuron 3.2\n",
    "    ]),\n",
    "    np.array([  # Layer 4\n",
    "        [0.3, 0.1],                   # Neuron 4.1\n",
    "    ])\n",
    "]\n",
    "biases = [\n",
    "    [0.5, 2, 1, -2],   # Layer 1\n",
    "    [0.2, -0.6, 1.3],  # Layer 2\n",
    "    [-1, 0.5],         # Layer 3\n",
    "    [0.28],            # Layer 4\n",
    "]\n",
    "\n",
    "# Iterate through each layer: get the output of the layer, then use it as the input for the next layer\n",
    "layer_inputs = inputs\n",
    "layer_outputs = np.array([])\n",
    "for layer in range(len(weights)):\n",
    "    layer_weights = weights[layer]\n",
    "    layer_biases = biases[layer]\n",
    "    layer_outputs = np.dot(layer_inputs, layer_weights.T) + layer_biases\n",
    "\n",
    "    # Update the inputs for the next layer based on the outputs of this layer\n",
    "    layer_inputs = layer_outputs\n",
    "# layer_outputs is left holding the final outputs of the network\n",
    "\n",
    "print(layer_outputs)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
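As a follow-up to the 4-layer loop in the notebook, a small refactor sketch (the helper name forward is mine, not from the notes) that feeds a batch through any number of layers; run on the 2-layer weights from the notebook, it reproduces layer2_outputs.

import numpy as np

def forward(batch, layer_weights, layer_biases):
    """Feed a batch of shape (n_samples, n_inputs) through each layer in turn."""
    outputs = np.array(batch, dtype=float)
    for W, b in zip(layer_weights, layer_biases):
        # W has shape (n_neurons, n_inputs_prev); result is (n_samples, n_neurons)
        outputs = np.dot(outputs, np.array(W).T) + np.array(b)
    return outputs

inputs = [[1, 2, 3, 2.5],
          [2., 5., -1., 2],
          [-1.5, 2.7, 3.3, -0.8]]
weights = [[[0.2, 0.8, -0.5, 1],
            [0.5, -0.91, 0.26, -0.5],
            [-0.26, -0.27, 0.17, 0.87]],
           [[0.1, -0.14, 0.5],
            [-0.5, 0.12, -0.33],
            [-0.44, 0.73, -0.13]]]
biases = [[2, 3, 0.5], [-1, 2, -0.5]]

print(forward(inputs, weights, biases))  # matches layer2_outputs from the notebook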