Compare commits
160 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 70878c94be |  |
|  | 832a3c9104 |  |
|  | 68cd38bf2f |  |
|  | 964d26c684 |  |
|  | c1d56880a6 |  |
|  | 0cd321fb96 |  |
|  | 043969fb79 |  |
|  | 4052d29b0a |  |
|  | 19ee3afd90 |  |
|  | 91f99c7398 |  |
|  | 96fe95cea1 |  |
|  | fc35dc4168 |  |
|  | 80497e298d |  |
|  | 8c658ac321 |  |
|  | 4a08fa44bf |  |
|  | c5755efe68 |  |
|  | 55b425b41b |  |
|  | e000397470 |  |
|  | b4d0b49776 |  |
|  | f5c1b2af2e |  |
|  | 1cf990bfe7 |  |
|  | ae967f8e7e |  |
|  | 690d338406 |  |
|  | 5b845f7d7b |  |
|  | d4de5f324d |  |
|  | 958b9d543e |  |
|  | 604260ebdf |  |
|  | fc7503e068 |  |
|  | 2f25abacdb |  |
|  | 14b7523a86 |  |
|  | d8d7eee7b1 |  |
|  | 9cbd56dff3 |  |
|  | 9f3099b97f |  |
|  | 516c7db675 |  |
|  | 4a74a9d0be |  |
|  | e1544de817 |  |
|  | 7d120525ad |  |
|  | e30926679d |  |
|  | 70748bde97 |  |
|  | 374db64950 |  |
|  | 80894cea1f |  |
|  | 45791c3951 |  |
|  | d7635a0fc8 |  |
|  | d93bc911ee |  |
|  | 7c06d80d1a |  |
|  | bc0f29c751 |  |
|  | 35150d7ec7 |  |
|  | 7e9954855c |  |
|  | f65e5c7fbb |  |
|  | fb742f12ee |  |
|  | bfc32d5cfa |  |
|  | 2fd5a0cd12 |  |
|  | 7f4cf1acb7 |  |
|  | b6bc481fec |  |
|  | 513fb0eba6 |  |
|  | 5bf3cf2474 |  |
|  | 6ae8b99899 |  |
|  | fdc1efce08 |  |
|  | 4199a20fd2 |  |
|  | b576187c01 |  |
|  | ffc6df0849 |  |
|  | 17eec0ba60 |  |
|  | ffcc6d3c32 |  |
|  | 1c9dd9f387 |  |
|  | 624dbee611 |  |
|  | ef2920a5fc |  |
|  | 7809de3d7f |  |
|  | a7bb4e5ce8 |  |
|  | a09e3cde2e |  |
|  | ea03aca917 |  |
|  | c84eedd252 |  |
|  | b085766752 |  |
|  | 759d566a0f |  |
|  | fd65e9bce7 |  |
|  | 8a02341771 |  |
|  | dd4926dfde |  |
|  | ea959297b8 |  |
|  | 6be2911ec8 |  |
|  | 88e8e3b336 |  |
|  | ee6960bf0f |  |
|  | eb10907982 |  |
|  | ef1c6aaec9 |  |
|  | 76df999fec |  |
|  | 238285d363 |  |
|  | 2417379be3 |  |
|  | c2bf535ad2 |  |
|  | 322b3da22b |  |
|  | 46bc9d5723 |  |
|  | b000fafc0d |  |
|  | 40201c4604 |  |
|  | f41ba979f8 |  |
|  | b8fe99a6d3 |  |
|  | a20bf8f59a |  |
|  | cf8d35aa28 |  |
|  | 199a0bd2d5 |  |
|  | 45bc31cc39 |  |
|  | f6eedcdc51 |  |
|  | a983fb78c8 |  |
|  | cd0d587e08 |  |
|  | 885cb786cb |  |
|  | a86e4bca4f |  |
|  | 9e5700c6c7 |  |
|  | 559807f22c |  |
|  | d7ff1e1531 |  |
|  | ef0a0397ce |  |
|  | 5f0a4970e5 |  |
|  | 1022439364 |  |
|  | 80fff5c1ce |  |
|  | e82701b1fd |  |
|  | e7a3e4c633 |  |
|  | 9ffd7c710d |  |
|  | 832bb929a2 |  |
|  | 9da150a703 |  |
|  | 59ae70fe74 |  |
|  | d4ad50fa6a |  |
|  | bdea676518 |  |
|  | ffae34bc5e |  |
|  | 37111ff72c |  |
|  | 75ddf8c16f |  |
|  | 37d21102da |  |
|  | f09c84de02 |  |
|  | 26e80dbcc2 |  |
|  | 8e19360163 |  |
|  | 6cd2aab77c |  |
|  | 34a7775f9f |  |
|  | f1b5b6fee5 |  |
|  | a82a3ce69c |  |
|  | 3b59907c9c |  |
|  | e5b7eaed92 |  |
|  | 90e3138acf |  |
|  | 9fa2e5c372 |  |
|  | 45ddce0cd2 |  |
|  | a0a29a9a6b |  |
|  | 25df06a02d |  |
|  | 9bb3f672d8 |  |
|  | b5fbe8445e |  |
|  | fd0144d4ab |  |
|  | 4335f935a1 |  |
|  | 45ddca3c52 |  |
|  | b52d05a785 |  |
|  | 61316a273b |  |
|  | 25b84c5cef |  |
|  | 57e5958296 |  |
|  | 25e0c17f54 |  |
|  | ea08e72ce7 |  |
|  | 863a5db7fa |  |
|  | 4ba1a3a18e |  |
|  | 96c48791b9 |  |
|  | bf7ef7f552 |  |
|  | a454a074f6 |  |
|  | 247acf8e38 |  |
|  | 11be14e1eb |  |
|  | ba250e5a23 |  |
|  | 13f3dae8f4 |  |
|  | d2b5b4f3fc |  |
|  | 39dded5a11 |  |
|  | 7897eab367 |  |
|  | 003ec5c4c9 |  |
|  | 222fb3a66e |  |
|  | 7dec3bd387 |  |
Twenty-two further hunks follow whose file headers were not rendered. Each has the form `@@ -N,4 +N,4 @@` with N = 429, 250, 645, 444, 289, 287, 219, 445, 314, 417, 347, 348, 295, 451, 561, 450, 444, 626, 187, 424, 582, 631, and each shows only the closing lines of a notebook's JSON ("outputs": [] / } / ] / } / }).
1 CM20315/Data/Info.txt Normal file
@@ -0,0 +1 @@
Data for CM20315 practical

Can't render this file because it is too large.
1 CM20315/Info.txt Normal file
@@ -0,0 +1 @@
Practicals from CM20315 course taught at University of Bath, Fall 2022
423 Notebooks/Chap01/1_1_BackgroundMathematics.ipynb Normal file
@@ -0,0 +1,423 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap01/1_1_BackgroundMathematics.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "s5zzKSOusPOB"
|
||||
},
|
||||
"source": [
|
||||
"\n",
|
||||
"# **Notebook 1.1 -- Background Mathematics**\n",
|
||||
"\n",
|
||||
"The purpose of this Python notebook is to make sure you can use CoLab and to familiarize yourself with some of the background mathematical concepts that you are going to need to understand deep learning. <br><br> It's not meant to be difficult and it may be that you know some or all of this information already.<br><br> Math is *NOT* a spectator sport. You won't learn it by just listening to lectures or reading books. It really helps to interact with it and explore yourself. <br><br> Work through the cells below, running each cell in turn. In various places you will see the words **\"TO DO\"**. Follow the instructions at these places and write code to complete the functions. There are also questions interspersed in the text.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "aUAjBbqzivMY"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Imports math library\n",
|
||||
"import numpy as np\n",
|
||||
"# Imports plotting library\n",
|
||||
"import matplotlib.pyplot as plt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "WV2Dl6owme2d"
|
||||
},
|
||||
"source": [
|
||||
"**Linear functions**<br> We will be using the term *linear equation* to mean a weighted sum of inputs plus an offset. If there is just one input $x$, then this is a straight line:\n",
|
||||
"\n",
|
||||
"\\begin{equation}y=\\beta+\\omega x,\\end{equation} <br>\n",
|
||||
"\n",
|
||||
"where $\\beta$ is the y-intercept of the linear and $\\omega$ is the slope of the line. When there are two inputs $x_{1}$ and $x_{2}$, then this becomes:\n",
|
||||
"\n",
|
||||
"\\begin{equation}y=\\beta+\\omega_1 x_1 + \\omega_2 x_2.\\end{equation} <br><br>\n",
|
||||
"\n",
|
||||
"Any other functions are by definition **non-linear**.\n",
|
||||
"\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
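For reference, a minimal sketch of one possible completion of the function in the next cell; it simply implements $y=\beta+\omega x$ from the equation above:

def linear_function_1D(x, beta, omega):
    # Intercept plus slope times input: y = beta + omega * x
    y = beta + omega * x
    return y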
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "WeFK4AvTotd8"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define a linear function with just one input, x\n",
|
||||
"def linear_function_1D(x,beta,omega):\n",
|
||||
" # TODO -- replace the code line below with formula for 1D linear equation\n",
|
||||
" y = x\n",
|
||||
"\n",
|
||||
" return y"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "eimhJ8_jpmEp"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Plot the 1D linear function\n",
|
||||
"\n",
|
||||
"# Define an array of x values from 0 to 10 with increments of 0.1\n",
|
||||
"# https://numpy.org/doc/stable/reference/generated/numpy.arange.html\n",
|
||||
"x = np.arange(0.0,10.0, 0.01)\n",
|
||||
"# Compute y using the function you filled in above\n",
|
||||
"beta = 0.0; omega = 1.0\n",
|
||||
"\n",
|
||||
"y = linear_function_1D(x,beta,omega)\n",
|
||||
"\n",
|
||||
"# Plot this function\n",
|
||||
"fig, ax = plt.subplots()\n",
|
||||
"ax.plot(x,y,'r-')\n",
|
||||
"ax.set_ylim([0,10]);ax.set_xlim([0,10])\n",
|
||||
"ax.set_xlabel('x'); ax.set_ylabel('y')\n",
|
||||
"plt.show\n",
|
||||
"\n",
|
||||
"# TODO -- experiment with changing the values of beta and omega\n",
|
||||
"# to understand what they do. Try to make a line\n",
|
||||
"# that crosses the y-axis at y=10 and the x-axis at x=5"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "AedfvD9dxShZ"
|
||||
},
|
||||
"source": [
|
||||
"Now let's investigate a 2D linear function"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "57Gvkk-Ir_7b"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Code to draw 2D function -- read it so you know what is going on, but you don't have to change it\n",
|
||||
"def draw_2D_function(x1_mesh, x2_mesh, y):\n",
|
||||
" fig, ax = plt.subplots()\n",
|
||||
" fig.set_size_inches(7,7)\n",
|
||||
" pos = ax.contourf(x1_mesh, x2_mesh, y, levels=256 ,cmap = 'hot', vmin=-10,vmax=10.0)\n",
|
||||
" fig.colorbar(pos, ax=ax)\n",
|
||||
" ax.set_xlabel('x1');ax.set_ylabel('x2')\n",
|
||||
" levels = np.arange(-10,10,1.0)\n",
|
||||
" ax.contour(x1_mesh, x2_mesh, y, levels, cmap='winter')\n",
|
||||
" plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "YxeNhrXMzkZR"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define a linear function with two inputs, x1 and x2\n",
|
||||
"def linear_function_2D(x1,x2,beta,omega1,omega2):\n",
|
||||
" # TODO -- replace the code line below with formula for 2D linear equation\n",
|
||||
" y = x1\n",
|
||||
"\n",
|
||||
" return y"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "rn_UBRDBysmR"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Plot the 2D function\n",
|
||||
"\n",
|
||||
"# Make 2D array of x and y points\n",
|
||||
"x1 = np.arange(0.0, 10.0, 0.1)\n",
|
||||
"x2 = np.arange(0.0, 10.0, 0.1)\n",
|
||||
"x1,x2 = np.meshgrid(x1,x2) # https://www.geeksforgeeks.org/numpy-meshgrid-function/\n",
|
||||
"\n",
|
||||
"# Compute the 2D function for given values of omega1, omega2\n",
|
||||
"beta = 0.0; omega1 = 1.0; omega2 = -0.5\n",
|
||||
"y = linear_function_2D(x1,x2,beta, omega1, omega2)\n",
|
||||
"\n",
|
||||
"# Draw the function.\n",
|
||||
"# Color represents y value (brighter = higher value)\n",
|
||||
"# Black = -10 or less, White = +10 or more\n",
|
||||
"# 0 = mid orange\n",
|
||||
"# Lines are conoturs where value is equal\n",
|
||||
"draw_2D_function(x1,x2,y)\n",
|
||||
"\n",
|
||||
"# TODO\n",
|
||||
"# Predict what this plot will look like if you set omega_1 to zero\n",
|
||||
"# Change the code and see if you are right.\n",
|
||||
"\n",
|
||||
"# TODO\n",
|
||||
"# Predict what this plot will look like if you set omega_2 to zero\n",
|
||||
"# Change the code and see if you are right.\n",
|
||||
"\n",
|
||||
"# TODO\n",
|
||||
"# Predict what this plot will look like if you set beta to -5\n",
|
||||
"# Change the code and see if you are correct\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "i8tLwpls476R"
|
||||
},
|
||||
"source": [
|
||||
"Often we will want to compute many linear functions at the same time. For example, we might have three inputs, $x_1$, $x_2$, and $x_3$ and want to compute two linear functions giving $y_1$ and $y_2$. Of course, we could do this by just running each equation separately,<br><br>\n",
|
||||
"\n",
|
||||
"\\begin{eqnarray}y_1 &=& \\beta_1 + \\omega_{11} x_1 + \\omega_{12} x_2 + \\omega_{13} x_3\\\\\n",
|
||||
"y_2 &=& \\beta_2 + \\omega_{21} x_1 + \\omega_{22} x_2 + \\omega_{23} x_3.\n",
|
||||
"\\end{eqnarray}<br>\n",
|
||||
"\n",
|
||||
"However, we can write it more compactly with vectors and matrices:\n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
"\\begin{bmatrix} y_1\\\\ y_2 \\end{bmatrix} = \\begin{bmatrix}\\beta_{1}\\\\\\beta_{2}\\end{bmatrix}+ \\begin{bmatrix}\\omega_{11}&\\omega_{12}&\\omega_{13}\\\\\\omega_{21}&\\omega_{22}&\\omega_{23}\\end{bmatrix}\\begin{bmatrix}x_{1}\\\\x_{2}\\\\x_{3}\\end{bmatrix},\n",
|
||||
"\\end{equation}<br>\n",
|
||||
"or\n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
"\\mathbf{y} = \\boldsymbol\\beta +\\boldsymbol\\Omega\\mathbf{x}.\n",
|
||||
"\\end{equation}\n",
|
||||
"\n",
|
||||
"for short. Here, lowercase bold symbols are used for vectors. Upper case bold symbols are used for matrices.\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "MjHXMavh9IUz"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define a linear function with three inputs, x1, x2, and x_3\n",
|
||||
"def linear_function_3D(x1,x2,x3,beta,omega1,omega2,omega3):\n",
|
||||
" # TODO -- replace the code below with formula for a single 3D linear equation\n",
|
||||
" y = x1\n",
|
||||
"\n",
|
||||
" return y"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "fGzVJQ6N-mHJ"
|
||||
},
|
||||
"source": [
|
||||
"Let's compute two linear equations, using both the individual equations and the vector / matrix form and check they give the same result"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "Swd_bFIE9p2n"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define the parameters\n",
|
||||
"beta1 = 0.5; beta2 = 0.2\n",
|
||||
"omega11 = -1.0 ; omega12 = 0.4; omega13 = -0.3\n",
|
||||
"omega21 = 0.1 ; omega22 = 0.1; omega23 = 1.2\n",
|
||||
"\n",
|
||||
"# Define the inputs\n",
|
||||
"x1 = 4 ; x2 =-1; x3 = 2\n",
|
||||
"\n",
|
||||
"# Compute using the individual equations\n",
|
||||
"y1 = linear_function_3D(x1,x2,x3,beta1,omega11,omega12,omega13)\n",
|
||||
"y2 = linear_function_3D(x1,x2,x3,beta2,omega21,omega22,omega23)\n",
|
||||
"print(\"Individual equations\")\n",
|
||||
"print('y1 = %3.3f\\ny2 = %3.3f'%((y1,y2)))\n",
|
||||
"\n",
|
||||
"# Define vectors and matrices\n",
|
||||
"beta_vec = np.array([[beta1],[beta2]])\n",
|
||||
"omega_mat = np.array([[omega11,omega12,omega13],[omega21,omega22,omega23]])\n",
|
||||
"x_vec = np.array([[x1], [x2], [x3]])\n",
|
||||
"\n",
|
||||
"# Compute with vector/matrix form\n",
|
||||
"y_vec = beta_vec+np.matmul(omega_mat, x_vec)\n",
|
||||
"print(\"Matrix/vector form\")\n",
|
||||
"print('y1= %3.3f\\ny2 = %3.3f'%((y_vec[0],y_vec[1])))\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "3LGRoTMLU8ZU"
|
||||
},
|
||||
"source": [
|
||||
"# Questions\n",
|
||||
"\n",
|
||||
"1. A single linear equation with three inputs (i.e. **linear_function_3D()**) associates a value y with each point in a 3D space ($x_1$,$x_2$,$x_3$). Is it possible to visualize this? What value is at position (0,0,0)?\n",
|
||||
"\n",
|
||||
"2. Write code to compute three linear equations with two inputs ($x_1$, $x_2$) using both the individual equations and the matrix form (you can make up any values for the inputs $\\beta_{i}$ and the slopes $\\omega_{ij}$."
|
||||
]
|
||||
},
|
||||
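A possible answer sketch for question 2, with made-up intercepts and slopes (all numeric values here are illustrative):

import numpy as np

# Three equations, two inputs: y = beta + Omega @ x
beta = np.array([[0.1], [0.2], [0.3]])                    # 3x1 intercepts
Omega = np.array([[1.0, -0.5], [0.3, 0.8], [-0.2, 0.4]])  # 3x2 slopes
x = np.array([[2.0], [1.0]])                              # 2x1 input
# Individual equations
y1 = 0.1 + 1.0 * 2.0 + -0.5 * 1.0
y2 = 0.2 + 0.3 * 2.0 + 0.8 * 1.0
y3 = 0.3 + -0.2 * 2.0 + 0.4 * 1.0
# Matrix form; should print the same three numbers
y = beta + np.matmul(Omega, x)
print(y1, y2, y3)
print(y)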
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "7Y5zdKtKZAB2"
|
||||
},
|
||||
"source": [
|
||||
"# Special functions\n",
|
||||
"\n",
|
||||
"Throughout the book, we'll be using some special functions (see Appendix B.1.3). The most important of these are the logarithm and exponential functions. Let's investigate their properties.\n",
|
||||
"\n",
|
||||
"We'll start with the exponential function $y=\\mbox{exp}[x]=e^x$ which maps the real line $[-\\infty,+\\infty]$ to non-negative numbers $[0,+\\infty]$."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "c_GkjiY9IWCu"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Draw the exponential function\n",
|
||||
"\n",
|
||||
"# Define an array of x values from -5 to 5 with increments of 0.1\n",
|
||||
"x = np.arange(-5.0,5.0, 0.01)\n",
|
||||
"y = np.exp(x) ;\n",
|
||||
"\n",
|
||||
"# Plot this function\n",
|
||||
"fig, ax = plt.subplots()\n",
|
||||
"ax.plot(x,y,'r-')\n",
|
||||
"ax.set_ylim([0,100]);ax.set_xlim([-5,5])\n",
|
||||
"ax.set_xlabel('x'); ax.set_ylabel('exp[x]')\n",
|
||||
"plt.show"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "XyrT8257IWCu"
|
||||
},
|
||||
"source": [
|
||||
"# Questions\n",
|
||||
"\n",
|
||||
"1. What is $\\mbox{exp}[0]$? \n",
|
||||
"2. What is $\\mbox{exp}[1]$?\n",
|
||||
"3. What is $\\mbox{exp}[-\\infty]$?\n",
|
||||
"4. What is $\\mbox{exp}[+\\infty]$?\n",
|
||||
"5. A function is convex if we can draw a straight line between any two points on the\n",
|
||||
"function, and this line always lies above the function. Similarly, a function is concave\n",
|
||||
"if a straight line between any two points always lies below the function. Is the exponential function convex or concave or neither?\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "R6A4e5IxIWCu"
|
||||
},
|
||||
"source": [
|
||||
"Now let's consider the logarithm function $y=\\log[x]$. Throughout the book we always use natural (base $e$) logarithms. The log funcction maps non-negative numbers $[0,\\infty]$ to real numbers $[-\\infty,\\infty]$. It is the inverse of the exponential function. So when we compute $\\log[x]$ we are really asking \"What is the number $y$ so that $e^y=x$?\""
|
||||
]
|
||||
},
|
||||
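A quick numeric check of this inverse relationship (numpy is already imported at the top of the notebook):

import numpy as np
print(np.log(np.exp(3.0)))  # 3.0, since log undoes exp
print(np.exp(np.log(4.0)))  # 4.0, since exp undoes log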
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "fOR7v2iXIWCu"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Draw the logarithm function\n",
|
||||
"\n",
|
||||
"# Define an array of x values from -5 to 5 with increments of 0.1\n",
|
||||
"x = np.arange(0.01,5.0, 0.01)\n",
|
||||
"y = np.log(x) ;\n",
|
||||
"\n",
|
||||
"# Plot this function\n",
|
||||
"fig, ax = plt.subplots()\n",
|
||||
"ax.plot(x,y,'r-')\n",
|
||||
"ax.set_ylim([-5,5]);ax.set_xlim([0,5])\n",
|
||||
"ax.set_xlabel('x'); ax.set_ylabel('$\\log[x]$')\n",
|
||||
"plt.show"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "yYWrL5AXIWCv"
|
||||
},
|
||||
"source": [
|
||||
"# Questions\n",
|
||||
"\n",
|
||||
"1. What is $\\mbox{log}[0]$? \n",
|
||||
"2. What is $\\mbox{log}[1]$?\n",
|
||||
"3. What is $\\mbox{log}[e]$?\n",
|
||||
"4. What is $\\mbox{log}[\\exp[3]]$?\n",
|
||||
"5. What is $\\mbox{exp}[\\log[4]]$?\n",
|
||||
"6. What is $\\mbox{log}[-1]$?\n",
|
||||
"7. Is the logarithm function concave or convex?\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [],
|
||||
"metadata": {
|
||||
"id": "XG0CKLiPJI7I"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.10"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
253 Notebooks/Chap02/2_1_Supervised_Learning.ipynb Normal file
@@ -0,0 +1,253 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyOmndC0N7dFV7W3Mh5ljOLl",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap02/2_1_Supervised_Learning.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Notebook 2.1 Supervised Learning\n",
|
||||
"\n",
|
||||
"The purpose of this notebook is to explore the linear regression model dicussed in Chapter 2 of the book.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and write code to complete the functions. There are also questions interspersed in the text.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "sfB2oX2RNvuF"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "uoYl2Gn3Nr52"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Math library\n",
|
||||
"import numpy as np\n",
|
||||
"# Plotting library\n",
|
||||
"import matplotlib.pyplot as plt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Create some input / output data\n",
|
||||
"x = np.array([0.03, 0.19, 0.34, 0.46, 0.78, 0.81, 1.08, 1.18, 1.39, 1.60, 1.65, 1.90])\n",
|
||||
"y = np.array([0.67, 0.85, 1.05, 1.0, 1.40, 1.5, 1.3, 1.54, 1.55, 1.68, 1.73, 1.6 ])\n",
|
||||
"\n",
|
||||
"print(x)\n",
|
||||
"print(y)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "MUbTD4znORtd"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define 1D linear regression model\n",
|
||||
"def f(x, phi0, phi1):\n",
|
||||
" # TODO : Replace this line with the linear regression model (eq 2.4)\n",
|
||||
" y = x\n",
|
||||
"\n",
|
||||
" return y"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "lw2dCRHwSW9a"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
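A minimal sketch of the completed model; equation 2.4 is the straight line whose parameters are labeled "Intercept, $\phi_0$" and "Slope, $\phi_1$" later in this notebook:

def f(x, phi0, phi1):
    # Straight line: intercept plus slope times input
    y = phi0 + phi1 * x
    return y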
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Function to help plot the data\n",
|
||||
"def plot(x, y, phi0, phi1):\n",
|
||||
" fig,ax = plt.subplots()\n",
|
||||
" ax.scatter(x,y)\n",
|
||||
" plt.xlim([0,2.0])\n",
|
||||
" plt.ylim([0,2.0])\n",
|
||||
" ax.set_xlabel('Input, $x$')\n",
|
||||
" ax.set_ylabel('Output, $y$')\n",
|
||||
" # Draw line\n",
|
||||
" x_line = np.arange(0,2,0.01)\n",
|
||||
" y_line = f(x_line, phi0, phi1)\n",
|
||||
" plt.plot(x_line, y_line,'b-',lw=2)\n",
|
||||
"\n",
|
||||
" plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "VT4F3xxSOt8C"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Set the intercept and slope as in figure 2.2b\n",
|
||||
"phi0 = 0.4 ; phi1 = 0.2\n",
|
||||
"# Plot the data and the model\n",
|
||||
"plot(x,y,phi0,phi1)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "AkdZdmhHWuVR"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Function to calculate the loss\n",
|
||||
"def compute_loss(x,y,phi0,phi1):\n",
|
||||
"\n",
|
||||
" # TODO Replace this line with the loss calculation (equation 2.5)\n",
|
||||
" loss = 0\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" return loss"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "1-GW218wX44b"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
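A least-squares completion of the loss (sum of squared residuals); this choice reproduces the ground-truth values 7.07 and 10.28 printed by the cells below:

import numpy as np

def compute_loss(x, y, phi0, phi1):
    # Sum of squared differences between model predictions and observed outputs
    pred = f(x, phi0, phi1)
    loss = np.sum((pred - y) ** 2)
    return loss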
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Compute the loss for our current model\n",
|
||||
"loss = compute_loss(x,y,phi0,phi1)\n",
|
||||
"print(f'Your Loss = {loss:3.2f}, Ground truth =7.07')"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Hgw7_GzBZ8tX"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Set the intercept and slope as in figure 2.2c\n",
|
||||
"phi0 = 1.60 ; phi1 =-0.8\n",
|
||||
"# Plot the data and the model\n",
|
||||
"plot(x,y,phi0,phi1)\n",
|
||||
"loss = compute_loss(x,y,phi0,phi1)\n",
|
||||
"print(f'Your Loss = {loss:3.2f}, Ground truth =10.28')"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "_vZS28-FahGP"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# TO DO -- Change the parameters manually to fit the model\n",
|
||||
"# First fix phi1 and try changing phi0 until you can't make the loss go down any more\n",
|
||||
"# Then fix phi0 and try changing phi1 until you can't make the loss go down any more\n",
|
||||
"# Repeat this process until you find a set of parameters that fit the model as in figure 2.2d\n",
|
||||
"# You can either do this by hand, or if you want to get fancy, write code to descent automatically in this way\n",
|
||||
"# Start at these values:\n",
|
||||
"phi0 = 1.60 ; phi1 =-0.8\n",
|
||||
"\n",
|
||||
"plot(x,y,phi0,phi1)\n",
|
||||
"print(f'Your Loss = {compute_loss(x,y,phi0,phi1):3.2f}')"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "VzpnzdW5d9vj"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
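For the "get fancy" option, a rough coordinate-descent sketch that relies on compute_loss above (the step size and iteration count are arbitrary choices):

phi0, phi1 = 1.60, -0.8
step = 0.01
for it in range(500):
    # Nudge phi0 in whichever direction lowers the loss, then do the same for phi1
    if compute_loss(x, y, phi0 + step, phi1) < compute_loss(x, y, phi0, phi1):
        phi0 += step
    elif compute_loss(x, y, phi0 - step, phi1) < compute_loss(x, y, phi0, phi1):
        phi0 -= step
    if compute_loss(x, y, phi0, phi1 + step) < compute_loss(x, y, phi0, phi1):
        phi1 += step
    elif compute_loss(x, y, phi0, phi1 - step) < compute_loss(x, y, phi0, phi1):
        phi1 -= step
print(phi0, phi1, compute_loss(x, y, phi0, phi1))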
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Visualizing the loss function\n",
|
||||
"\n",
|
||||
"The above process is equivalent to to descending coordinate wise on the loss function<br>\n",
|
||||
"\n",
|
||||
"Now let's plot that function"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "MNC4qEZognEe"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Make a 2D grid of possible phi0 and phi1 values\n",
|
||||
"phi0_mesh, phi1_mesh = np.meshgrid(np.arange(0.0,2.0,0.02), np.arange(-1.0,1.0,0.02))\n",
|
||||
"\n",
|
||||
"# Make a 2D array for the losses\n",
|
||||
"all_losses = np.zeros_like(phi1_mesh)\n",
|
||||
"# Run throught each 2D combination of phi0, phi1 and compute loss\n",
|
||||
"for indices,temp in np.ndenumerate(phi1_mesh):\n",
|
||||
" all_losses[indices] = compute_loss(x,y, phi0_mesh[indices], phi1_mesh[indices])\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "ATrU8sqqg2hJ"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Plot the loss function as a heatmap\n",
|
||||
"fig = plt.figure()\n",
|
||||
"ax = plt.axes()\n",
|
||||
"fig.set_size_inches(7,7)\n",
|
||||
"levels = 256\n",
|
||||
"ax.contourf(phi0_mesh, phi1_mesh, all_losses ,levels)\n",
|
||||
"levels = 40\n",
|
||||
"ax.contour(phi0_mesh, phi1_mesh, all_losses ,levels, colors=['#80808080'])\n",
|
||||
"ax.set_ylim([1,-1])\n",
|
||||
"ax.set_xlabel('Intercept, $\\phi_0$')\n",
|
||||
"ax.set_ylabel('Slope, $\\phi_1$')\n",
|
||||
"\n",
|
||||
"# Plot the position of your best fitting line on the loss function\n",
|
||||
"# It should be close to the minimum\n",
|
||||
"ax.plot(phi0,phi1,'ro')\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "6OXAjx5xfQkl"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
|
||||
]
|
||||
}
|
||||
364 Notebooks/Chap03/3_1_Shallow_Networks_I.ipynb Normal file
File diff suppressed because one or more lines are too long
294 Notebooks/Chap03/3_2_Shallow_Networks_II.ipynb Normal file
@@ -0,0 +1,294 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyPFqKOqd6BjlymOawCRkmfn",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap03/3_2_Shallow_Networks_II.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 3.2 -- Shallow neural networks II**\n",
|
||||
"\n",
|
||||
"The purpose of this notebook is to gain some familiarity with shallow neural networks with 2D inputs. It works through an example similar to figure 3.8 and experiments with different activation functions. <br><br>\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and write code to complete the functions. There are also questions interspersed in the text.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "1Z6LB4Ybn1oN"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "hAM55ZjSncOk"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Imports math library\n",
|
||||
"import numpy as np\n",
|
||||
"# Imports plotting library\n",
|
||||
"import matplotlib.pyplot as plt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Code to draw 2D function -- read it so you know what is going on, but you don't have to change it\n",
|
||||
"def draw_2D_function(ax, x1_mesh, x2_mesh, y):\n",
|
||||
" pos = ax.contourf(x1_mesh, x2_mesh, y, levels=256 ,cmap = 'hot', vmin=-10,vmax=10.0)\n",
|
||||
" ax.set_xlabel('x1');ax.set_ylabel('x2')\n",
|
||||
" levels = np.arange(-10,10,1.0)\n",
|
||||
" ax.contour(x1_mesh, x2_mesh, y, levels, cmap='winter')\n",
|
||||
"\n",
|
||||
"# Plot the shallow neural network. We'll assume input in is range [0,10],[0,10] and output [-10,10]\n",
|
||||
"def plot_neural_2_inputs(x1,x2, y, pre_1, pre_2, pre_3, act_1, act_2, act_3, w_act_1, w_act_2, w_act_3):\n",
|
||||
"\n",
|
||||
" fig, ax = plt.subplots(3,3)\n",
|
||||
" fig.set_size_inches(8.5, 8.5)\n",
|
||||
" fig.tight_layout(pad=3.0)\n",
|
||||
" draw_2D_function(ax[0,0], x1,x2,pre_1); ax[0,0].set_title('Preactivation')\n",
|
||||
" draw_2D_function(ax[0,1], x1,x2,pre_2); ax[0,1].set_title('Preactivation')\n",
|
||||
" draw_2D_function(ax[0,2], x1,x2,pre_3); ax[0,2].set_title('Preactivation')\n",
|
||||
" draw_2D_function(ax[1,0], x1,x2,act_1); ax[1,0].set_title('Activation')\n",
|
||||
" draw_2D_function(ax[1,1], x1,x2,act_2); ax[1,1].set_title('Activation')\n",
|
||||
" draw_2D_function(ax[1,2], x1,x2,act_3); ax[1,2].set_title('Activation')\n",
|
||||
" draw_2D_function(ax[2,0], x1,x2,w_act_1); ax[2,0].set_title('Weighted Act')\n",
|
||||
" draw_2D_function(ax[2,1], x1,x2,w_act_2); ax[2,1].set_title('Weighted Act')\n",
|
||||
" draw_2D_function(ax[2,2], x1,x2,w_act_3); ax[2,2].set_title('Weighted Act')\n",
|
||||
" plt.show()\n",
|
||||
"\n",
|
||||
" fig, ax = plt.subplots()\n",
|
||||
" draw_2D_function(ax,x1,x2,y)\n",
|
||||
" ax.set_title('Network output, $y$')\n",
|
||||
" ax.set_aspect(1.0)\n",
|
||||
" plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "IHtCP0t2HC4c"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define the Rectified Linear Unit (ReLU) function\n",
|
||||
"def ReLU(preactivation):\n",
|
||||
" activation = preactivation.clip(0.0)\n",
|
||||
" return activation"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Lw71laEeJgKs"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define a shallow neural network with, two input, one output, and three hidden units\n",
|
||||
"def shallow_2_1_3(x1,x2, activation_fn, phi_0,phi_1,phi_2,phi_3, theta_10, theta_11,\\\n",
|
||||
" theta_12, theta_20, theta_21, theta_22, theta_30, theta_31, theta_32):\n",
|
||||
" # TODO Replace the lines below to compute the three initial linear functions\n",
|
||||
" # (figure 3.8a-c) from the theta parameters. These are the preactivations\n",
|
||||
" pre_1 = np.zeros_like(x1)\n",
|
||||
" pre_2 = np.zeros_like(x1)\n",
|
||||
" pre_3 = np.zeros_like(x1)\n",
|
||||
"\n",
|
||||
" # Pass these through the ReLU function to compute the activations as in\n",
|
||||
" # figure 3.8 d-f\n",
|
||||
" act_1 = activation_fn(pre_1)\n",
|
||||
" act_2 = activation_fn(pre_2)\n",
|
||||
" act_3 = activation_fn(pre_3)\n",
|
||||
"\n",
|
||||
" # TODO Replace the code below to weight the activations using phi1, phi2 and phi3\n",
|
||||
" # To create the equivalent of figure 3.8 g-i\n",
|
||||
" w_act_1 = np.zeros_like(x1)\n",
|
||||
" w_act_2 = np.zeros_like(x1)\n",
|
||||
" w_act_3 = np.zeros_like(x1)\n",
|
||||
"\n",
|
||||
" # TODO Replace the code below to combing the weighted activations and add\n",
|
||||
" # phi_0 to create the output as in figure 3.8j\n",
|
||||
" y = np.zeros_like(x1)\n",
|
||||
"\n",
|
||||
" # Return everything we have calculated\n",
|
||||
" return y, pre_1, pre_2, pre_3, act_1, act_2, act_3, w_act_1, w_act_2, w_act_3"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "VIZA8HywIjfl"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
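A sketch of one possible completion of the TODOs above; with two inputs, each preactivation is a linear function of x1 and x2, and the output recombines the weighted activations (this mirrors the completed 1D version, shallow_1_1_3, in notebook 3.4 below):

pre_1 = theta_10 + theta_11 * x1 + theta_12 * x2
pre_2 = theta_20 + theta_21 * x1 + theta_22 * x2
pre_3 = theta_30 + theta_31 * x1 + theta_32 * x2
# ... pass these through activation_fn as already written, then:
w_act_1 = phi_1 * act_1
w_act_2 = phi_2 * act_2
w_act_3 = phi_3 * act_3
y = phi_0 + w_act_1 + w_act_2 + w_act_3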
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Now lets define some parameters and run the neural network\n",
|
||||
"theta_10 = -4.0 ; theta_11 = 0.9; theta_12 = 0.0\n",
|
||||
"theta_20 = 5.0 ; theta_21 = -0.9 ; theta_22 = -0.5\n",
|
||||
"theta_30 = -7 ; theta_31 = 0.5; theta_32 = 0.9\n",
|
||||
"phi_0 = 0.0; phi_1 = -2.0; phi_2 = 2.0; phi_3 = 1.5\n",
|
||||
"\n",
|
||||
"x1 = np.arange(0.0, 10.0, 0.1)\n",
|
||||
"x2 = np.arange(0.0, 10.0, 0.1)\n",
|
||||
"x1,x2 = np.meshgrid(x1,x2) # https://www.geeksforgeeks.org/numpy-meshgrid-function/\n",
|
||||
"\n",
|
||||
"# We run the neural network for each of these input values\n",
|
||||
"y, pre_1, pre_2, pre_3, act_1, act_2, act_3, w_act_1, w_act_2, w_act_3 = \\\n",
|
||||
" shallow_2_1_3(x1,x2, ReLU, phi_0,phi_1,phi_2,phi_3, theta_10, theta_11, theta_12, theta_20, theta_21, theta_22, theta_30, theta_31, theta_32)\n",
|
||||
"# And then plot it\n",
|
||||
"plot_neural_2_inputs(x1,x2, y, pre_1, pre_2, pre_3, act_1, act_2, act_3, w_act_1, w_act_2, w_act_3)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "51lvc9bfIrs4"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"How many different linear polytopes are made by this model? Identify each in the network output."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "j62IizIfMYZK"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now we'll extend this model to have two outputs $y_1$ and $y_2$, each of which can be visualized with a separate heatmap. You will now have sets of parameters $\\phi_{10}, \\phi_{11},\\phi_{12}$ and $\\phi_{2}, \\phi_{21},\\phi_{22}$ that correspond to each of these outputs."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Xl6LcrUyM7Lh"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Plot the shallow neural network. We'll assume input in is range [0,10],[0,10] and output [-10,10]\n",
|
||||
"def plot_neural_2_inputs_2_outputs(x1,x2, y1, y2, pre_1, pre_2, pre_3, act_1, act_2, act_3, w_act_11, w_act_12, w_act_13, w_act_21, w_act_22, w_act_23):\n",
|
||||
"\n",
|
||||
" # Plot intermediate plots if flag set\n",
|
||||
" fig, ax = plt.subplots(4,3)\n",
|
||||
" fig.set_size_inches(8.5, 8.5)\n",
|
||||
" fig.tight_layout(pad=3.0)\n",
|
||||
" draw_2D_function(ax[0,0], x1,x2,pre_1); ax[0,0].set_title('Preactivation')\n",
|
||||
" draw_2D_function(ax[0,1], x1,x2,pre_2); ax[0,1].set_title('Preactivation')\n",
|
||||
" draw_2D_function(ax[0,2], x1,x2,pre_3); ax[0,2].set_title('Preactivation')\n",
|
||||
" draw_2D_function(ax[1,0], x1,x2,act_1); ax[1,0].set_title('Activation')\n",
|
||||
" draw_2D_function(ax[1,1], x1,x2,act_2); ax[1,1].set_title('Activation')\n",
|
||||
" draw_2D_function(ax[1,2], x1,x2,act_3); ax[1,2].set_title('Activation')\n",
|
||||
" draw_2D_function(ax[2,0], x1,x2,w_act_11); ax[2,0].set_title('Weighted Act 1')\n",
|
||||
" draw_2D_function(ax[2,1], x1,x2,w_act_12); ax[2,1].set_title('Weighted Act 1')\n",
|
||||
" draw_2D_function(ax[2,2], x1,x2,w_act_13); ax[2,2].set_title('Weighted Act 1')\n",
|
||||
" draw_2D_function(ax[3,0], x1,x2,w_act_21); ax[3,0].set_title('Weighted Act 2')\n",
|
||||
" draw_2D_function(ax[3,1], x1,x2,w_act_22); ax[3,1].set_title('Weighted Act 2')\n",
|
||||
" draw_2D_function(ax[3,2], x1,x2,w_act_23); ax[3,2].set_title('Weighted Act 2')\n",
|
||||
" plt.show()\n",
|
||||
"\n",
|
||||
" fig, ax = plt.subplots()\n",
|
||||
" draw_2D_function(ax,x1,x2,y1)\n",
|
||||
" ax.set_title('Network output, $y_1$')\n",
|
||||
" ax.set_aspect(1.0)\n",
|
||||
" plt.show()\n",
|
||||
"\n",
|
||||
" fig, ax = plt.subplots()\n",
|
||||
" draw_2D_function(ax,x1,x2,y2)\n",
|
||||
" ax.set_title('Network output, $y_2$')\n",
|
||||
" ax.set_aspect(1.0)\n",
|
||||
" plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "DlznqZWdPtjI"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"\n",
|
||||
"# Define a shallow neural network with, two inputs, two outputs, and three hidden units\n",
|
||||
"def shallow_2_2_3(x1,x2, activation_fn, phi_10,phi_11,phi_12,phi_13, phi_20,phi_21,phi_22,phi_23, theta_10, theta_11,\\\n",
|
||||
" theta_12, theta_20, theta_21, theta_22, theta_30, theta_31, theta_32):\n",
|
||||
"\n",
|
||||
" # TODO -- write this function -- replace the dummy code blow\n",
|
||||
" pre_1 = np.zeros_like(x1)\n",
|
||||
" pre_2 = np.zeros_like(x1)\n",
|
||||
" pre_3 = np.zeros_like(x1)\n",
|
||||
" act_1 = np.zeros_like(x1)\n",
|
||||
" act_2 = np.zeros_like(x1)\n",
|
||||
" act_3 = np.zeros_like(x1)\n",
|
||||
" w_act_11 = np.zeros_like(x1)\n",
|
||||
" w_act_12 = np.zeros_like(x1)\n",
|
||||
" w_act_13 = np.zeros_like(x1)\n",
|
||||
" w_act_21 = np.zeros_like(x1)\n",
|
||||
" w_act_22 = np.zeros_like(x1)\n",
|
||||
" w_act_23 = np.zeros_like(x1)\n",
|
||||
" y1 = np.zeros_like(x1)\n",
|
||||
" y2 = np.zeros_like(x1)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" # Return everything we have calculated\n",
|
||||
" return y1,y2, pre_1, pre_2, pre_3, act_1, act_2, act_3, w_act_11, w_act_12, w_act_13, w_act_21, w_act_22, w_act_23\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "m8KAhwr4QWro"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
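A sketch of the two-output recombination for shallow_2_2_3; the preactivations and activations are computed exactly as in shallow_2_1_3, and each output then gets its own bias and weights:

w_act_11 = phi_11 * act_1; w_act_12 = phi_12 * act_2; w_act_13 = phi_13 * act_3
w_act_21 = phi_21 * act_1; w_act_22 = phi_22 * act_2; w_act_23 = phi_23 * act_3
y1 = phi_10 + w_act_11 + w_act_12 + w_act_13
y2 = phi_20 + w_act_21 + w_act_22 + w_act_23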
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Now lets define some parameters and run the neural network\n",
|
||||
"theta_10 = -4.0 ; theta_11 = 0.9; theta_12 = 0.0\n",
|
||||
"theta_20 = 5.0 ; theta_21 = -0.9 ; theta_22 = -0.5\n",
|
||||
"theta_30 = -7 ; theta_31 = 0.5; theta_32 = 0.9\n",
|
||||
"phi_10 = 0.0; phi_11 = -2.0; phi_12 = 2.0; phi_13 = 1.5\n",
|
||||
"phi_20 = -2.0; phi_21 = -1.0; phi_22 = -2.0; phi_23 = 0.8\n",
|
||||
"\n",
|
||||
"x1 = np.arange(0.0, 10.0, 0.1)\n",
|
||||
"x2 = np.arange(0.0, 10.0, 0.1)\n",
|
||||
"x1,x2 = np.meshgrid(x1,x2) # https://www.geeksforgeeks.org/numpy-meshgrid-function/\n",
|
||||
"\n",
|
||||
"# We run the neural network for each of these input values\n",
|
||||
"y1, y2, pre_1, pre_2, pre_3, act_1, act_2, act_3, w_act_11, w_act_12, w_act_13, w_act_21, w_act_22, w_act_23 = \\\n",
|
||||
" shallow_2_2_3(x1,x2, ReLU, phi_10,phi_11,phi_12,phi_13, phi_20,phi_21,phi_22,phi_23, theta_10, theta_11, theta_12, theta_20, theta_21, theta_22, theta_30, theta_31, theta_32)\n",
|
||||
"# And then plot it\n",
|
||||
"plot_neural_2_inputs_2_outputs(x1,x2, y1, y2, pre_1, pre_2, pre_3, act_1, act_2, act_3, w_act_11, w_act_12, w_act_13, w_act_21, w_act_22, w_act_23)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "ms4YTqbYUeRV"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
|
||||
]
|
||||
}
|
||||
259 Notebooks/Chap03/3_3_Shallow_Network_Regions.ipynb Normal file
@@ -0,0 +1,259 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyMhLSGU8+odPS/CoW5PwKna",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap03/3_3_Shallow_Network_Regions.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 3.3 -- Shallow network regions**\n",
|
||||
"\n",
|
||||
"The purpose of this notebook is to compute the maximum possible number of linear regions as seen in figure 3.9 of the book.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and write code to complete the functions. There are also questions interspersed in the text.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "DCTC8fQ6cp-n"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Imports math library\n",
|
||||
"import numpy as np\n",
|
||||
"# Imports plotting library\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"# Imports math libray\n",
|
||||
"import math"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "W3C1ZA1gcpq_"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"The number of regions $N$ created by a shallow neural network with $D_i$ inputs and $D$ hidden units is given by Zaslavsky's formula:\n",
|
||||
"\n",
|
||||
"\\begin{equation}N = \\sum_{j=1}^{D_{i}}\\binom{D}{j}=\\sum_{j=1}^{D_{i}} \\frac{D!}{(D-j)!j!} \\end{equation} <br>\n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "TbfanfXBe84L"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "4UQ2n0RWcgOb"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def number_regions(Di, D):\n",
|
||||
" # TODO -- implement Zaslavsky's formula\n",
|
||||
" # You will need to use math.factorial() https://www.geeksforgeeks.org/factorial-in-python/\n",
|
||||
" # Replace this code\n",
|
||||
" N = 1;\n",
|
||||
"\n",
|
||||
" return N"
|
||||
]
|
||||
},
|
||||
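A sketch of the formula using math.factorial(), as the hint suggests; the lower limit j = 0 matters, since it contributes the 1 needed to reproduce the test values in the cells below:

import math

def number_regions(Di, D):
    # Zaslavsky's formula: sum over j = 0..Di of D! / ((D - j)! * j!)
    N = sum(math.factorial(D) // (math.factorial(D - j) * math.factorial(j))
            for j in range(Di + 1))
    return N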
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Calculate the number of regions for 2D input (Di=2) and 3 hidden units (D=3) as in figure 3.8j\n",
|
||||
"N = number_regions(2, 3)\n",
|
||||
"print(f\"Di=2, D=3, Number of regions = {int(N)}, True value = 7\")"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "AqSUfuJDigN9"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Calculate the number of regions for 10D input (Di=2) and 50 hidden units (D=50)\n",
|
||||
"N = number_regions(10, 50)\n",
|
||||
"print(f\"Di=10, D=50, Number of regions = {int(N)}, True value = 13432735556\")"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "krNKPV9gjCu-"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"This works but there is a complication. If the number of hidden units $D$ is fewer than the number of hidden dimensions $D_i$ , the formula will fail. When this is the case, there are just $2^D$ regions (see figure 3.10 to understand why).\n",
|
||||
"\n",
|
||||
"Let's demonstrate this:"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "rk1a2LqGkO9u"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Show that calculation fails when $D_i < D$\n",
|
||||
"try:\n",
|
||||
" N = number_regions(10, 8)\n",
|
||||
" print(f\"Di=10, D=8, Number of regions = {int(N)}, True value = 256\")\n",
|
||||
"except Exception as error:\n",
|
||||
" print(\"An exception occurred:\", error)\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "uq5IeAZTkIMg"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's do the calculation properly when D<Di\n",
|
||||
"D = 8; Di = 10\n",
|
||||
"N = np.power(2,D)\n",
|
||||
"# We can equivalently do this by calling number_regions with the D twice\n",
|
||||
"# Think about why this works\n",
|
||||
"N2 = number_regions (D,D)\n",
|
||||
"print(f\"Di=10, D=8, Number of regions = {int(N)}, Number of regions = {int(N2)}, True value = 256\")"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Ig8Kg_ADjoQd"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Now let's plot the graph from figure 3.9a\n",
|
||||
"dims = np.array([1,5,10,50,100])\n",
|
||||
"regions = np.zeros((dims.shape[0], 1000))\n",
|
||||
"for c_dim in range(dims.shape[0]):\n",
|
||||
" D_i = dims[c_dim]\n",
|
||||
" print (f\"Counting regions for {D_i} input dimensions\")\n",
|
||||
" for D in range(1000):\n",
|
||||
" regions[c_dim, D] = number_regions(np.min([D_i,D]), D)\n",
|
||||
"\n",
|
||||
"fig, ax = plt.subplots()\n",
|
||||
"ax.semilogy(regions[0,:],'k-')\n",
|
||||
"ax.semilogy(regions[1,:],'b-')\n",
|
||||
"ax.semilogy(regions[2,:],'m-')\n",
|
||||
"ax.semilogy(regions[3,:],'c-')\n",
|
||||
"ax.semilogy(regions[4,:],'y-')\n",
|
||||
"ax.legend(['$D_i$=1', '$D_i$=5', '$D_i$=10', '$D_i$=50', '$D_i$=100'])\n",
|
||||
"ax.set_xlabel(\"Number of hidden units, D\")\n",
|
||||
"ax.set_ylabel(\"Number of regions, N\")\n",
|
||||
"plt.xlim([0,1000])\n",
|
||||
"plt.ylim([1e1,1e150])\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "5XnEOp0Bj_QK"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Now let's compute and plot the number of regions as a function of the number of parameters as in figure 3.9b\n",
|
||||
"# First let's write a function that computes the number of parameters as a function of the input dimension and number of hidden layers (assuming just one output)\n",
|
||||
"\n",
|
||||
"def number_parameters(D_i, D):\n",
|
||||
" # TODO -- replace this code with the proper calculation\n",
|
||||
" N = 1\n",
|
||||
"\n",
|
||||
" return N ;"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Pav1OsCnpm6P"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
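A sketch that simply mirrors the parameter count used by the plotting cell at the end of this notebook (params = D_i * D + 1 + D + 1), which reproduces the stated value of 90 for D_i = 10, D = 8:

def number_parameters(D_i, D):
    # Same expression as the params computation in the final plotting cell
    N = D_i * D + 1 + D + 1
    return N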
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Now let's test the code\n",
|
||||
"N = number_parameters(10, 8)\n",
|
||||
"print(f\"Di=10, D=8, Number of parameters = {int(N)}, True value = 90\")"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "VbhDmZ1gwkQj"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Now let's plot the graph from figure 3.9a (takes ~1min)\n",
|
||||
"dims = np.array([1,5,10,50,100])\n",
|
||||
"regions = np.zeros((dims.shape[0], 200))\n",
|
||||
"params = np.zeros((dims.shape[0], 200))\n",
|
||||
"\n",
|
||||
"# We'll compute the five lines separately this time to make it faster\n",
|
||||
"for c_dim in range(dims.shape[0]):\n",
|
||||
" D_i = dims[c_dim]\n",
|
||||
" print (f\"Counting regions for {D_i} input dimensions\")\n",
|
||||
" for c_hidden in range(1, 200):\n",
|
||||
" # Iterate over different ranges of number hidden variables for different input sizes\n",
|
||||
" D = int(c_hidden * 500 / D_i)\n",
|
||||
" params[c_dim, c_hidden] = D_i * D +1 + D +1\n",
|
||||
" regions[c_dim, c_hidden] = number_regions(np.min([D_i,D]), D)\n",
|
||||
"\n",
|
||||
"fig, ax = plt.subplots()\n",
|
||||
"ax.semilogy(params[0,:], regions[0,:],'k-')\n",
|
||||
"ax.semilogy(params[1,:], regions[1,:],'b-')\n",
|
||||
"ax.semilogy(params[2,:], regions[2,:],'m-')\n",
|
||||
"ax.semilogy(params[3,:], regions[3,:],'c-')\n",
|
||||
"ax.semilogy(params[4,:], regions[4,:],'y-')\n",
|
||||
"ax.legend(['$D_i$=1', '$D_i$=5', '$D_i$=10', '$D_i$=50', '$D_i$=100'])\n",
|
||||
"ax.set_xlabel(\"Number of parameters, D\")\n",
|
||||
"ax.set_ylabel(\"Number of regions, N\")\n",
|
||||
"plt.xlim([0,100000])\n",
|
||||
"plt.ylim([1e1,1e150])\n",
|
||||
"plt.show()\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "AH4nA50Au8-a"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
|
||||
]
|
||||
}
|
||||
426 Notebooks/Chap03/3_4_Activation_Functions.ipynb Normal file
@@ -0,0 +1,426 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyOu5BvK3aFb7ZEQKG5vfOZ1",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap03/3_4_Activation_Functions.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 3.4 -- Activation functions**\n",
|
||||
"\n",
|
||||
"The purpose of this practical is to experiment with different activation functions. <br>\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and write code to complete the functions. There are also questions interspersed in the text.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Mn0F56yY8ohX"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "2GaDML3I8Yx4"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Imports math library\n",
|
||||
"import numpy as np\n",
|
||||
"# Imports plotting library\n",
|
||||
"import matplotlib.pyplot as plt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Plot the shallow neural network. We'll assume input in is range [0,1] and output [-1,1]\n",
|
||||
"# If the plot_all flag is set to true, then we'll plot all the intermediate stages as in Figure 3.3\n",
|
||||
"def plot_neural(x, y, pre_1, pre_2, pre_3, act_1, act_2, act_3, w_act_1, w_act_2, w_act_3, plot_all=False, x_data=None, y_data=None):\n",
|
||||
"\n",
|
||||
" # Plot intermediate plots if flag set\n",
|
||||
" if plot_all:\n",
|
||||
" fig, ax = plt.subplots(3,3)\n",
|
||||
" fig.set_size_inches(8.5, 8.5)\n",
|
||||
" fig.tight_layout(pad=3.0)\n",
|
||||
" ax[0,0].plot(x,pre_1,'r-'); ax[0,0].set_ylabel('Preactivation')\n",
|
||||
" ax[0,1].plot(x,pre_2,'b-'); ax[0,1].set_ylabel('Preactivation')\n",
|
||||
" ax[0,2].plot(x,pre_3,'g-'); ax[0,2].set_ylabel('Preactivation')\n",
|
||||
" ax[1,0].plot(x,act_1,'r-'); ax[1,0].set_ylabel('Activation')\n",
|
||||
" ax[1,1].plot(x,act_2,'b-'); ax[1,1].set_ylabel('Activation')\n",
|
||||
" ax[1,2].plot(x,act_3,'g-'); ax[1,2].set_ylabel('Activation')\n",
|
||||
" ax[2,0].plot(x,w_act_1,'r-'); ax[2,0].set_ylabel('Weighted Act')\n",
|
||||
" ax[2,1].plot(x,w_act_2,'b-'); ax[2,1].set_ylabel('Weighted Act')\n",
|
||||
" ax[2,2].plot(x,w_act_3,'g-'); ax[2,2].set_ylabel('Weighted Act')\n",
|
||||
"\n",
|
||||
" for plot_y in range(3):\n",
|
||||
" for plot_x in range(3):\n",
|
||||
" ax[plot_y,plot_x].set_xlim([0,1]);ax[plot_x,plot_y].set_ylim([-1,1])\n",
|
||||
" ax[plot_y,plot_x].set_aspect(0.5)\n",
|
||||
" ax[2,plot_y].set_xlabel('Input, $x$');\n",
|
||||
" plt.show()\n",
|
||||
"\n",
|
||||
" fig, ax = plt.subplots()\n",
|
||||
" ax.plot(x,y)\n",
|
||||
" ax.set_xlabel('Input, $x$'); ax.set_ylabel('Output, $y$')\n",
|
||||
" ax.set_xlim([0,1]);ax.set_ylim([-1,1])\n",
|
||||
" ax.set_aspect(0.5)\n",
|
||||
" if x_data is not None:\n",
|
||||
" ax.plot(x_data, y_data, 'mo')\n",
|
||||
" for i in range(len(x_data)):\n",
|
||||
" ax.plot(x_data[i], y_data[i],)\n",
|
||||
" plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "AeHzflFt9Tgn"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define a shallow neural network with, one input, one output, and three hidden units\n",
|
||||
"def shallow_1_1_3(x, activation_fn, phi_0,phi_1,phi_2,phi_3, theta_10, theta_11, theta_20, theta_21, theta_30, theta_31):\n",
|
||||
" pre_1 = theta_10 + theta_11 * x\n",
|
||||
" pre_2 = theta_20 + theta_21 * x\n",
|
||||
" pre_3 = theta_30 + theta_31 * x\n",
|
||||
" # Pass these through the ReLU function to compute the activations as in\n",
|
||||
" # figure 3.3 d-f\n",
|
||||
" act_1 = activation_fn(pre_1)\n",
|
||||
" act_2 = activation_fn(pre_2)\n",
|
||||
" act_3 = activation_fn(pre_3)\n",
|
||||
"\n",
|
||||
" w_act_1 = phi_1 * act_1\n",
|
||||
" w_act_2 = phi_2 * act_2\n",
|
||||
" w_act_3 = phi_3 * act_3\n",
|
||||
"\n",
|
||||
" y = phi_0 + w_act_1 + w_act_2 + w_act_3\n",
|
||||
"\n",
|
||||
" # Return everything we have calculated\n",
|
||||
" return y, pre_1, pre_2, pre_3, act_1, act_2, act_3, w_act_1, w_act_2, w_act_3"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "7qeIUrh19AkH"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define the Rectified Linear Unit (ReLU) function\n",
|
||||
"def ReLU(preactivation):\n",
|
||||
" activation = preactivation.clip(0.0)\n",
|
||||
" return activation"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "cwTp__Fk9YUx"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"First, let's run the network with a ReLU functions"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "INQkRzyn9kVC"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Now lets define some parameters and run the neural network\n",
|
||||
"theta_10 = 0.3 ; theta_11 = -1.0\n",
|
||||
"theta_20 = -1.0 ; theta_21 = 2.0\n",
|
||||
"theta_30 = -0.5 ; theta_31 = 0.65\n",
|
||||
"phi_0 = -0.3; phi_1 = 2.0; phi_2 = -1.0; phi_3 = 7.0\n",
|
||||
"\n",
|
||||
"# Define a range of input values\n",
|
||||
"x = np.arange(0,1,0.01)\n",
|
||||
"\n",
|
||||
"# We run the neural network for each of these input values\n",
|
||||
"y, pre_1, pre_2, pre_3, act_1, act_2, act_3, w_act_1, w_act_2, w_act_3 = \\\n",
|
||||
" shallow_1_1_3(x, ReLU, phi_0,phi_1,phi_2,phi_3, theta_10, theta_11, theta_20, theta_21, theta_30, theta_31)\n",
|
||||
"# And then plot it\n",
|
||||
"plot_neural(x, y, pre_1, pre_2, pre_3, act_1, act_2, act_3, w_act_1, w_act_2, w_act_3, plot_all=True)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "jT9QuKou9i0_"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Sigmoid activation function\n",
|
||||
"\n",
|
||||
"The ReLU isn't the only kind of activation function. For a long time, people used sigmoid functions. A logistic sigmoid function is defined by the equation\n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
"f[h] = \\frac{1}{1+\\exp{[-10 z ]}}\n",
|
||||
"\\end{equation}\n",
|
||||
"\n",
|
||||
"(Note that the factor of 10 is not standard -- but it allow us to plot on the same axes as the ReLU examples)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "-I8N7r1o9HYf"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define the sigmoid function\n",
|
||||
"def sigmoid(preactivation):\n",
|
||||
" # TODO write code to implement the sigmoid function and compute the activation at the\n",
|
||||
" # hidden unit from the preactivation. Use the np.exp() function.\n",
|
||||
" activation = np.zeros_like(preactivation);\n",
|
||||
"\n",
|
||||
" return activation"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "hgkioNyr975Y"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
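One possible completion of the sigmoid cell above -- a minimal sketch assuming the scaled logistic form from the equation, with numpy already imported as np:

# Hypothetical completion of the TODO above: the scaled logistic
# function from the markdown, applied elementwise by numpy
def sigmoid(preactivation):
  activation = 1.0 / (1.0 + np.exp(-10.0 * preactivation))
  return activation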
{
"cell_type": "code",
"source": [
"# Make an array of inputs\n",
"z = np.arange(-1,1,0.01)\n",
"sig_z = sigmoid(z)\n",
"\n",
"# Plot the sigmoid function\n",
"fig, ax = plt.subplots()\n",
"ax.plot(z,sig_z,'r-')\n",
"ax.set_xlim([-1,1]); ax.set_ylim([0,1])\n",
"ax.set_xlabel('z'); ax.set_ylabel('sig[z]')\n",
"plt.show()"
],
"metadata": {
"id": "94HIXKJH97ve"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"Let's see what happens when we use this activation function in a neural network"
],
"metadata": {
"id": "p3zQNXhj-J-o"
}
},
{
"cell_type": "code",
"source": [
"theta_10 = 0.3 ; theta_11 = -1.0\n",
"theta_20 = -1.0 ; theta_21 = 2.0\n",
"theta_30 = -0.5 ; theta_31 = 0.65\n",
"phi_0 = 0.3; phi_1 = 0.5; phi_2 = -1.0; phi_3 = 0.9\n",
"\n",
"# Define a range of input values\n",
"x = np.arange(0,1,0.01)\n",
"\n",
"# We run the neural network for each of these input values\n",
"y, pre_1, pre_2, pre_3, act_1, act_2, act_3, w_act_1, w_act_2, w_act_3 = \\\n",
"  shallow_1_1_3(x, sigmoid, phi_0,phi_1,phi_2,phi_3, theta_10, theta_11, theta_20, theta_21, theta_30, theta_31)\n",
"# And then plot it\n",
"plot_neural(x, y, pre_1, pre_2, pre_3, act_1, act_2, act_3, w_act_1, w_act_2, w_act_3, plot_all=True)"
],
"metadata": {
"id": "C1dASr9L-GNt"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"You've probably noticed that this gives nice smooth curves. So why don't we use sigmoids? It's not obvious right now, but we will get to it when we learn to fit models."
],
"metadata": {
"id": "Uuam_DewA9fH"
}
},
{
"cell_type": "markdown",
"source": [
"# Heaviside activation function\n",
"\n",
"The Heaviside function is defined as:\n",
"\n",
"\\begin{equation}\n",
"\\mbox{heaviside}[z] = \\begin{cases} 0 & \\quad z <0 \\\\ 1 & \\quad z\\geq 0\\end{cases}\n",
"\\end{equation}"
],
"metadata": {
"id": "C9WKkcMUABze"
}
},
{
"cell_type": "code",
"source": [
"# Define the heaviside function\n",
"def heaviside(preactivation):\n",
"  # TODO write code to implement the heaviside function and compute the activation at the\n",
"  # hidden unit from the preactivation. Depending on your implementation you may need to\n",
"  # convert a Boolean array to an array of ones and zeros. To do this, use .astype(int)\n",
"  activation = np.zeros_like(preactivation)\n",
"\n",
"\n",
"  return activation"
],
"metadata": {
"id": "-1qFkdOL-NPc"
},
"execution_count": null,
"outputs": []
},
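One possible completion of the heaviside cell above, a sketch following the .astype(int) hint in the comments:

# Hypothetical completion of the TODO above: the comparison yields a
# Boolean array, which we cast to ones and zeros as the hint suggests
def heaviside(preactivation):
  activation = (preactivation >= 0).astype(int)
  return activation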
{
"cell_type": "code",
"source": [
"# Make an array of inputs\n",
"z = np.arange(-1,1,0.01)\n",
"heav_z = heaviside(z)\n",
"\n",
"# Plot the heaviside function\n",
"fig, ax = plt.subplots()\n",
"ax.plot(z,heav_z,'r-')\n",
"ax.set_xlim([-1,1]); ax.set_ylim([-2,2])\n",
"ax.set_xlabel('z'); ax.set_ylabel('heaviside[z]')\n",
"plt.show()"
],
"metadata": {
"id": "mSPyp7iA-44H"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"theta_10 = 0.3 ; theta_11 = -1.0\n",
"theta_20 = -1.0 ; theta_21 = 2.0\n",
"theta_30 = -0.5 ; theta_31 = 0.65\n",
"phi_0 = 0.3; phi_1 = 0.5; phi_2 = -1.0; phi_3 = 0.9\n",
"\n",
"# Define a range of input values\n",
"x = np.arange(0,1,0.01)\n",
"\n",
"# We run the neural network for each of these input values\n",
"y, pre_1, pre_2, pre_3, act_1, act_2, act_3, w_act_1, w_act_2, w_act_3 = \\\n",
"  shallow_1_1_3(x, heaviside, phi_0,phi_1,phi_2,phi_3, theta_10, theta_11, theta_20, theta_21, theta_30, theta_31)\n",
"# And then plot it\n",
"plot_neural(x, y, pre_1, pre_2, pre_3, act_1, act_2, act_3, w_act_1, w_act_2, w_act_3, plot_all=True)"
],
"metadata": {
"id": "t99K2lSl--Mq"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"This can approximate any function, but the output is discontinuous, and there are also reasons not to use it that we will discover when we learn more about model fitting."
],
"metadata": {
"id": "T65MRtM-BCQA"
}
},
{
"cell_type": "markdown",
"source": [
"# Linear activation functions\n",
"\n",
"Neural networks don't work if the activation function is linear. For example, consider what would happen if the activation function was:\n",
"\n",
"\\begin{equation}\n",
"\\mbox{lin}[z] = a + bz\n",
"\\end{equation}"
],
"metadata": {
"id": "RkB-XZMLBTaR"
}
},
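To see why, substitute lin[z] = a + bz into the network: every term stays affine in x, so the whole model collapses to a single line. A short check, using the same notation as the cells above:

\begin{equation}
y = \phi_0 + \sum_{d=1}^{3}\phi_d\bigl(a + b(\theta_{d0}+\theta_{d1}x)\bigr) = \Bigl(\phi_0 + a\sum_{d}\phi_d + b\sum_{d}\phi_d\theta_{d0}\Bigr) + \Bigl(b\sum_{d}\phi_d\theta_{d1}\Bigr)x.
\end{equation}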
{
"cell_type": "code",
"source": [
"# Define the linear activation function\n",
"def lin(preactivation):\n",
"  a = 0\n",
"  b = 1\n",
"  # Compute linear function\n",
"  activation = a + b * preactivation\n",
"  # Return\n",
"  return activation"
],
"metadata": {
"id": "Q59v3saj_jq1"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# TODO\n",
"# 1. The linear activation function above just returns the input: (0+1*z) = z\n",
"# Before running the code, make a prediction about what the ten panels of the drawing will look like.\n",
"# Now run the code below to see if you were right. What family of functions can this represent?\n",
"\n",
"# 2. What happens if you change the parameters (a,b) to different values?\n",
"# Try a=0.5, b=-0.4. Don't forget to run the cell again to update the function.\n",
"\n",
"theta_10 = 0.3 ; theta_11 = -1.0\n",
"theta_20 = -1.0 ; theta_21 = 2.0\n",
"theta_30 = -0.5 ; theta_31 = 0.65\n",
"phi_0 = 0.3; phi_1 = 0.5; phi_2 = -1.0; phi_3 = 0.9\n",
"\n",
"# Define a range of input values\n",
"x = np.arange(0,1,0.01)\n",
"\n",
"# We run the neural network for each of these input values\n",
"y, pre_1, pre_2, pre_3, act_1, act_2, act_3, w_act_1, w_act_2, w_act_3 = \\\n",
"  shallow_1_1_3(x, lin, phi_0,phi_1,phi_2,phi_3, theta_10, theta_11, theta_20, theta_21, theta_30, theta_31)\n",
"# And then plot it\n",
"plot_neural(x, y, pre_1, pre_2, pre_3, act_1, act_2, act_3, w_act_1, w_act_2, w_act_3, plot_all=True)"
],
"metadata": {
"id": "IwodsBr0BkDn"
},
"execution_count": null,
"outputs": []
}
]
}
362
Notebooks/Chap04/4_1_Composing_Networks.ipynb
Normal file
@@ -0,0 +1,362 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyPEQEGetZqWnLRNn99Q2aaT",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap04/4_1_Composing_Networks.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"source": [
"#Notebook 4.1 -- Composing networks\n",
"\n",
"The purpose of this notebook is to understand what happens when we feed one neural network into another. It works through an example similar to figure 4.1 and varies both networks.\n",
"\n",
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
"\n",
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
],
"metadata": {
"id": "MaKn8CFlzN8E"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "8ClURpZQzI6L"
},
"outputs": [],
"source": [
"# Imports math library\n",
"import numpy as np\n",
"# Imports plotting library\n",
"import matplotlib.pyplot as plt"
]
},
{
"cell_type": "code",
"source": [
"# Define the Rectified Linear Unit (ReLU) function\n",
"def ReLU(preactivation):\n",
"  activation = preactivation.clip(0.0)\n",
"  return activation"
],
"metadata": {
"id": "YdmveeAUz4YG"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Define a shallow neural network with one input, one output, and three hidden units\n",
"def shallow_1_1_3(x, activation_fn, phi_0,phi_1,phi_2,phi_3, theta_10, theta_11, theta_20, theta_21, theta_30, theta_31):\n",
"  # Initial lines\n",
"  pre_1 = theta_10 + theta_11 * x\n",
"  pre_2 = theta_20 + theta_21 * x\n",
"  pre_3 = theta_30 + theta_31 * x\n",
"  # Activation functions\n",
"  act_1 = activation_fn(pre_1)\n",
"  act_2 = activation_fn(pre_2)\n",
"  act_3 = activation_fn(pre_3)\n",
"  # Weight activations\n",
"  w_act_1 = phi_1 * act_1\n",
"  w_act_2 = phi_2 * act_2\n",
"  w_act_3 = phi_3 * act_3\n",
"  # Combine weighted activations and add y offset\n",
"  y = phi_0 + w_act_1 + w_act_2 + w_act_3\n",
"  # Return everything we have calculated\n",
"  return y"
],
"metadata": {
"id": "ximCLwIfz8kj"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Plot two shallow neural networks and the composition of the two\n",
"def plot_neural_two_components(x_in, net1_out, net2_out, net12_out=None):\n",
"\n",
"  # Plot the two networks separately\n",
"  fig, ax = plt.subplots(1,2)\n",
"  fig.set_size_inches(8.5, 8.5)\n",
"  fig.tight_layout(pad=3.0)\n",
"  ax[0].plot(x_in, net1_out,'r-')\n",
"  ax[0].set_xlabel('Net 1 input'); ax[0].set_ylabel('Net 1 output')\n",
"  ax[0].set_xlim([-1,1]); ax[0].set_ylim([-1,1])\n",
"  ax[0].set_aspect(1.0)\n",
"  ax[1].plot(x_in, net2_out,'b-')\n",
"  ax[1].set_xlabel('Net 2 input'); ax[1].set_ylabel('Net 2 output')\n",
"  ax[1].set_xlim([-1,1]); ax[1].set_ylim([-1,1])\n",
"  ax[1].set_aspect(1.0)\n",
"  plt.show()\n",
"\n",
"  if net12_out is not None:\n",
"    # Plot their composition\n",
"    fig, ax = plt.subplots()\n",
"    ax.plot(x_in, net12_out,'g-')\n",
"    ax.set_xlabel('Net 1 Input'); ax.set_ylabel('Net 2 Output')\n",
"    ax.set_xlim([-1,1]); ax.set_ylim([-1,1])\n",
"    ax.set_aspect(1.0)\n",
"    plt.show()"
],
"metadata": {
"id": "ZB2HTalOE40X"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"Let's define two networks. We'll put the prefixes n1_ and n2_ before all the variables to make it clear which network is which. We'll just consider the inputs and outputs over the range [-1,1]."
],
"metadata": {
"id": "LxBJCObC-NTY"
}
},
{
"cell_type": "code",
"source": [
"# Now let's define some parameters and run the first neural network\n",
"n1_theta_10 = 0.0 ; n1_theta_11 = -1.0\n",
"n1_theta_20 = 0 ; n1_theta_21 = 1.0\n",
"n1_theta_30 = -0.67 ; n1_theta_31 = 1.0\n",
"n1_phi_0 = 1.0; n1_phi_1 = -2.0; n1_phi_2 = -3.0; n1_phi_3 = 9.3\n",
"\n",
"# Now let's define some parameters and run the second neural network\n",
"n2_theta_10 = -0.6 ; n2_theta_11 = -1.0\n",
"n2_theta_20 = 0.2 ; n2_theta_21 = 1.0\n",
"n2_theta_30 = -0.5 ; n2_theta_31 = 1.0\n",
"n2_phi_0 = 0.5; n2_phi_1 = -1.0; n2_phi_2 = -1.5; n2_phi_3 = 2.0\n",
"\n",
"# Display the two inputs\n",
"x = np.arange(-1,1,0.001)\n",
"# We run the first and second neural networks for each of these input values\n",
"net1_out = shallow_1_1_3(x, ReLU, n1_phi_0, n1_phi_1, n1_phi_2, n1_phi_3, n1_theta_10, n1_theta_11, n1_theta_20, n1_theta_21, n1_theta_30, n1_theta_31)\n",
"net2_out = shallow_1_1_3(x, ReLU, n2_phi_0, n2_phi_1, n2_phi_2, n2_phi_3, n2_theta_10, n2_theta_11, n2_theta_20, n2_theta_21, n2_theta_30, n2_theta_31)\n",
"# Plot both graphs\n",
"plot_neural_two_components(x, net1_out, net2_out)"
],
"metadata": {
"id": "JRebvurv22pT"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# TODO\n",
"# Take a piece of paper and draw what you think will happen when we feed the\n",
"# output of the first network into the second one. Draw the relationship between\n",
"# the input of the first network and the output of the second one."
],
"metadata": {
"id": "NUQVop9-Xta1"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Now let's see if your predictions were right\n",
"\n",
"# TODO feed the output of first network into second network (replace this line)\n",
"net12_out = np.zeros_like(x)\n",
"\n",
"# Plot all three graphs\n",
"plot_neural_two_components(x, net1_out, net2_out, net12_out)"
],
"metadata": {
"id": "Yq7GH-MCIyPI"
},
"execution_count": null,
"outputs": []
},
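A sketch of the TODO above; the "see if you were right" cells later in this notebook use exactly this pattern of feeding net1's output in as net2's input:

# Feed the output of the first network into the second one
net12_out = shallow_1_1_3(net1_out, ReLU, n2_phi_0, n2_phi_1, n2_phi_2, n2_phi_3, n2_theta_10, n2_theta_11, n2_theta_20, n2_theta_21, n2_theta_30, n2_theta_31)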
{
"cell_type": "code",
"source": [
"# Now we'll change things up a bit. What happens if we change the second network? (note the *-1 change)\n",
"net1_out = shallow_1_1_3(x, ReLU, n1_phi_0, n1_phi_1, n1_phi_2, n1_phi_3, n1_theta_10, n1_theta_11, n1_theta_20, n1_theta_21, n1_theta_30, n1_theta_31)\n",
"net2_out = shallow_1_1_3(x, ReLU, n2_phi_0, n2_phi_1*-1, n2_phi_2, n2_phi_3, n2_theta_10, n2_theta_11, n2_theta_20, n2_theta_21, n2_theta_30, n2_theta_31)\n",
"plot_neural_two_components(x, net1_out, net2_out)"
],
"metadata": {
"id": "BMlLkLbdEuPu"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# TODO\n",
"# Take a piece of paper and draw what you think will happen when we feed the\n",
"# output of the first network into the second one now that we have changed it. Draw the relationship between\n",
"# the input of the first network and the output of the second one."
],
"metadata": {
"id": "Of6jVXLTJ688"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# When you have a prediction, run this code to see if you were right\n",
"net12_out = shallow_1_1_3(net1_out, ReLU, n2_phi_0, n2_phi_1*-1, n2_phi_2, n2_phi_3, n2_theta_10, n2_theta_11, n2_theta_20, n2_theta_21, n2_theta_30, n2_theta_31)\n",
"plot_neural_two_components(x, net1_out, net2_out, net12_out)"
],
"metadata": {
"id": "PbbSCaSeK6SM"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Let's change things again. What happens if we change the first network? (note the changes)\n",
"net1_out = shallow_1_1_3(x, ReLU, n1_phi_0, n1_phi_1*0.5, n1_phi_2, n1_phi_3, n1_theta_10, n1_theta_11, n1_theta_20, n1_theta_21, n1_theta_30, n1_theta_31)\n",
"net2_out = shallow_1_1_3(x, ReLU, n2_phi_0, n2_phi_1, n2_phi_2, n2_phi_3, n2_theta_10, n2_theta_11, n2_theta_20, n2_theta_21, n2_theta_30, n2_theta_31)\n",
"plot_neural_two_components(x, net1_out, net2_out)"
],
"metadata": {
"id": "b39mcSGFK9Fd"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# TODO\n",
"# Take a piece of paper and draw what you think will happen when we feed the\n",
"# output of the first network, now that we have changed it, into the original second network. Draw the relationship between\n",
"# the input of the first network and the output of the second one."
],
"metadata": {
"id": "MhO40cC_LW9I"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# When you have a prediction, run this code to see if you were right\n",
"net12_out = shallow_1_1_3(net1_out, ReLU, n2_phi_0, n2_phi_1, n2_phi_2, n2_phi_3, n2_theta_10, n2_theta_11, n2_theta_20, n2_theta_21, n2_theta_30, n2_theta_31)\n",
"plot_neural_two_components(x, net1_out, net2_out, net12_out)"
],
"metadata": {
"id": "Akwo-hnPLkNr"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Let's change things again. What happens if the first and second networks are the same?\n",
"net1_out = shallow_1_1_3(x, ReLU, n1_phi_0, n1_phi_1, n1_phi_2, n1_phi_3, n1_theta_10, n1_theta_11, n1_theta_20, n1_theta_21, n1_theta_30, n1_theta_31)\n",
"net2_out_new = shallow_1_1_3(x, ReLU, n1_phi_0, n1_phi_1, n1_phi_2, n1_phi_3, n1_theta_10, n1_theta_11, n1_theta_20, n1_theta_21, n1_theta_30, n1_theta_31)\n",
"plot_neural_two_components(x, net1_out, net2_out_new)"
],
"metadata": {
"id": "TJ7wXKpRLl_E"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# TODO\n",
"# Take a piece of paper and draw what you think will happen when we feed the\n",
"# output of the first network into an identical copy of itself. Draw the relationship between\n",
"# the input of the first network and the output of the second one."
],
"metadata": {
"id": "dJbbh6R7NG9k"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# When you have a prediction, run this code to see if you were right\n",
"net12_out = shallow_1_1_3(net1_out, ReLU, n1_phi_0, n1_phi_1, n1_phi_2, n1_phi_3, n1_theta_10, n1_theta_11, n1_theta_20, n1_theta_21, n1_theta_30, n1_theta_31)\n",
"plot_neural_two_components(x, net1_out, net2_out_new, net12_out)"
],
"metadata": {
"id": "BiZZl3yNM2Bq"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# TODO\n",
"# Contemplate what you think will happen when we feed the\n",
"# output of the original first network into a second copy of the original first network, and then\n",
"# the output of that into the original second network (so now we have a three-layer network).\n",
"# How many total linear regions will we have in the output?\n",
"net123_out = shallow_1_1_3(net12_out, ReLU, n2_phi_0, n2_phi_1, n2_phi_2, n2_phi_3, n2_theta_10, n2_theta_11, n2_theta_20, n2_theta_21, n2_theta_30, n2_theta_31)\n",
"plot_neural_two_components(x, net12_out, net2_out, net123_out)"
],
"metadata": {
"id": "BSd51AkzNf7-"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# TO DO\n",
"# How many linear regions would there be if we ran N copies of the first network, feeding the result of the first\n",
"# into the second, the second into the third and so on, and then passed the result into the original second\n",
"# network (blue curve above)?\n",
"\n",
"# Take-away conclusion: with very few parameters, we can make A LOT of linear regions, but\n",
"# they depend on one another in complex ways that quickly become too difficult to understand intuitively."
],
"metadata": {
"id": "HqzePCLOVQK7"
},
"execution_count": null,
"outputs": []
}
]
}
219
Notebooks/Chap04/4_2_Clipping_functions.ipynb
Normal file
@@ -0,0 +1,219 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyPkFrjmRAUf0fxN07RC4xMI",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap04/4_2_Clipping_functions.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"source": [
"#Notebook 4.2 -- Clipping functions\n",
"\n",
"The purpose of this notebook is to understand how a neural network with two hidden layers builds more complicated functions by clipping and recombining the representations at the intermediate hidden variables.\n",
"\n",
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
"\n",
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
],
"metadata": {
"id": "MaKn8CFlzN8E"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "8ClURpZQzI6L"
},
"outputs": [],
"source": [
"# Imports math library\n",
"import numpy as np\n",
"# Imports plotting library\n",
"import matplotlib.pyplot as plt"
]
},
{
"cell_type": "code",
"source": [
"# Define the Rectified Linear Unit (ReLU) function\n",
"def ReLU(preactivation):\n",
"  activation = preactivation.clip(0.0)\n",
"  return activation"
],
"metadata": {
"id": "YdmveeAUz4YG"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Define a deep neural network with one input, one output, and two hidden layers, each with three hidden units (eqns 4.7-4.9)\n",
"# To make this easier, we store the parameters in ndarrays, so phi_0 = phi[0] and psi_3,3 = psi[3,3] etc.\n",
"def shallow_1_1_3_3(x, activation_fn, phi, psi, theta):\n",
"\n",
"  # TODO -- You write this function\n",
"  # Replace the skeleton code below.\n",
"\n",
"  # Preactivations at layer 1 (terms in brackets in equation 4.7)\n",
"  layer1_pre_1 = np.zeros_like(x)\n",
"  layer1_pre_2 = np.zeros_like(x)\n",
"  layer1_pre_3 = np.zeros_like(x)\n",
"\n",
"  # Activation functions (rest of equation 4.7)\n",
"  h1 = activation_fn(layer1_pre_1)\n",
"  h2 = activation_fn(layer1_pre_2)\n",
"  h3 = activation_fn(layer1_pre_3)\n",
"\n",
"  # Preactivations at layer 2 (terms in brackets in equation 4.8)\n",
"  layer2_pre_1 = np.zeros_like(x)\n",
"  layer2_pre_2 = np.zeros_like(x)\n",
"  layer2_pre_3 = np.zeros_like(x)\n",
"\n",
"  # Activation functions (rest of equation 4.8)\n",
"  h1_prime = activation_fn(layer2_pre_1)\n",
"  h2_prime = activation_fn(layer2_pre_2)\n",
"  h3_prime = activation_fn(layer2_pre_3)\n",
"\n",
"  # Weighted outputs by phi (three last terms of equation 4.9)\n",
"  phi1_h1_prime = np.zeros_like(x)\n",
"  phi2_h2_prime = np.zeros_like(x)\n",
"  phi3_h3_prime = np.zeros_like(x)\n",
"\n",
"  # Combine weighted activations and add y offset (summing terms of equation 4.9)\n",
"  y = np.zeros_like(x)\n",
"\n",
"\n",
"  # Return everything we have calculated\n",
"  return y, layer2_pre_1, layer2_pre_2, layer2_pre_3, h1_prime, h2_prime, h3_prime, phi1_h1_prime, phi2_h2_prime, phi3_h3_prime"
],
"metadata": {
"id": "ximCLwIfz8kj"
},
"execution_count": null,
"outputs": []
},
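A sketch of how the skeleton above could be completed, following equations 4.7-4.9 and the padded indexing described in the parameter cell below (theta[k,0] and theta[k,1] are the bias and slope for first-layer unit k; psi[k,j] weights h_j into second-layer unit k):

# First-layer preactivations (equation 4.7)
layer1_pre_1 = theta[1,0] + theta[1,1] * x
layer1_pre_2 = theta[2,0] + theta[2,1] * x
layer1_pre_3 = theta[3,0] + theta[3,1] * x
# Second-layer preactivations (equation 4.8), computed from h1, h2, h3
layer2_pre_1 = psi[1,0] + psi[1,1] * h1 + psi[1,2] * h2 + psi[1,3] * h3
layer2_pre_2 = psi[2,0] + psi[2,1] * h1 + psi[2,2] * h2 + psi[2,3] * h3
layer2_pre_3 = psi[3,0] + psi[3,1] * h1 + psi[3,2] * h2 + psi[3,3] * h3
# Weighted outputs and final sum (equation 4.9)
phi1_h1_prime = phi[1] * h1_prime
phi2_h2_prime = phi[2] * h2_prime
phi3_h3_prime = phi[3] * h3_prime
y = phi[0] + phi1_h1_prime + phi2_h2_prime + phi3_h3_prime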
{
"cell_type": "code",
"source": [
"# Plot two-layer neural network as in figure 4.5\n",
"def plot_neural_two_layers(x, y, layer2_pre_1, layer2_pre_2, layer2_pre_3, h1_prime, h2_prime, h3_prime, phi1_h1_prime, phi2_h2_prime, phi3_h3_prime):\n",
"\n",
"  fig, ax = plt.subplots(3,3)\n",
"  fig.set_size_inches(8.5, 8.5)\n",
"  fig.tight_layout(pad=3.0)\n",
"  ax[0,0].plot(x,layer2_pre_1,'r-'); ax[0,0].set_ylabel('$\\\\psi_{10}+\\\\psi_{11}h_{1}+\\\\psi_{12}h_{2}+\\\\psi_{13}h_3$')\n",
"  ax[0,1].plot(x,layer2_pre_2,'b-'); ax[0,1].set_ylabel('$\\\\psi_{20}+\\\\psi_{21}h_{1}+\\\\psi_{22}h_{2}+\\\\psi_{23}h_3$')\n",
"  ax[0,2].plot(x,layer2_pre_3,'g-'); ax[0,2].set_ylabel('$\\\\psi_{30}+\\\\psi_{31}h_{1}+\\\\psi_{32}h_{2}+\\\\psi_{33}h_3$')\n",
"  ax[1,0].plot(x,h1_prime,'r-'); ax[1,0].set_ylabel(\"$h_{1}^{'}$\")\n",
"  ax[1,1].plot(x,h2_prime,'b-'); ax[1,1].set_ylabel(\"$h_{2}^{'}$\")\n",
"  ax[1,2].plot(x,h3_prime,'g-'); ax[1,2].set_ylabel(\"$h_{3}^{'}$\")\n",
"  ax[2,0].plot(x,phi1_h1_prime,'r-'); ax[2,0].set_ylabel(\"$\\\\phi_1 h_{1}^{'}$\")\n",
"  ax[2,1].plot(x,phi2_h2_prime,'b-'); ax[2,1].set_ylabel(\"$\\\\phi_2 h_{2}^{'}$\")\n",
"  ax[2,2].plot(x,phi3_h3_prime,'g-'); ax[2,2].set_ylabel(\"$\\\\phi_3 h_{3}^{'}$\")\n",
"\n",
"  for plot_y in range(3):\n",
"    for plot_x in range(3):\n",
"      ax[plot_y,plot_x].set_xlim([0,1]); ax[plot_y,plot_x].set_ylim([-1,1])\n",
"      ax[plot_y,plot_x].set_aspect(0.5)\n",
"    ax[2,plot_y].set_xlabel('Input, $x$')\n",
"  plt.show()\n",
"\n",
"  fig, ax = plt.subplots()\n",
"  ax.plot(x,y)\n",
"  ax.set_xlabel('Input, $x$'); ax.set_ylabel('Output, $y$')\n",
"  ax.set_xlim([0,1]); ax.set_ylim([-1,1])\n",
"  ax.set_aspect(0.5)\n",
"  plt.show()"
],
"metadata": {
"id": "ZB2HTalOE40X"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"Now let's define the parameters and visualize the network"
],
"metadata": {
"id": "LxBJCObC-NTY"
}
},
{
"cell_type": "code",
"source": [
"# Define parameters (note first dimension of theta, psi, and phi is padded to make indices match\n",
"# notation in book)\n",
"theta = np.zeros([4,2])\n",
"psi = np.zeros([4,4])\n",
"phi = np.zeros([4,1])\n",
"\n",
"theta[1,0] = 0.3 ; theta[1,1] = -1.0\n",
"theta[2,0] = -1.0 ; theta[2,1] = 2.0\n",
"theta[3,0] = -0.5 ; theta[3,1] = 0.65\n",
"psi[1,0] = 0.3; psi[1,1] = 2.0; psi[1,2] = -1.0; psi[1,3] = 7.0\n",
"psi[2,0] = -0.2; psi[2,1] = 2.0; psi[2,2] = 1.2; psi[2,3] = -8.0\n",
"psi[3,0] = 0.3; psi[3,1] = -2.3; psi[3,2] = -0.8; psi[3,3] = 2.0\n",
"phi[0] = 0.0; phi[1] = 0.5; phi[2] = -1.5; phi[3] = 2.2\n",
"\n",
"# Define a range of input values\n",
"x = np.arange(0,1,0.01)\n",
"\n",
"# Run the neural network\n",
"y, layer2_pre_1, layer2_pre_2, layer2_pre_3, h1_prime, h2_prime, h3_prime, phi1_h1_prime, phi2_h2_prime, phi3_h3_prime \\\n",
"  = shallow_1_1_3_3(x, ReLU, phi, psi, theta)\n",
"\n",
"# And then plot it\n",
"plot_neural_two_layers(x, y, layer2_pre_1, layer2_pre_2, layer2_pre_3, h1_prime, h2_prime, h3_prime, phi1_h1_prime, phi2_h2_prime, phi3_h3_prime)"
],
"metadata": {
"id": "JRebvurv22pT"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"TO DO: To test your understanding of this, consider:\n",
"\n",
"1. What would happen if we increased $\\\\psi_{1,0}$?\n",
"2. What would happen if we multiplied $\\\\psi_{2,0}, \\\\psi_{2,1}, \\\\psi_{2,2}, \\\\psi_{2,3}$ by -1?\n",
"3. What would happen if we set $\\\\phi_{3}$ to -1?\n",
"\n",
"You can rerun the code to see if you were correct.\n",
"\n"
],
"metadata": {
"id": "GcjUUHbXf25D"
}
}
]
}
322
Notebooks/Chap04/4_3_Deep_Networks.ipynb
Normal file
@@ -0,0 +1,322 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyMbJGN6f2+yKzzsVep/wi5U",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap04/4_3_Deep_Networks.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"source": [
"# **Notebook 4.3 Deep neural networks**\n",
"\n",
"This notebook investigates converting neural networks to matrix form.\n",
"\n",
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
"\n",
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
],
"metadata": {
"id": "MaKn8CFlzN8E"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "8ClURpZQzI6L"
},
"outputs": [],
"source": [
"# Imports math library\n",
"import numpy as np\n",
"# Imports plotting library\n",
"import matplotlib.pyplot as plt"
]
},
{
"cell_type": "code",
"source": [
"# Define the Rectified Linear Unit (ReLU) function\n",
"def ReLU(preactivation):\n",
"  activation = preactivation.clip(0.0)\n",
"  return activation"
],
"metadata": {
"id": "YdmveeAUz4YG"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Define a shallow neural network with one input, one output, and three hidden units\n",
"def shallow_1_1_3(x, activation_fn, phi_0,phi_1,phi_2,phi_3, theta_10, theta_11, theta_20, theta_21, theta_30, theta_31):\n",
"  # Initial lines\n",
"  pre_1 = theta_10 + theta_11 * x\n",
"  pre_2 = theta_20 + theta_21 * x\n",
"  pre_3 = theta_30 + theta_31 * x\n",
"  # Activation functions\n",
"  act_1 = activation_fn(pre_1)\n",
"  act_2 = activation_fn(pre_2)\n",
"  act_3 = activation_fn(pre_3)\n",
"  # Weight activations\n",
"  w_act_1 = phi_1 * act_1\n",
"  w_act_2 = phi_2 * act_2\n",
"  w_act_3 = phi_3 * act_3\n",
"  # Combine weighted activations and add y offset\n",
"  y = phi_0 + w_act_1 + w_act_2 + w_act_3\n",
"  # Return everything we have calculated\n",
"  return y, pre_1, pre_2, pre_3, act_1, act_2, act_3, w_act_1, w_act_2, w_act_3"
],
"metadata": {
"id": "ximCLwIfz8kj"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Plot the shallow neural network. We'll assume the input is in range [-1,1] and the output in [-1,1]\n",
"def plot_neural(x, y):\n",
"  fig, ax = plt.subplots()\n",
"  ax.plot(x.T,y.T)\n",
"  ax.set_xlabel('Input'); ax.set_ylabel('Output')\n",
"  ax.set_xlim([-1,1]); ax.set_ylim([-1,1])\n",
"  ax.set_aspect(1.0)\n",
"  plt.show()"
],
"metadata": {
"id": "btrt7BX20gKD"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"Let's define a network. We'll just consider the inputs and outputs over the range [-1,1]."
],
"metadata": {
"id": "LxBJCObC-NTY"
}
},
{
"cell_type": "code",
"source": [
"# Now let's define some parameters and run the first neural network\n",
"n1_theta_10 = 0.0 ; n1_theta_11 = -1.0\n",
"n1_theta_20 = 0 ; n1_theta_21 = 1.0\n",
"n1_theta_30 = -0.67 ; n1_theta_31 = 1.0\n",
"n1_phi_0 = 1.0; n1_phi_1 = -2.0; n1_phi_2 = -3.0; n1_phi_3 = 9.3\n",
"\n",
"# Define a range of input values\n",
"n1_in = np.arange(-1,1,0.01).reshape([1,-1])\n",
"\n",
"# We run the neural network for each of these input values\n",
"n1_out, *_ = shallow_1_1_3(n1_in, ReLU, n1_phi_0, n1_phi_1, n1_phi_2, n1_phi_3, n1_theta_10, n1_theta_11, n1_theta_20, n1_theta_21, n1_theta_30, n1_theta_31)\n",
"# And then plot it\n",
"plot_neural(n1_in, n1_out)"
],
"metadata": {
"id": "JRebvurv22pT"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"Now we'll define the same neural network, but this time, we will use matrix form. When you get this right, it will draw the same plot as above."
],
"metadata": {
"id": "XCJqo_AjfAra"
}
},
{
"cell_type": "code",
"source": [
"beta_0 = np.zeros((3,1))\n",
"Omega_0 = np.zeros((3,1))\n",
"beta_1 = np.zeros((1,1))\n",
"Omega_1 = np.zeros((1,3))\n",
"\n",
"# TODO Fill in the values of the beta and Omega matrices with the n1_theta and n1_phi parameters that define the network above\n",
"# !!! NOTE THAT MATRICES ARE CONVENTIONALLY INDEXED WITH a_11 IN THE TOP LEFT CORNER, BUT NDARRAYS START AT [0,0]\n",
"# To get you started I've filled in a couple:\n",
"beta_0[0,0] = n1_theta_10\n",
"Omega_0[0,0] = n1_theta_11\n",
"\n",
"# Make sure that input data matrix has different inputs in its columns\n",
"n_data = n1_in.size\n",
"n_dim_in = 1\n",
"n1_in_mat = np.reshape(n1_in,(n_dim_in,n_data))\n",
"\n",
"# This runs the network for ALL of the inputs x at once so we can draw the graph\n",
"h1 = ReLU(np.matmul(beta_0,np.ones((1,n_data))) + np.matmul(Omega_0,n1_in_mat))\n",
"n1_out = np.matmul(beta_1,np.ones((1,n_data))) + np.matmul(Omega_1,h1)\n",
"\n",
"# Draw the network and check that it looks the same as the non-matrix case\n",
"plot_neural(n1_in, n1_out)"
],
"metadata": {
"id": "MR0AecZYfACR"
},
"execution_count": null,
"outputs": []
},
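One way to finish the fill-in above, continuing the two entries already given (each hidden unit k takes bias n1_theta_k0 and weight n1_theta_k1; the output row collects the n1_phi parameters):

beta_0[1,0] = n1_theta_20; beta_0[2,0] = n1_theta_30
Omega_0[1,0] = n1_theta_21; Omega_0[2,0] = n1_theta_31
beta_1[0,0] = n1_phi_0
Omega_1[0,0] = n1_phi_1; Omega_1[0,1] = n1_phi_2; Omega_1[0,2] = n1_phi_3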
{
"cell_type": "markdown",
"source": [
"Now we'll feed the output of the first network into the second one."
],
"metadata": {
"id": "qOcj2Rof-o20"
}
},
{
"cell_type": "code",
"source": [
"# Now let's define some parameters and run the second neural network\n",
"n2_theta_10 = -0.6 ; n2_theta_11 = -1.0\n",
"n2_theta_20 = 0.2 ; n2_theta_21 = 1.0\n",
"n2_theta_30 = -0.5 ; n2_theta_31 = 1.0\n",
"n2_phi_0 = 0.5; n2_phi_1 = -1.0; n2_phi_2 = -1.5; n2_phi_3 = 2.0\n",
"\n",
"# Define a range of input values\n",
"n2_in = np.arange(-1,1,0.01)\n",
"\n",
"# We run the second neural network on the output of the first network\n",
"n2_out, *_ = \\\n",
"  shallow_1_1_3(n1_out, ReLU, n2_phi_0, n2_phi_1, n2_phi_2, n2_phi_3, n2_theta_10, n2_theta_11, n2_theta_20, n2_theta_21, n2_theta_30, n2_theta_31)\n",
"# And then plot it\n",
"plot_neural(n1_in, n2_out)"
],
"metadata": {
"id": "ZRjWu8i9239X"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"beta_0 = np.zeros((3,1))\n",
"Omega_0 = np.zeros((3,1))\n",
"beta_1 = np.zeros((3,1))\n",
"Omega_1 = np.zeros((3,3))\n",
"beta_2 = np.zeros((1,1))\n",
"Omega_2 = np.zeros((1,3))\n",
"\n",
"# TODO Fill in the values of the beta and Omega matrices with the n1_theta, n1_phi, n2_theta, and n2_phi parameters\n",
"# that define the composition of the two networks above (see eqn 4.5 for Omega1 and beta1, albeit in different notation)\n",
"# !!! NOTE THAT MATRICES ARE CONVENTIONALLY INDEXED WITH a_11 IN THE TOP LEFT CORNER, BUT NDARRAYS START AT [0,0] SO EVERYTHING IS OFFSET\n",
"# To get you started I've filled in a few:\n",
"beta_0[0,0] = n1_theta_10\n",
"Omega_0[0,0] = n1_theta_11\n",
"beta_1[0,0] = n2_theta_10 + n2_theta_11 * n1_phi_0\n",
"Omega_1[0,0] = n2_theta_11 * n1_phi_1\n",
"\n",
"\n",
"# Make sure that input data matrix has different inputs in its columns\n",
"n_data = n1_in.size\n",
"n_dim_in = 1\n",
"n1_in_mat = np.reshape(n1_in,(n_dim_in,n_data))\n",
"\n",
"# This runs the network for ALL of the inputs x at once so we can draw the graph (hence the extra np.ones term)\n",
"h1 = ReLU(np.matmul(beta_0,np.ones((1,n_data))) + np.matmul(Omega_0,n1_in_mat))\n",
"h2 = ReLU(np.matmul(beta_1,np.ones((1,n_data))) + np.matmul(Omega_1,h1))\n",
"n1_out = np.matmul(beta_2,np.ones((1,n_data))) + np.matmul(Omega_2,h2)\n",
"\n",
"# Draw the network and check that it looks the same as the non-matrix version\n",
"plot_neural(n1_in, n1_out)"
],
"metadata": {
"id": "ZB2HTalOE40X"
},
"execution_count": null,
"outputs": []
},
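A sketch of the remaining entries, extending the pattern of the two composed-layer examples already given: the second network's biases absorb n1_phi_0 and its weights are scaled by the n1_phi terms (the eqn 4.5 pattern), while the final layer is just the second network's phi parameters:

beta_0[1,0] = n1_theta_20; beta_0[2,0] = n1_theta_30
Omega_0[1,0] = n1_theta_21; Omega_0[2,0] = n1_theta_31
beta_1[1,0] = n2_theta_20 + n2_theta_21 * n1_phi_0
beta_1[2,0] = n2_theta_30 + n2_theta_31 * n1_phi_0
Omega_1[0,1] = n2_theta_11 * n1_phi_2; Omega_1[0,2] = n2_theta_11 * n1_phi_3
Omega_1[1,0] = n2_theta_21 * n1_phi_1; Omega_1[1,1] = n2_theta_21 * n1_phi_2; Omega_1[1,2] = n2_theta_21 * n1_phi_3
Omega_1[2,0] = n2_theta_31 * n1_phi_1; Omega_1[2,1] = n2_theta_31 * n1_phi_2; Omega_1[2,2] = n2_theta_31 * n1_phi_3
beta_2[0,0] = n2_phi_0
Omega_2[0,0] = n2_phi_1; Omega_2[0,1] = n2_phi_2; Omega_2[0,2] = n2_phi_3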
{
"cell_type": "markdown",
"source": [
"Now let's make a deep network with 3 hidden layers. It will have $D_i=4$ inputs, $D_1=5$ neurons in the first layer, $D_2=2$ neurons in the second layer, $D_3=4$ neurons in the third layer, and $D_o=1$ output. Consult figure 4.6 and equations 4.15 for guidance."
],
"metadata": {
"id": "0VANqxH2kyS4"
}
},
{
"cell_type": "code",
"source": [
"# Define sizes\n",
"D_i=4; D_1=5; D_2=2; D_3=4; D_o=1\n",
"# We'll choose the inputs and parameters of this network randomly using np.random.normal\n",
"# For example, we'll set the input using\n",
"n_data = 4\n",
"x = np.random.normal(size=(D_i, n_data))\n",
"# TODO initialize the parameters randomly with the correct sizes\n",
"# Replace the lines below\n",
"beta_0 = np.random.normal(size=(1,1))\n",
"Omega_0 = np.random.normal(size=(1,1))\n",
"beta_1 = np.random.normal(size=(1,1))\n",
"Omega_1 = np.random.normal(size=(1,1))\n",
"beta_2 = np.random.normal(size=(1,1))\n",
"Omega_2 = np.random.normal(size=(1,1))\n",
"beta_3 = np.random.normal(size=(1,1))\n",
"Omega_3 = np.random.normal(size=(1,1))\n",
"\n",
"\n",
"# If you set the parameters to the correct sizes, the following code will run\n",
"h1 = ReLU(np.matmul(beta_0,np.ones((1,n_data))) + np.matmul(Omega_0,x))\n",
"h2 = ReLU(np.matmul(beta_1,np.ones((1,n_data))) + np.matmul(Omega_1,h1))\n",
"h3 = ReLU(np.matmul(beta_2,np.ones((1,n_data))) + np.matmul(Omega_2,h2))\n",
"y = np.matmul(beta_3,np.ones((1,n_data))) + np.matmul(Omega_3,h3)\n",
"\n",
"if h1.shape[0] != D_1 or h1.shape[1] != n_data:\n",
"  print(\"h1 is wrong shape\")\n",
"if h2.shape[0] != D_2 or h2.shape[1] != n_data:\n",
"  print(\"h2 is wrong shape\")\n",
"if h3.shape[0] != D_3 or h3.shape[1] != n_data:\n",
"  print(\"h3 is wrong shape\")\n",
"if y.shape[0] != D_o or y.shape[1] != n_data:\n",
"  print(\"Output is wrong shape\")\n",
"\n",
"# Print the inputs and outputs\n",
"print(\"Input data points\")\n",
"print(x)\n",
"print(\"Output data points\")\n",
"print(y)"
],
"metadata": {
"id": "RdBVAc_Rj22-"
},
"execution_count": null,
"outputs": []
}
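A sketch of shapes that make the checks above pass -- each Omega_k maps layer k to layer k+1 and each beta_k is a column of biases, following equation 4.15:

beta_0 = np.random.normal(size=(D_1,1)); Omega_0 = np.random.normal(size=(D_1,D_i))
beta_1 = np.random.normal(size=(D_2,1)); Omega_1 = np.random.normal(size=(D_2,D_1))
beta_2 = np.random.normal(size=(D_3,1)); Omega_2 = np.random.normal(size=(D_3,D_2))
beta_3 = np.random.normal(size=(D_o,1)); Omega_3 = np.random.normal(size=(D_o,D_3))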
]
}
566
Notebooks/Chap05/5_1_Least_Squares_Loss.ipynb
Normal file
@@ -0,0 +1,566 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyNkBMOVt5gO7Awn9JMn4N8Z",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap05/5_1_Least_Squares_Loss.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"source": [
"# **Notebook 5.1: Least Squares Loss**\n",
"\n",
"This notebook investigates the least squares loss and the equivalence of maximum likelihood and minimum negative log likelihood.\n",
"\n",
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
"\n",
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
],
"metadata": {
"id": "jSlFkICHwHQF"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "PYMZ1x-Pv1ht"
},
"outputs": [],
"source": [
"# Imports math library\n",
"import numpy as np\n",
"# Imports plotting library\n",
"import matplotlib.pyplot as plt\n",
"# Import math Library\n",
"import math"
]
},
{
"cell_type": "code",
"source": [
"# Define the Rectified Linear Unit (ReLU) function\n",
"def ReLU(preactivation):\n",
"  activation = preactivation.clip(0.0)\n",
"  return activation\n",
"\n",
"# Define a shallow neural network\n",
"def shallow_nn(x, beta_0, omega_0, beta_1, omega_1):\n",
"  # Make sure that input data is (1 x n_data) array\n",
"  n_data = x.size\n",
"  x = np.reshape(x,(1,n_data))\n",
"\n",
"  # This runs the network for ALL of the inputs x at once so we can draw the graph\n",
"  h1 = ReLU(np.matmul(beta_0,np.ones((1,n_data))) + np.matmul(omega_0,x))\n",
"  y = np.matmul(beta_1,np.ones((1,n_data))) + np.matmul(omega_1,h1)\n",
"  return y"
],
"metadata": {
"id": "Fv7SZR3tv7mV"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Get parameters for model -- we can call this function to easily reset them\n",
"def get_parameters():\n",
"  # And we'll create a network that approximately fits it\n",
"  beta_0 = np.zeros((3,1))   # formerly theta_x0\n",
"  omega_0 = np.zeros((3,1))  # formerly theta_x1\n",
"  beta_1 = np.zeros((1,1))   # formerly phi_0\n",
"  omega_1 = np.zeros((1,3))  # formerly phi_x\n",
"\n",
"  beta_0[0,0] = 0.3; beta_0[1,0] = -1.0; beta_0[2,0] = -0.5\n",
"  omega_0[0,0] = -1.0; omega_0[1,0] = 1.8; omega_0[2,0] = 0.65\n",
"  beta_1[0,0] = 0.1\n",
"  omega_1[0,0] = -2.0; omega_1[0,1] = -1.0; omega_1[0,2] = 7.0\n",
"\n",
"  return beta_0, omega_0, beta_1, omega_1"
],
"metadata": {
"id": "pUT9Ain_HRim"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Utility function for plotting data\n",
"def plot_univariate_regression(x_model, y_model, x_data = None, y_data = None, sigma_model = None, title= None):\n",
"  # Make sure model data are 1D arrays\n",
"  x_model = np.squeeze(x_model)\n",
"  y_model = np.squeeze(y_model)\n",
"\n",
"  fig, ax = plt.subplots()\n",
"  ax.plot(x_model,y_model)\n",
"  if sigma_model is not None:\n",
"    ax.fill_between(x_model, y_model-2*sigma_model, y_model+2*sigma_model, color='lightgray')\n",
"  ax.set_xlabel('Input, $x$'); ax.set_ylabel('Output, $y$')\n",
"  ax.set_xlim([0,1]); ax.set_ylim([-1,1])\n",
"  ax.set_aspect(0.5)\n",
"  if title is not None:\n",
"    ax.set_title(title)\n",
"  if x_data is not None:\n",
"    ax.plot(x_data, y_data, 'ko')\n",
"  plt.show()"
],
"metadata": {
"id": "NRR67ri_1TzN"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"# Univariate regression\n",
"\n",
"We'll investigate a simple univariate regression situation with a single input $x$ and a single output $y$, as pictured in figures 5.4 and 5.5b."
],
"metadata": {
"id": "PsgLZwsPxauP"
}
},
{
"cell_type": "code",
"source": [
"# Let's create some 1D training data\n",
"x_train = np.array([0.09291784,0.46809093,0.93089486,0.67612654,0.73441752,0.86847339,\\\n",
"                    0.49873225,0.51083168,0.18343972,0.99380898,0.27840809,0.38028817,\\\n",
"                    0.12055708,0.56715537,0.92005746,0.77072270,0.85278176,0.05315950,\\\n",
"                    0.87168699,0.58858043])\n",
"y_train = np.array([-0.25934537,0.18195445,0.651270150,0.13921448,0.09366691,0.30567674,\\\n",
"                    0.372291170,0.20716968,-0.08131792,0.51187806,0.16943738,0.3994327,\\\n",
"                    0.019062570,0.55820410,0.452564960,-0.1183121,0.02957665,-1.24354444, \\\n",
"                    0.248038840,0.26824970])\n",
"\n",
"# Get parameters for the model\n",
"beta_0, omega_0, beta_1, omega_1 = get_parameters()\n",
"sigma = 0.2\n",
"\n",
"# Define a range of input values\n",
"x_model = np.arange(0,1,0.01)\n",
"# Run the model to get values to plot and plot it\n",
"y_model = shallow_nn(x_model, beta_0, omega_0, beta_1, omega_1)\n",
"plot_univariate_regression(x_model, y_model, x_train, y_train, sigma_model = sigma)\n"
],
"metadata": {
"id": "VWzNOt1swFVd"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"The blue line is the mean prediction of the model and the gray area represents plus/minus two standard deviations. This model fits okay, but could be improved. Let's compute the loss: we'll compute the least squares error, the likelihood, and the negative log likelihood."
],
"metadata": {
"id": "MvVX6tl9AEXF"
}
},
{
"cell_type": "code",
"source": [
"# Return probability under normal distribution for input y\n",
"def normal_distribution(y, mu, sigma):\n",
"  # TODO -- write in the equation for the normal distribution\n",
"  # Equation 5.7 from the notes (you will need np.sqrt() and np.exp(), and math.pi)\n",
"  # Don't use the numpy version -- that's cheating!\n",
"  # Replace the line below\n",
"  prob = np.zeros_like(y)\n",
"\n",
"  return prob"
],
"metadata": {
"id": "YaLdRlEX0FkU"
},
"execution_count": null,
"outputs": []
},
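A minimal completion of the TODO above, writing out equation 5.7 directly (np and math are already imported in the first cell):

# Hypothetical completion: the normal probability density
def normal_distribution(y, mu, sigma):
  prob = np.exp(-0.5 * ((y - mu) / sigma)**2) / np.sqrt(2 * math.pi * sigma**2)
  return prob

As a sanity check, normal_distribution(1, -1, 2.3) comes out at roughly 0.119, matching the check cell below.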
{
"cell_type": "code",
"source": [
"# Let's double check we get the right answer before proceeding\n",
"print(\"Correct answer = %3.3f, Your answer = %3.3f\"%(0.119,normal_distribution(1,-1,2.3)))"
],
"metadata": {
"id": "4TSL14dqHHbV"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Let's plot the Gaussian distribution.\n",
"y_gauss = np.arange(-5,5,0.1)\n",
"mu = 0; sigma = 1.0\n",
"gauss_prob = normal_distribution(y_gauss, mu, sigma)\n",
"fig, ax = plt.subplots()\n",
"ax.plot(y_gauss, gauss_prob)\n",
"ax.set_xlabel('Input, $y$'); ax.set_ylabel('Probability $Pr(y)$')\n",
"ax.set_xlim([-5,5]); ax.set_ylim([0,1.0])\n",
"plt.show()\n",
"\n",
"# TODO\n",
"# 1. Predict what will happen if we change to mu=1 and leave sigma=1\n",
"# Now change the code above and see if you were correct.\n",
"\n",
"# 2. Predict what will happen if we leave mu = 0 and change sigma to 2.0\n",
"\n",
"# 3. Predict what will happen if we leave mu = 0 and change sigma to 0.5"
],
"metadata": {
"id": "A2HcmNfUMIlj"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"Now let's compute the likelihood using this function"
],
"metadata": {
"id": "R5z_0dzQMF35"
}
},
{
"cell_type": "code",
"source": [
"# Return the likelihood of all of the data under the model\n",
"def compute_likelihood(y_train, mu, sigma):\n",
"  # TODO -- compute the likelihood of the data -- the product of the normal probabilities for each data point\n",
"  # Top line of equation 5.3 in the notes\n",
"  # You will need np.prod() and the normal_distribution function you used above\n",
"  # Replace the line below\n",
"  likelihood = 0\n",
"\n",
"  return likelihood"
],
"metadata": {
"id": "zpS7o6liCx7f"
},
"execution_count": null,
"outputs": []
},
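One possible completion of the TODO above: the likelihood is just the product of the per-point probabilities (top line of equation 5.3):

likelihood = np.prod(normal_distribution(y_train, mu, sigma))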
{
"cell_type": "code",
"source": [
"# Let's test this for a homoscedastic (constant sigma) model\n",
"beta_0, omega_0, beta_1, omega_1 = get_parameters()\n",
"# Use our neural network to predict the mean of the Gaussian\n",
"mu_pred = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
"# Set the standard deviation to something reasonable\n",
"sigma = 0.2\n",
"# Compute the likelihood\n",
"likelihood = compute_likelihood(y_train, mu_pred, sigma)\n",
"# Let's double check we get the right answer before proceeding\n",
"print(\"Correct answer = %9.9f, Your answer = %9.9f\"%(0.000010624,likelihood))"
],
"metadata": {
"id": "1hQxBLoVNlr2"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"You can see that this gives a very small answer, even for this small 1D dataset, and with the model fitting quite well. This is because it is the product of several probabilities, which are all quite small themselves.\n",
"This will get out of hand pretty quickly with real datasets -- the likelihood will get so small that we can't represent it with normal finite-precision math.\n",
"\n",
"This is why we use the negative log likelihood."
],
"metadata": {
"id": "HzphKgPfOvlk"
}
},
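A quick illustration of the underflow problem (a sketch; the numbers are arbitrary): the product of a couple of thousand small probabilities is already exactly zero in float64, while the sum of their logs is perfectly representable.

p = np.full(2000, 0.01)   # 2000 hypothetical data points, each with probability 0.01
print(np.prod(p))         # 0.0 -- 1e-4000 underflows double precision
print(np.sum(np.log(p)))  # about -9210.34 -- no problem at all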
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Return the negative log likelihood of the data under the model\n",
|
||||
"def compute_negative_log_likelihood(y_train, mu, sigma):\n",
|
||||
" # TODO -- compute the likelihood of the data -- don't use the likelihood function above -- compute the negative sum of the log probabilities\n",
|
||||
" # Bottom line of equation 5.3 in the notes\n",
|
||||
" # You will need np.sum(), np.log()\n",
|
||||
" # Replace the line below\n",
|
||||
" nll = 0\n",
|
||||
"\n",
|
||||
" return nll"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "dsT0CWiKBmTV"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
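{
"cell_type": "code",
"source": [
"# A minimal reference sketch (ours, not the official solution); the name\n",
"# compute_negative_log_likelihood_ref is hypothetical.\n",
"def compute_negative_log_likelihood_ref(y_train, mu, sigma):\n",
"  # Negative sum of log probabilities (bottom line of equation 5.3)\n",
"  return -np.sum(np.log(normal_distribution(y_train, mu, sigma)))\n"
],
"metadata": {},
"execution_count": null,
"outputs": []
},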
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's test this for a homoscedastic (constant sigma) model\n",
|
||||
"beta_0, omega_0, beta_1, omega_1 = get_parameters()\n",
|
||||
"# Use our neural network to predict the mean of the Gaussian\n",
|
||||
"mu_pred = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
||||
"# Set the standard deviation to something reasonable\n",
|
||||
"sigma = 0.2\n",
|
||||
"# Compute the log likelihood\n",
|
||||
"nll = compute_negative_log_likelihood(y_train, mu_pred, sigma)\n",
|
||||
"# Let's double check we get the right answer before proceeding\n",
|
||||
"print(\"Correct answer = %9.9f, Your answer = %9.9f\"%(11.452419564,nll))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "nVxUXg9rQmwI"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"For good measure, let's compute the sum of squares as well"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "-S8bXApoWVLG"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Return the squared distance between the predicted\n",
|
||||
"def compute_sum_of_squares(y_train, y_pred):\n",
|
||||
" # TODO -- compute the sum of squared distances between the training data and the model prediction\n",
|
||||
" # Eqn 5.10 in the notes. Make sure that you understand this, and ask questions if you don't\n",
|
||||
" # Replace the line below\n",
|
||||
" sum_of_squares = 0;\n",
|
||||
"\n",
|
||||
" return sum_of_squares"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "I1pjFdHCF4JZ"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
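{
"cell_type": "code",
"source": [
"# A minimal reference sketch (ours, not the official solution); the name\n",
"# compute_sum_of_squares_ref is hypothetical.\n",
"def compute_sum_of_squares_ref(y_train, y_pred):\n",
"  # Sum of squared residuals between data and prediction (equation 5.10)\n",
"  return np.sum((y_train - y_pred) ** 2)\n"
],
"metadata": {},
"execution_count": null,
"outputs": []
},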
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's test this again\n",
|
||||
"beta_0, omega_0, beta_1, omega_1 = get_parameters()\n",
|
||||
"# Use our neural network to predict the mean of the Gaussian\n",
|
||||
"y_pred = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
||||
"# Compute the log likelihood\n",
|
||||
"sum_of_squares = compute_sum_of_squares(y_train, y_pred)\n",
|
||||
"# Let's double check we get the right answer before proceeding\n",
|
||||
"print(\"Correct answer = %9.9f, Your answer = %9.9f\"%(2.020992572,sum_of_squares))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "2C40fskIHBx7"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's investigate finding the maximum likelihood / minimum log likelihood / least squares solution. For simplicity, we'll assume that all the parameters are correct except one and look at how the likelihood, log likelihood, and sum of squares change as we manipulate the last parameter. We'll start with overall y offset, beta_1 (formerly phi_0)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "OgcRojvPWh4V"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define a range of values for the parameter\n",
|
||||
"beta_1_vals = np.arange(0,1.0,0.01)\n",
|
||||
"# Create some arrays to store the likelihoods, negative log likelihoods and sum of squares\n",
|
||||
"likelihoods = np.zeros_like(beta_1_vals)\n",
|
||||
"nlls = np.zeros_like(beta_1_vals)\n",
|
||||
"sum_squares = np.zeros_like(beta_1_vals)\n",
|
||||
"\n",
|
||||
"# Initialise the parameters\n",
|
||||
"beta_0, omega_0, beta_1, omega_1 = get_parameters()\n",
|
||||
"sigma = 0.2\n",
|
||||
"for count in range(len(beta_1_vals)):\n",
|
||||
" # Set the value for the parameter\n",
|
||||
" beta_1[0,0] = beta_1_vals[count]\n",
|
||||
" # Run the network with new parameters\n",
|
||||
" mu_pred = y_pred = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
||||
" # Compute and store the three values\n",
|
||||
" likelihoods[count] = compute_likelihood(y_train, mu_pred, sigma)\n",
|
||||
" nlls[count] = compute_negative_log_likelihood(y_train, mu_pred, sigma)\n",
|
||||
" sum_squares[count] = compute_sum_of_squares(y_train, y_pred)\n",
|
||||
" # Draw the model for every 20th parameter setting\n",
|
||||
" if count % 20 == 0:\n",
|
||||
" # Run the model to get values to plot and plot it.\n",
|
||||
" y_model = shallow_nn(x_model, beta_0, omega_0, beta_1, omega_1)\n",
|
||||
" plot_univariate_regression(x_model, y_model, x_train, y_train, sigma_model = sigma, title=\"beta1=%3.3f\"%(beta_1[0,0]))\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "pFKtDaAeVU4U"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Now let's plot the likelihood, negative log likelihood, and least squares as a function the value of the offset beta1\n",
|
||||
"fig, ax = plt.subplots(1,3)\n",
|
||||
"fig.set_size_inches(10.5, 3.5)\n",
|
||||
"fig.tight_layout(pad=3.0)\n",
|
||||
"ax[0].plot(beta_1_vals, likelihoods); ax[0].set_xlabel('beta_1[0]$'); ax[0].set_ylabel('likelihood')\n",
|
||||
"ax[1].plot(beta_1_vals, nlls); ax[1].set_xlabel('beta_1[0]'); ax[1].set_ylabel('negative log likelihood')\n",
|
||||
"ax[2].plot(beta_1_vals, sum_squares); ax[2].set_xlabel('beta_1[0]'); ax[2].set_ylabel('sum of squares')\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "UHXeTa9MagO6"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Hopefully, you can see that the maximum of the likelihood fn is at the same position as the minimum negative log likelihood\n",
|
||||
"# and the least squares solutions\n",
|
||||
"# Let's check that:\n",
|
||||
"print(\"Maximum likelihood = %3.3f, at beta_1=%3.3f\"%( (likelihoods[np.argmax(likelihoods)],beta_1_vals[np.argmax(likelihoods)])))\n",
|
||||
"print(\"Minimum negative log likelihood = %3.3f, at beta_1=%3.3f\"%( (nlls[np.argmin(nlls)],beta_1_vals[np.argmin(nlls)])))\n",
|
||||
"print(\"Least squares = %3.3f, at beta_1=%3.3f\"%( (sum_squares[np.argmin(sum_squares)],beta_1_vals[np.argmin(sum_squares)])))\n",
|
||||
"\n",
|
||||
"# Plot the best model\n",
|
||||
"beta_1[0,0] = beta_1_vals[np.argmin(sum_squares)]\n",
|
||||
"y_model = shallow_nn(x_model, beta_0, omega_0, beta_1, omega_1)\n",
|
||||
"plot_univariate_regression(x_model, y_model, x_train, y_train, sigma_model = sigma, title=\"beta1=%3.3f\"%(beta_1[0,0]))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "aDEPhddNdN4u"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"They all give the same answer. But you can see from the three plots above that the likelihood is very small unless the parameters are almost correct. So in practice, we would work with the negative log likelihood or the least squares.<br>\n",
|
||||
"\n",
|
||||
"Let's do the same thing with the standard deviation parameter of our network. This is not an output of the network (unless we choose to make that the case), but it still affects the likelihood.\n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "771G8N1Vk5A2"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define a range of values for the parameter\n",
|
||||
"sigma_vals = np.arange(0.1,0.5,0.005)\n",
|
||||
"# Create some arrays to store the likelihoods, negative log likelihoods and sum of squares\n",
|
||||
"likelihoods = np.zeros_like(sigma_vals)\n",
|
||||
"nlls = np.zeros_like(sigma_vals)\n",
|
||||
"sum_squares = np.zeros_like(sigma_vals)\n",
|
||||
"\n",
|
||||
"# Initialise the parameters\n",
|
||||
"beta_0, omega_0, beta_1, omega_1 = get_parameters()\n",
|
||||
"# Might as well set to the best offset\n",
|
||||
"beta_1[0,0] = 0.27\n",
|
||||
"for count in range(len(sigma_vals)):\n",
|
||||
" # Set the value for the parameter\n",
|
||||
" sigma = sigma_vals[count]\n",
|
||||
" # Run the network with new parameters\n",
|
||||
" mu_pred = y_pred = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
||||
" # Compute and store the three values\n",
|
||||
" likelihoods[count] = compute_likelihood(y_train, mu_pred, sigma)\n",
|
||||
" nlls[count] = compute_negative_log_likelihood(y_train, mu_pred, sigma)\n",
|
||||
" sum_squares[count] = compute_sum_of_squares(y_train, y_pred)\n",
|
||||
" # Draw the model for every 20th parameter setting\n",
|
||||
" if count % 20 == 0:\n",
|
||||
" # Run the model to get values to plot and plot it.\n",
|
||||
" y_model = shallow_nn(x_model, beta_0, omega_0, beta_1, omega_1)\n",
|
||||
" plot_univariate_regression(x_model, y_model, x_train, y_train, sigma_model=sigma, title=\"sigma=%3.3f\"%(sigma))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "dMNAr0R8gg82"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Now let's plot the likelihood, negative log likelihood, and least squares as a function the value of the standard divation sigma\n",
|
||||
"fig, ax = plt.subplots(1,3)\n",
|
||||
"fig.set_size_inches(10.5, 3.5)\n",
|
||||
"fig.tight_layout(pad=3.0)\n",
|
||||
"ax[0].plot(sigma_vals, likelihoods); ax[0].set_xlabel('$\\sigma$'); ax[0].set_ylabel('likelihood')\n",
|
||||
"ax[1].plot(sigma_vals, nlls); ax[1].set_xlabel('$\\sigma$'); ax[1].set_ylabel('negative log likelihood')\n",
|
||||
"ax[2].plot(sigma_vals, sum_squares); ax[2].set_xlabel('$\\sigma$'); ax[2].set_ylabel('sum of squares')\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "l9jduyHLDAZC"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Hopefully, you can see that the maximum of the likelihood fn is at the same position as the minimum negative log likelihood\n",
|
||||
"# The least squares solution does not depend on sigma, so it's just flat -- no use here.\n",
|
||||
"# Let's check that:\n",
|
||||
"print(\"Maximum likelihood = %3.3f, at beta_1=%3.3f\"%( (likelihoods[np.argmax(likelihoods)],sigma_vals[np.argmax(likelihoods)])))\n",
|
||||
"print(\"Minimum negative log likelihood = %3.3f, at beta_1=%3.3f\"%( (nlls[np.argmin(nlls)],sigma_vals[np.argmin(nlls)])))\n",
|
||||
"# Plot the best model\n",
|
||||
"sigma= sigma_vals[np.argmin(nlls)]\n",
|
||||
"y_model = shallow_nn(x_model, beta_0, omega_0, beta_1, omega_1)\n",
|
||||
"plot_univariate_regression(x_model, y_model, x_train, y_train, sigma_model = sigma, title=\"beta_1=%3.3f, sigma =%3.3f\"%(beta_1[0,0],sigma))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "XH7yER52Dxt5"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Obviously, to fit the full neural model we would vary all of the 10 parameters of the network in the $\\boldsymbol\\beta_{0},\\boldsymbol\\omega_{0},\\boldsymbol\\beta_{1},\\boldsymbol\\omega_{1}$ (and maybe $\\sigma$) until we find the combination that have the maximum likelihood / minimum negative log likelihood / least squares.<br><br>\n",
|
||||
"\n",
|
||||
"Here we just varied one at a time as it is easier to see what is going on. This is known as **coordinate descent**.\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "q_KeGNAHEbIt"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
428
Notebooks/Chap05/5_2_Binary_Cross_Entropy_Loss.ipynb
Normal file
@@ -0,0 +1,428 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyOlPP7m+YTLyMPaN0WxRdrb",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap05/5_2_Binary_Cross_Entropy_Loss.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 5.2 Binary Cross-Entropy Loss**\n",
|
||||
"\n",
|
||||
"This notebook investigates the binary cross-entropy loss. It follows from applying the formula in section 5.2 to a loss function based on the Bernoulli distribution.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "jSlFkICHwHQF"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "PYMZ1x-Pv1ht"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Imports math library\n",
|
||||
"import numpy as np\n",
|
||||
"# Imports plotting library\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"# Import math Library\n",
|
||||
"import math"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define the Rectified Linear Unit (ReLU) function\n",
|
||||
"def ReLU(preactivation):\n",
|
||||
" activation = preactivation.clip(0.0)\n",
|
||||
" return activation\n",
|
||||
"\n",
|
||||
"# Define a shallow neural network\n",
|
||||
"def shallow_nn(x, beta_0, omega_0, beta_1, omaga_1):\n",
|
||||
" # Make sure that input data is (1 x n_data) array\n",
|
||||
" n_data = x.size\n",
|
||||
" x = np.reshape(x,(1,n_data))\n",
|
||||
"\n",
|
||||
" # This runs the network for ALL of the inputs, x at once so we can draw graph\n",
|
||||
" h1 = ReLU(np.matmul(beta_0,np.ones((1,n_data))) + np.matmul(omega_0,x))\n",
|
||||
" model_out = np.matmul(beta_1,np.ones((1,n_data))) + np.matmul(omega_1,h1)\n",
|
||||
" return model_out"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Fv7SZR3tv7mV"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Get parameters for model -- we can call this function to easily reset them\n",
|
||||
"def get_parameters():\n",
|
||||
" # And we'll create a network that approximately fits it\n",
|
||||
" beta_0 = np.zeros((3,1)); # formerly theta_x0\n",
|
||||
" omega_0 = np.zeros((3,1)); # formerly theta_x1\n",
|
||||
" beta_1 = np.zeros((1,1)); # formerly phi_0\n",
|
||||
" omega_1 = np.zeros((1,3)); # formerly phi_x\n",
|
||||
"\n",
|
||||
" beta_0[0,0] = 0.3; beta_0[1,0] = -1.0; beta_0[2,0] = -0.5\n",
|
||||
" omega_0[0,0] = -1.0; omega_0[1,0] = 1.8; omega_0[2,0] = 0.65\n",
|
||||
" beta_1[0,0] = 2.6;\n",
|
||||
" omega_1[0,0] = -24.0; omega_1[0,1] = -8.0; omega_1[0,2] = 50.0\n",
|
||||
"\n",
|
||||
" return beta_0, omega_0, beta_1, omega_1"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "pUT9Ain_HRim"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Utility function for plotting data\n",
|
||||
"def plot_binary_classification(x_model, out_model, lambda_model, x_data = None, y_data = None, title= None):\n",
|
||||
" # Make sure model data are 1D arrays\n",
|
||||
" x_model = np.squeeze(x_model)\n",
|
||||
" out_model = np.squeeze(out_model)\n",
|
||||
" lambda_model = np.squeeze(lambda_model)\n",
|
||||
"\n",
|
||||
" fig, ax = plt.subplots(1,2)\n",
|
||||
" fig.set_size_inches(7.0, 3.5)\n",
|
||||
" fig.tight_layout(pad=3.0)\n",
|
||||
" ax[0].plot(x_model,out_model)\n",
|
||||
" ax[0].set_xlabel('Input, $x$'); ax[0].set_ylabel('Model output')\n",
|
||||
" ax[0].set_xlim([0,1]);ax[0].set_ylim([-4,4])\n",
|
||||
" if title is not None:\n",
|
||||
" ax[0].set_title(title)\n",
|
||||
" ax[1].plot(x_model,lambda_model)\n",
|
||||
" ax[1].set_xlabel('Input, $x$'); ax[1].set_ylabel('$\\lambda$ or Pr(y=1|x)')\n",
|
||||
" ax[1].set_xlim([0,1]);ax[1].set_ylim([-0.05,1.05])\n",
|
||||
" if title is not None:\n",
|
||||
" ax[1].set_title(title)\n",
|
||||
" if x_data is not None:\n",
|
||||
" ax[1].plot(x_data, y_data, 'ko')\n",
|
||||
" plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "NRR67ri_1TzN"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Binary classification\n",
|
||||
"\n",
|
||||
"In binary classification tasks, the network predicts the probability of the output belonging to class 1. Since probabilities must lie in [0,1] and the network can output arbitrary values, we map the network through a sigmoid function that ensures the range is valid."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "PsgLZwsPxauP"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Sigmoid function that maps [-infty,infty] to [0,1]\n",
|
||||
"def sigmoid(model_out):\n",
|
||||
" # TODO -- implement the logistic sigmoid function from equation 5.18\n",
|
||||
" # Replace this line:\n",
|
||||
" sig_model_out = np.zeros_like(model_out)\n",
|
||||
"\n",
|
||||
" return sig_model_out"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "uFb8h-9IXnIe"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
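{
"cell_type": "code",
"source": [
"# A minimal reference sketch (ours, not the official solution); the name\n",
"# sigmoid_ref is hypothetical.\n",
"def sigmoid_ref(model_out):\n",
"  # Logistic sigmoid of equation 5.18: maps (-inf, inf) to (0, 1)\n",
"  return 1.0 / (1.0 + np.exp(-model_out))\n"
],
"metadata": {},
"execution_count": null,
"outputs": []
},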
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's create some 1D training data\n",
|
||||
"x_train = np.array([0.09291784,0.46809093,0.93089486,0.67612654,0.73441752,0.86847339,\\\n",
|
||||
" 0.49873225,0.51083168,0.18343972,0.99380898,0.27840809,0.38028817,\\\n",
|
||||
" 0.12055708,0.56715537,0.92005746,0.77072270,0.85278176,0.05315950,\\\n",
|
||||
" 0.87168699,0.58858043])\n",
|
||||
"y_train = np.array([0,1,1,0,0,1,\\\n",
|
||||
" 1,0,0,1,0,1,\\\n",
|
||||
" 0,1,1,0,1,0, \\\n",
|
||||
" 1,1])\n",
|
||||
"\n",
|
||||
"# Get parameters for the model\n",
|
||||
"beta_0, omega_0, beta_1, omega_1 = get_parameters()\n",
|
||||
"\n",
|
||||
"# Define a range of input values\n",
|
||||
"x_model = np.arange(0,1,0.01)\n",
|
||||
"# Run the model to get values to plot and plot it.\n",
|
||||
"model_out= shallow_nn(x_model, beta_0, omega_0, beta_1, omega_1)\n",
|
||||
"lambda_model = sigmoid(model_out)\n",
|
||||
"plot_binary_classification(x_model, model_out, lambda_model, x_train, y_train)\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "VWzNOt1swFVd"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"The left is model output and the right is the model output after the sigmoid has been applied, so it now lies in the range [0,1] and represents the probability, that y=1. The black dots show the training data. We'll compute the the likelihood and the negative log likelihood."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "MvVX6tl9AEXF"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Return probability under Bernoulli distribution for input x\n",
|
||||
"def bernoulli_distribution(y, lambda_param):\n",
|
||||
" # TODO-- write in the equation for the Bernoulli distribution\n",
|
||||
" # Equation 5.17 from the notes (you will need np.power)\n",
|
||||
" # Replace the line below\n",
|
||||
" prob = np.zeros_like(y)\n",
|
||||
"\n",
|
||||
" return prob"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YaLdRlEX0FkU"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
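{
"cell_type": "code",
"source": [
"# A minimal reference sketch (ours, not the official solution); the name\n",
"# bernoulli_distribution_ref is hypothetical.\n",
"def bernoulli_distribution_ref(y, lambda_param):\n",
"  # Equation 5.17: lambda^y * (1 - lambda)^(1 - y)\n",
"  return np.power(lambda_param, y) * np.power(1.0 - lambda_param, 1 - y)\n"
],
"metadata": {},
"execution_count": null,
"outputs": []
},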
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's double check we get the right answer before proceeding\n",
|
||||
"print(\"Correct answer = %3.3f, Your answer = %3.3f\"%(0.8,bernoulli_distribution(0,0.2)))\n",
|
||||
"print(\"Correct answer = %3.3f, Your answer = %3.3f\"%(0.2,bernoulli_distribution(1,0.2)))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "4TSL14dqHHbV"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's compute the likelihood using this function"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "R5z_0dzQMF35"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Return the likelihood of all of the data under the model\n",
|
||||
"def compute_likelihood(y_train, lambda_param):\n",
|
||||
" # TODO -- compute the likelihood of the data -- the product of the Bernoulli probabilities for each data point\n",
|
||||
" # Top line of equation 5.3 in the notes\n",
|
||||
" # You will need np.prod() and the bernoulli_distribution function you used above\n",
|
||||
" # Replace the line below\n",
|
||||
" likelihood = 0\n",
|
||||
"\n",
|
||||
" return likelihood"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "zpS7o6liCx7f"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
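{
"cell_type": "code",
"source": [
"# A minimal reference sketch (ours, not the official solution); it relies on\n",
"# the bernoulli_distribution function you completed above.\n",
"def compute_likelihood_ref(y_train, lambda_param):\n",
"  # Product of the per-point Bernoulli probabilities\n",
"  return np.prod(bernoulli_distribution(y_train, lambda_param))\n"
],
"metadata": {},
"execution_count": null,
"outputs": []
},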
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's test this\n",
|
||||
"beta_0, omega_0, beta_1, omega_1 = get_parameters()\n",
|
||||
"# Use our neural network to predict the mean of the Gaussian\n",
|
||||
"model_out = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
||||
"lambda_train = sigmoid(model_out)\n",
|
||||
"# Compute the likelihood\n",
|
||||
"likelihood = compute_likelihood(y_train, lambda_train)\n",
|
||||
"# Let's double check we get the right answer before proceeding\n",
|
||||
"print(\"Correct answer = %9.9f, Your answer = %9.9f\"%(0.000070237,likelihood))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "1hQxBLoVNlr2"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"You can see that this gives a very small answer, even for this small 1D dataset, and with the model fitting quite well. This is because it is the product of several probabilities, which are all quite small themselves.\n",
|
||||
"This will get out of hand pretty quickly with real datasets -- the likelihood will get so small that we can't represent it with normal finite-precision math\n",
|
||||
"\n",
|
||||
"This is why we use negative log likelihood"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "HzphKgPfOvlk"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Return the negative log likelihood of the data under the model\n",
|
||||
"def compute_negative_log_likelihood(y_train, lambda_param):\n",
|
||||
" # TODO -- compute the likelihood of the data -- don't use the likelihood function above -- compute the negative sum of the log probabilities\n",
|
||||
" # You will need np.sum(), np.log()\n",
|
||||
" # Replace the line below\n",
|
||||
" nll = 0\n",
|
||||
"\n",
|
||||
" return nll"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "dsT0CWiKBmTV"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
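{
"cell_type": "code",
"source": [
"# A minimal reference sketch (ours, not the official solution); the name\n",
"# compute_negative_log_likelihood_ref is hypothetical.\n",
"def compute_negative_log_likelihood_ref(y_train, lambda_param):\n",
"  # Binary cross-entropy: negative sum of the log Bernoulli probabilities\n",
"  return -np.sum(y_train * np.log(lambda_param) + (1 - y_train) * np.log(1.0 - lambda_param))\n"
],
"metadata": {},
"execution_count": null,
"outputs": []
},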
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's test this\n",
|
||||
"beta_0, omega_0, beta_1, omega_1 = get_parameters()\n",
|
||||
"# Use our neural network to predict the mean of the Gaussian\n",
|
||||
"model_out = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
||||
"# Pass through the sigmoid function\n",
|
||||
"lambda_train = sigmoid(model_out)\n",
|
||||
"# Compute the log likelihood\n",
|
||||
"nll = compute_negative_log_likelihood(y_train, lambda_train)\n",
|
||||
"# Let's double check we get the right answer before proceeding\n",
|
||||
"print(\"Correct answer = %9.9f, Your answer = %9.9f\"%(9.563639387,nll))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "nVxUXg9rQmwI"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's investigate finding the maximum likelihood / minimum negative log likelihood solution. For simplicity, we'll assume that all the parameters are fixed except one and look at how the likelihood and log likelihood change as we manipulate the last parameter. We'll start with overall y_offset, beta_1 (formerly phi_0)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "OgcRojvPWh4V"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define a range of values for the parameter\n",
|
||||
"beta_1_vals = np.arange(-2,6.0,0.1)\n",
|
||||
"# Create some arrays to store the likelihoods, negative log likelihoods\n",
|
||||
"likelihoods = np.zeros_like(beta_1_vals)\n",
|
||||
"nlls = np.zeros_like(beta_1_vals)\n",
|
||||
"\n",
|
||||
"# Initialise the parameters\n",
|
||||
"beta_0, omega_0, beta_1, omega_1 = get_parameters()\n",
|
||||
"for count in range(len(beta_1_vals)):\n",
|
||||
" # Set the value for the parameter\n",
|
||||
" beta_1[0,0] = beta_1_vals[count]\n",
|
||||
" # Run the network with new parameters\n",
|
||||
" model_out = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
||||
" lambda_train = sigmoid(model_out)\n",
|
||||
" # Compute and store the three values\n",
|
||||
" likelihoods[count] = compute_likelihood(y_train,lambda_train)\n",
|
||||
" nlls[count] = compute_negative_log_likelihood(y_train, lambda_train)\n",
|
||||
" # Draw the model for every 20th parameter setting\n",
|
||||
" if count % 20 == 0:\n",
|
||||
" # Run the model to get values to plot and plot it.\n",
|
||||
" model_out = shallow_nn(x_model, beta_0, omega_0, beta_1, omega_1)\n",
|
||||
" lambda_model = sigmoid(model_out)\n",
|
||||
" plot_binary_classification(x_model, model_out, lambda_model, x_train, y_train, title=\"beta_1[0]=%3.3f\"%(beta_1[0,0]))\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "pFKtDaAeVU4U"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Now let's plot the likelihood, and negative log likelihoods as a function the value of the offset beta1\n",
|
||||
"fig, ax = plt.subplots(1,2)\n",
|
||||
"fig.set_size_inches(10.5, 3.5)\n",
|
||||
"fig.tight_layout(pad=3.0)\n",
|
||||
"ax[0].plot(beta_1_vals, likelihoods); ax[0].set_xlabel('beta_1[0]'); ax[0].set_ylabel('likelihood')\n",
|
||||
"ax[1].plot(beta_1_vals, nlls); ax[1].set_xlabel('beta_1[0]'); ax[1].set_ylabel('negative log likelihood')\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "UHXeTa9MagO6"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Hopefully, you can see that the maximum of the likelihood fn is at the same position as the minimum negative log likelihood\n",
|
||||
"# Let's check that:\n",
|
||||
"print(\"Maximum likelihood = %f, at beta_1=%3.3f\"%( (likelihoods[np.argmax(likelihoods)],beta_1_vals[np.argmax(likelihoods)])))\n",
|
||||
"print(\"Minimum negative log likelihood = %f, at beta_1=%3.3f\"%( (nlls[np.argmin(nlls)],beta_1_vals[np.argmin(nlls)])))\n",
|
||||
"\n",
|
||||
"# Plot the best model\n",
|
||||
"beta_1[0,0] = beta_1_vals[np.argmin(nlls)]\n",
|
||||
"model_out = shallow_nn(x_model, beta_0, omega_0, beta_1, omega_1)\n",
|
||||
"lambda_model = sigmoid(model_out)\n",
|
||||
"plot_binary_classification(x_model, model_out, lambda_model, x_train, y_train, title=\"beta_1[0]=%3.3f\"%(beta_1[0,0]))\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "aDEPhddNdN4u"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"They both give the same answer. But you can see from the likelihood above that the likelihood is very small unless the parameters are almost correct. So in practice, we would work with the negative log likelihood.<br><br>\n",
|
||||
"\n",
|
||||
"Again, to fit the full neural model we would vary all of the 10 parameters of the network in the $\\boldsymbol\\beta_{0},\\boldsymbol\\omega_{0},\\boldsymbol\\beta_{1},\\boldsymbol\\omega_{1}$ until we find the combination that have the maximum likelihood / minimum negative log likelihood.<br><br>\n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "771G8N1Vk5A2"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
451
Notebooks/Chap05/5_3_Multiclass_Cross_entropy_Loss.ipynb
Normal file
@@ -0,0 +1,451 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyOVTohDBtmCCzSEqLJ4J9R/",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap05/5_3_Multiclass_Cross_entropy_Loss.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 5.3 Multiclass Cross-Entropy Loss**\n",
|
||||
"\n",
|
||||
"This notebook investigates the multi-class cross-entropy loss. It follows from applying the formula in section 5.2 to a loss function based on the Categorical distribution.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "jSlFkICHwHQF"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "PYMZ1x-Pv1ht"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Imports math library\n",
|
||||
"import numpy as np\n",
|
||||
"# Used for repmat\n",
|
||||
"import numpy.matlib\n",
|
||||
"# Imports plotting library\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"# Import math Library\n",
|
||||
"import math"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define the Rectified Linear Unit (ReLU) function\n",
|
||||
"def ReLU(preactivation):\n",
|
||||
" activation = preactivation.clip(0.0)\n",
|
||||
" return activation\n",
|
||||
"\n",
|
||||
"# Define a shallow neural network\n",
|
||||
"def shallow_nn(x, beta_0, omega_0, beta_1, omaga_1):\n",
|
||||
" # Make sure that input data is (1 x n_data) array\n",
|
||||
" n_data = x.size\n",
|
||||
" x = np.reshape(x,(1,n_data))\n",
|
||||
"\n",
|
||||
" # This runs the network for ALL of the inputs, x at once so we can draw graph\n",
|
||||
" h1 = ReLU(np.matmul(beta_0,np.ones((1,n_data))) + np.matmul(omega_0,x))\n",
|
||||
" model_out = np.matmul(beta_1,np.ones((1,n_data))) + np.matmul(omega_1,h1)\n",
|
||||
" return model_out"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Fv7SZR3tv7mV"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Get parameters for model -- we can call this function to easily reset them\n",
|
||||
"def get_parameters():\n",
|
||||
" # And we'll create a network that approximately fits it\n",
|
||||
" beta_0 = np.zeros((3,1)); # formerly theta_x0\n",
|
||||
" omega_0 = np.zeros((3,1)); # formerly theta_x1\n",
|
||||
" beta_1 = np.zeros((3,1)); # NOTE -- there are three outputs now (one for each class, so three output biases)\n",
|
||||
" omega_1 = np.zeros((3,3)); # NOTE -- there are three outputs now (one for each class, so nine output weights, connecting 3 hidden units to 3 outputs)\n",
|
||||
"\n",
|
||||
" beta_0[0,0] = 0.3; beta_0[1,0] = -1.0; beta_0[2,0] = -0.5\n",
|
||||
" omega_0[0,0] = -1.0; omega_0[1,0] = 1.8; omega_0[2,0] = 0.65\n",
|
||||
" beta_1[0,0] = 2.0; beta_1[1,0] = -2; beta_1[2,0] = 0.0\n",
|
||||
" omega_1[0,0] = -24.0; omega_1[0,1] = -8.0; omega_1[0,2] = 50.0\n",
|
||||
" omega_1[1,0] = -2.0; omega_1[1,1] = 8.0; omega_1[1,2] = -30.0\n",
|
||||
" omega_1[2,0] = 16.0; omega_1[2,1] = -8.0; omega_1[2,2] =-8\n",
|
||||
"\n",
|
||||
" return beta_0, omega_0, beta_1, omega_1"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "pUT9Ain_HRim"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Utility function for plotting data\n",
|
||||
"def plot_multiclass_classification(x_model, out_model, lambda_model, x_data = None, y_data = None, title= None):\n",
|
||||
" # Make sure model data are 1D arrays\n",
|
||||
" n_data = len(x_model)\n",
|
||||
" n_class = 3\n",
|
||||
" x_model = np.squeeze(x_model)\n",
|
||||
" out_model = np.reshape(out_model, (n_class,n_data))\n",
|
||||
" lambda_model = np.reshape(lambda_model, (n_class,n_data))\n",
|
||||
"\n",
|
||||
" fig, ax = plt.subplots(1,2)\n",
|
||||
" fig.set_size_inches(7.0, 3.5)\n",
|
||||
" fig.tight_layout(pad=3.0)\n",
|
||||
" ax[0].plot(x_model,out_model[0,:],'r-')\n",
|
||||
" ax[0].plot(x_model,out_model[1,:],'g-')\n",
|
||||
" ax[0].plot(x_model,out_model[2,:],'b-')\n",
|
||||
" ax[0].set_xlabel('Input, $x$'); ax[0].set_ylabel('Model outputs')\n",
|
||||
" ax[0].set_xlim([0,1]);ax[0].set_ylim([-4,4])\n",
|
||||
" if title is not None:\n",
|
||||
" ax[0].set_title(title)\n",
|
||||
" ax[1].plot(x_model,lambda_model[0,:],'r-')\n",
|
||||
" ax[1].plot(x_model,lambda_model[1,:],'g-')\n",
|
||||
" ax[1].plot(x_model,lambda_model[2,:],'b-')\n",
|
||||
" ax[1].set_xlabel('Input, $x$'); ax[1].set_ylabel('$\\lambda$ or Pr(y=k|x)')\n",
|
||||
" ax[1].set_xlim([0,1]);ax[1].set_ylim([-0.1,1.05])\n",
|
||||
" if title is not None:\n",
|
||||
" ax[1].set_title(title)\n",
|
||||
" if x_data is not None:\n",
|
||||
" for i in range(len(x_data)):\n",
|
||||
" if y_data[i] ==0:\n",
|
||||
" ax[1].plot(x_data[i],-0.05, 'r.')\n",
|
||||
" if y_data[i] ==1:\n",
|
||||
" ax[1].plot(x_data[i],-0.05, 'g.')\n",
|
||||
" if y_data[i] ==2:\n",
|
||||
" ax[1].plot(x_data[i],-0.05, 'b.')\n",
|
||||
" plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "NRR67ri_1TzN"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Multiclass classification\n",
|
||||
"\n",
|
||||
"For multiclass classification, the network must predict the probability of $K$ classes, using $K$ outputs. However, these probability must be non-negative and sum to one, and the network outputs can take arbitrary values. Hence, we pass the outputs through a softmax function which maps $K$ arbitrary values to $K$ non-negative values that sum to one."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "PsgLZwsPxauP"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Softmax function that maps a vector of arbitrary values to a vector of values that are positive and sum to one.\n",
|
||||
"def softmax(model_out):\n",
|
||||
" # This operation has to be done separately for every column of the input\n",
|
||||
" # Compute exponentials of all the elements\n",
|
||||
" # TODO: compute the softmax function (eq 5.22)\n",
|
||||
" # Replace this skeleton code\n",
|
||||
"\n",
|
||||
" # Compute the exponential of the model outputs\n",
|
||||
" exp_model_out = np.zeros_like(model_out) ;\n",
|
||||
" # Compute the sum of the exponentials (denominator of equation 5.22)\n",
|
||||
" sum_exp_model_out = np.zeros_like(model_out) ;\n",
|
||||
" # Normalize the exponentials (np.matlib.repmat might be useful here)\n",
|
||||
" softmax_model_out = np.ones_like(model_out)/ exp_model_out.shape[0]\n",
|
||||
"\n",
|
||||
" return softmax_model_out"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "uFb8h-9IXnIe"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
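{
"cell_type": "code",
"source": [
"# A minimal reference sketch (ours, not the official solution); the name\n",
"# softmax_ref is hypothetical.\n",
"def softmax_ref(model_out):\n",
"  # Equation 5.22, applied separately to every column\n",
"  exp_model_out = np.exp(model_out)\n",
"  # Broadcasting with keepdims avoids the explicit np.matlib.repmat\n",
"  return exp_model_out / np.sum(exp_model_out, axis=0, keepdims=True)\n"
],
"metadata": {},
"execution_count": null,
"outputs": []
},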
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"\n",
|
||||
"# Let's create some 1D training data\n",
|
||||
"x_train = np.array([0.09291784,0.46809093,0.93089486,0.67612654,0.73441752,0.86847339,\\\n",
|
||||
" 0.49873225,0.51083168,0.18343972,0.99380898,0.27840809,0.38028817,\\\n",
|
||||
" 0.12055708,0.56715537,0.92005746,0.77072270,0.85278176,0.05315950,\\\n",
|
||||
" 0.87168699,0.58858043])\n",
|
||||
"y_train = np.array([2,0,1,2,1,0,\\\n",
|
||||
" 0,2,2,0,2,0,\\\n",
|
||||
" 2,0,1,2,1,2, \\\n",
|
||||
" 1,0])\n",
|
||||
"\n",
|
||||
"# Get parameters for the model\n",
|
||||
"beta_0, omega_0, beta_1, omega_1 = get_parameters()\n",
|
||||
"\n",
|
||||
"# Define a range of input values\n",
|
||||
"x_model = np.arange(0,1,0.01)\n",
|
||||
"# Run the model to get values to plot and plot it.\n",
|
||||
"model_out= shallow_nn(x_model, beta_0, omega_0, beta_1, omega_1)\n",
|
||||
"lambda_model = softmax(model_out)\n",
|
||||
"plot_multiclass_classification(x_model, model_out, lambda_model, x_train, y_train)\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "VWzNOt1swFVd"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"The left is model output and the right is the model output after the softmax has been applied, so it now lies in the range [0,1] and represents the probability, that y=0 (red), 1 (green) and 2 (blue) The dots at the bottom show the training data with the same color scheme. So we want the red curve to be high where there are red dots, the green curve to be high where there are green dots, and the blue curve to be high where there are blue dots We'll compute the the likelihood and the negative log likelihood."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "MvVX6tl9AEXF"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Return probability under Categorical distribution for input x\n",
|
||||
"# Just take value from row k of lambda param where y =k,\n",
|
||||
"def categorical_distribution(y, lambda_param):\n",
|
||||
" return np.array([lambda_param[row, i] for i, row in enumerate (y)])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YaLdRlEX0FkU"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's double check we get the right answer before proceeding\n",
|
||||
"print(\"Correct answer = %3.3f, Your answer = %3.3f\"%(0.2,categorical_distribution(np.array([[0]]),np.array([[0.2],[0.5],[0.3]]))))\n",
|
||||
"print(\"Correct answer = %3.3f, Your answer = %3.3f\"%(0.5,categorical_distribution(np.array([[1]]),np.array([[0.2],[0.5],[0.3]]))))\n",
|
||||
"print(\"Correct answer = %3.3f, Your answer = %3.3f\"%(0.3,categorical_distribution(np.array([[2]]),np.array([[0.2],[0.5],[0.3]]))))\n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "4TSL14dqHHbV"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's compute the likelihood using this function"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "R5z_0dzQMF35"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Return the likelihood of all of the data under the model\n",
|
||||
"def compute_likelihood(y_train, lambda_param):\n",
|
||||
" # TODO -- compute the likelihood of the data -- the product of the categorical probabilities for each data point\n",
|
||||
" # Top line of equation 5.3 in the notes\n",
|
||||
" # You will need np.prod() and the categorical_distribution function you used above\n",
|
||||
" # Replace the line below\n",
|
||||
" likelihood = 0\n",
|
||||
"\n",
|
||||
" return likelihood"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "zpS7o6liCx7f"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
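{
"cell_type": "code",
"source": [
"# A minimal reference sketch (ours, not the official solution); it relies on\n",
"# the categorical_distribution function defined above.\n",
"def compute_likelihood_ref(y_train, lambda_param):\n",
"  # Product of the per-point categorical probabilities\n",
"  return np.prod(categorical_distribution(y_train, lambda_param))\n"
],
"metadata": {},
"execution_count": null,
"outputs": []
},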
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's test this\n",
|
||||
"beta_0, omega_0, beta_1, omega_1 = get_parameters()\n",
|
||||
"# Use our neural network to predict the mean of the Gaussian\n",
|
||||
"model_out = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
||||
"lambda_train = softmax(model_out)\n",
|
||||
"# Compute the likelihood\n",
|
||||
"likelihood = compute_likelihood(y_train, lambda_train)\n",
|
||||
"# Let's double check we get the right answer before proceeding\n",
|
||||
"print(\"Correct answer = %9.9f, Your answer = %9.9f\"%(0.000000041,likelihood))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "1hQxBLoVNlr2"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"You can see that this gives a very small answer, even for this small 1D dataset, and with the model fitting quite well. This is because it is the product of several probabilities, which are all quite small themselves.\n",
|
||||
"This will get out of hand pretty quickly with real datasets -- the likelihood will get so small that we can't represent it with normal finite-precision math\n",
|
||||
"\n",
|
||||
"This is why we use negative log likelihood"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "HzphKgPfOvlk"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Return the negative log likelihood of the data under the model\n",
|
||||
"def compute_negative_log_likelihood(y_train, lambda_param):\n",
|
||||
" # TODO -- compute the likelihood of the data -- don't use the likelihood function above -- compute the negative sum of the log probabilities\n",
|
||||
" # You will need np.sum(), np.log()\n",
|
||||
" # Replace the line below\n",
|
||||
" nll = 0\n",
|
||||
"\n",
|
||||
" return nll"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "dsT0CWiKBmTV"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
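{
"cell_type": "code",
"source": [
"# A minimal reference sketch (ours, not the official solution); the name\n",
"# compute_negative_log_likelihood_ref is hypothetical.\n",
"def compute_negative_log_likelihood_ref(y_train, lambda_param):\n",
"  # Negative sum of the log categorical probabilities\n",
"  return -np.sum(np.log(categorical_distribution(y_train, lambda_param)))\n"
],
"metadata": {},
"execution_count": null,
"outputs": []
},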
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's test this\n",
|
||||
"beta_0, omega_0, beta_1, omega_1 = get_parameters()\n",
|
||||
"# Use our neural network to predict the mean of the Gaussian\n",
|
||||
"model_out = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
||||
"# Pass the outputs through the softmax function\n",
|
||||
"lambda_train = softmax(model_out)\n",
|
||||
"# Compute the log likelihood\n",
|
||||
"nll = compute_negative_log_likelihood(y_train, lambda_train)\n",
|
||||
"# Let's double check we get the right answer before proceeding\n",
|
||||
"print(\"Correct answer = %9.9f, Your answer = %9.9f\"%(17.015457867,nll))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "nVxUXg9rQmwI"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's investigate finding the maximum likelihood / minimum log likelihood solution. For simplicity, we'll assume that all the parameters are fixed except one and look at how the likelihood and log likelihood change as we manipulate the last parameter. We'll start with overall y_offset, beta_1 (formerly phi_0)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "OgcRojvPWh4V"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define a range of values for the parameter\n",
|
||||
"beta_1_vals = np.arange(-2,6.0,0.1)\n",
|
||||
"# Create some arrays to store the likelihoods, negative log likelihoods\n",
|
||||
"likelihoods = np.zeros_like(beta_1_vals)\n",
|
||||
"nlls = np.zeros_like(beta_1_vals)\n",
|
||||
"\n",
|
||||
"# Initialise the parameters\n",
|
||||
"beta_0, omega_0, beta_1, omega_1 = get_parameters()\n",
|
||||
"for count in range(len(beta_1_vals)):\n",
|
||||
" # Set the value for the parameter\n",
|
||||
" beta_1[0,0] = beta_1_vals[count]\n",
|
||||
" # Run the network with new parameters\n",
|
||||
" model_out = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
||||
" lambda_train = softmax(model_out)\n",
|
||||
" # Compute and store the three values\n",
|
||||
" likelihoods[count] = compute_likelihood(y_train,lambda_train)\n",
|
||||
" nlls[count] = compute_negative_log_likelihood(y_train, lambda_train)\n",
|
||||
" # Draw the model for every 20th parameter setting\n",
|
||||
" if count % 20 == 0:\n",
|
||||
" # Run the model to get values to plot and plot it.\n",
|
||||
" model_out = shallow_nn(x_model, beta_0, omega_0, beta_1, omega_1)\n",
|
||||
" lambda_model = softmax(model_out)\n",
|
||||
" plot_multiclass_classification(x_model, model_out, lambda_model, x_train, y_train, title=\"beta1[0,0]=%3.3f\"%(beta_1[0,0]))\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "pFKtDaAeVU4U"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Now let's plot the likelihood, negative log likelihood as a function the value of the offset beta1\n",
|
||||
"fig, ax = plt.subplots(1,2)\n",
|
||||
"fig.set_size_inches(10.5, 3.5)\n",
|
||||
"fig.tight_layout(pad=3.0)\n",
|
||||
"ax[0].plot(beta_1_vals, likelihoods); ax[0].set_xlabel('beta_1[0,0]'); ax[0].set_ylabel('likelihood')\n",
|
||||
"ax[1].plot(beta_1_vals, nlls); ax[1].set_xlabel('beta_1[0,0]'); ax[1].set_ylabel('negative log likelihood')\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "UHXeTa9MagO6"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Hopefully, you can see that the maximum of the likelihood fn is at the same position as the minimum negative log likelihood solution\n",
|
||||
"# Let's check that:\n",
|
||||
"print(\"Maximum likelihood = %f, at beta_1=%3.3f\"%( (likelihoods[np.argmax(likelihoods)],beta_1_vals[np.argmax(likelihoods)])))\n",
|
||||
"print(\"Minimum negative log likelihood = %f, at beta_1=%3.3f\"%( (nlls[np.argmin(nlls)],beta_1_vals[np.argmin(nlls)])))\n",
|
||||
"\n",
|
||||
"# Plot the best model\n",
|
||||
"beta_1[0,0] = beta_1_vals[np.argmin(nlls)]\n",
|
||||
"model_out = shallow_nn(x_model, beta_0, omega_0, beta_1, omega_1)\n",
|
||||
"lambda_model = softmax(model_out)\n",
|
||||
"plot_multiclass_classification(x_model, model_out, lambda_model, x_train, y_train, title=\"beta1[0,0]=%3.3f\"%(beta_1[0,0]))\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "aDEPhddNdN4u"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"They both give the same answer. But you can see from the likelihood above that the likelihood is very small unless the parameters are almost correct. So in practice, we would work with the negative log likelihood.<br><br>\n",
|
||||
"\n",
|
||||
"Again, to fit the full neural model we would vary all of the 16 parameters of the network in the $\\boldsymbol\\beta_{0},\\boldsymbol\\omega_{0},\\boldsymbol\\beta_{1},\\boldsymbol\\omega_{1}$ until we find the combination that have the maximum likelihood / minimum negative log likelihood.<br><br>\n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "771G8N1Vk5A2"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
192
Notebooks/Chap06/6_1_Line_Search.ipynb
Normal file
@@ -0,0 +1,192 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyOfxeJ15PMkIi4geDTRCz3c",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap06/6_1_Line_Search.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 6.1: Line search**\n",
|
||||
"\n",
|
||||
"This notebook investigates how to find the minimum of a 1D function using line search as described in Figure 6.10.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "el8l05WQEO46"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "xhmIOLiZELV_"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# import libraries\n",
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's create a simple 1D function\n",
|
||||
"def loss_function(phi):\n",
|
||||
" return 1- 0.5 * np.exp(-(phi-0.65)*(phi-0.65)/0.1) - 0.45 *np.exp(-(phi-0.35)*(phi-0.35)/0.02)\n",
|
||||
"\n",
|
||||
"def draw_function(loss_function,a=None, b=None, c=None, d=None):\n",
|
||||
" # Plot the function\n",
|
||||
" phi_plot = np.arange(0,1,0.01);\n",
|
||||
" fig,ax = plt.subplots()\n",
|
||||
" ax.plot(phi_plot,loss_function(phi_plot),'r-')\n",
|
||||
" ax.set_xlim(0,1); ax.set_ylim(0,1)\n",
|
||||
" ax.set_xlabel('$\\phi$'); ax.set_ylabel('$L[\\phi]$')\n",
|
||||
" if a is not None and b is not None and c is not None and d is not None:\n",
|
||||
" plt.axvspan(a, d, facecolor='k', alpha=0.2)\n",
|
||||
" ax.plot([a,a],[0,1],'b-')\n",
|
||||
" ax.plot([b,b],[0,1],'b-')\n",
|
||||
" ax.plot([c,c],[0,1],'b-')\n",
|
||||
" ax.plot([d,d],[0,1],'b-')\n",
|
||||
" plt.show()\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "qFRe9POHF2le"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Draw this function\n",
|
||||
"draw_function(loss_function)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "TXx1Tpd1Tl-I"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now lets create a line search procedure to find the minimum in the range 0,1"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "QU5mdGvpTtEG"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def line_search(loss_function, thresh=.0001, max_iter = 10, draw_flag = False):\n",
|
||||
"\n",
|
||||
" # Initialize four points along the range we are going to search\n",
|
||||
" a = 0\n",
|
||||
" b = 0.33\n",
|
||||
" c = 0.66\n",
|
||||
" d = 1.0\n",
|
||||
" n_iter =0;\n",
|
||||
"\n",
|
||||
" # While we haven't found the minimum closely enough\n",
|
||||
" while np.abs(b-c) > thresh and n_iter < max_iter:\n",
|
||||
" # Increment iteration counter (just to prevent an infinite loop)\n",
|
||||
" n_iter = n_iter+1\n",
|
||||
"\n",
|
||||
" # Calculate all four points\n",
|
||||
" lossa = loss_function(a)\n",
|
||||
" lossb = loss_function(b)\n",
|
||||
" lossc = loss_function(c)\n",
|
||||
" lossd = loss_function(d)\n",
|
||||
"\n",
|
||||
" if draw_flag:\n",
|
||||
" draw_function(loss_function, a,b,c,d)\n",
|
||||
"\n",
|
||||
" print('Iter %d, a=%3.3f, b=%3.3f, c=%3.3f, d=%3.3f'%(n_iter, a,b,c,d))\n",
|
||||
"\n",
|
||||
" # Rule #1 If point A is less than points B, C, and D then halve values of B, C, and D\n",
|
||||
" # i.e. bring them closer to the original point\n",
|
||||
" # TODO REPLACE THE BLOCK OF CODE BELOW WITH THIS RULE\n",
|
||||
" if (0):\n",
|
||||
" continue;\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" # Rule #2 If point b is less than point c then\n",
|
||||
" # then point d becomes point c, and\n",
|
||||
" # point b becomes 1/3 between a and new d\n",
|
||||
" # point c becomes 2/3 between a and new d\n",
|
||||
" # TODO REPLACE THE BLOCK OF CODE BELOW WITH THIS RULE\n",
|
||||
" if (0):\n",
|
||||
" continue;\n",
|
||||
"\n",
|
||||
" # Rule #3 If point c is less than point b then\n",
|
||||
" # then point a becomes point b, and\n",
|
||||
" # point b becomes 1/3 between new a and d\n",
|
||||
" # point c becomes 2/3 between new a and d\n",
|
||||
" # TODO REPLACE THE BLOCK OF CODE BELOW WITH THIS RULE\n",
|
||||
" if(0):\n",
|
||||
" continue\n",
|
||||
"\n",
|
||||
" # TODO -- FINAL SOLUTION IS AVERAGE OF B and C\n",
|
||||
" # REPLACE THIS LINE\n",
|
||||
" soln = 1\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" return soln"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "K-NTHpAAHlCl"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"soln = line_search(loss_function, draw_flag=True)\n",
|
||||
"print('Soln = %3.3f, loss = %3.3f'%(soln,loss_function(soln)))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YVq6rmaWRD2M"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [],
|
||||
"metadata": {
|
||||
"id": "tOLd0gtdRLLS"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
|
||||
]
|
||||
}
|
||||
421
Notebooks/Chap06/6_2_Gradient_Descent.ipynb
Normal file
@@ -0,0 +1,421 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyM/FIXDTd6tZYs6WRzK00hB",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap06/6_2_Gradient_Descent.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 6.2 Gradient descent**\n",
|
||||
"\n",
|
||||
"This notebook recreates the gradient descent algorithm as shon in figure 6.1.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "el8l05WQEO46"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "xhmIOLiZELV_"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# import libraries\n",
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"from matplotlib import cm\n",
|
||||
"from matplotlib.colors import ListedColormap"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's create our training data 12 pairs {x_i, y_i}\n",
|
||||
"# We'll try to fit the straight line model to these data\n",
|
||||
"data = np.array([[0.03,0.19,0.34,0.46,0.78,0.81,1.08,1.18,1.39,1.60,1.65,1.90],\n",
|
||||
" [0.67,0.85,1.05,1.00,1.40,1.50,1.30,1.54,1.55,1.68,1.73,1.60]])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "4cRkrh9MZ58Z"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's define our model -- just a straight line with intercept phi[0] and slope phi[1]\n",
|
||||
"def model(phi,x):\n",
|
||||
" y_pred = phi[0]+phi[1] * x\n",
|
||||
" return y_pred"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "WQUERmb2erAe"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Draw model\n",
|
||||
"def draw_model(data,model,phi,title=None):\n",
|
||||
" x_model = np.arange(0,2,0.01)\n",
|
||||
" y_model = model(phi,x_model)\n",
|
||||
"\n",
|
||||
" fix, ax = plt.subplots()\n",
|
||||
" ax.plot(data[0,:],data[1,:],'bo')\n",
|
||||
" ax.plot(x_model,y_model,'m-')\n",
|
||||
" ax.set_xlim([0,2]);ax.set_ylim([0,2])\n",
|
||||
" ax.set_xlabel('x'); ax.set_ylabel('y')\n",
|
||||
" ax.set_aspect('equal')\n",
|
||||
" if title is not None:\n",
|
||||
" ax.set_title(title)\n",
|
||||
" plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "qFRe9POHF2le"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Initialize the parameters to some arbitrary values and draw the model\n",
|
||||
"phi = np.zeros((2,1))\n",
|
||||
"phi[0] = 0.6 # Intercept\n",
|
||||
"phi[1] = -0.2 # Slope\n",
|
||||
"draw_model(data,model,phi, \"Initial parameters\")\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "TXx1Tpd1Tl-I"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now lets create compute the sum of squares loss for the training data"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "QU5mdGvpTtEG"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def compute_loss(data_x, data_y, model, phi):\n",
|
||||
" # TODO -- Write this function -- replace the line below\n",
|
||||
" # First make model predictions from data x\n",
|
||||
" # Then compute the squared difference between the predictions and true y values\n",
|
||||
" # Then sum them all and return\n",
|
||||
" pred_y = 0\n",
|
||||
" loss = 0\n",
|
||||
"\n",
|
||||
" return loss"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "I7dqTY2Gg7CR"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
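{
"cell_type": "markdown",
"source": [
"If you get stuck: the cell below is a minimal sketch of one possible answer (notebook 6.4 later defines compute_loss in exactly this least-squares form). It uses the separate name compute_loss_example so it does not overwrite the exercise above."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Sketch of a least-squares loss (one possible answer, mirroring notebook 6.4)\n",
"def compute_loss_example(data_x, data_y, model, phi):\n",
"  # Make model predictions from data x\n",
"  pred_y = model(phi, data_x)\n",
"  # Sum of squared differences between predictions and true y values\n",
"  return np.sum((pred_y - data_y) * (pred_y - data_y))"
],
"metadata": {},
"execution_count": null,
"outputs": []
},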
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Let's just test that we got that right"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "eB5DQvU5hYNx"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"loss = compute_loss(data[0,:],data[1,:],model,np.array([[0.6],[-0.2]]))\n",
|
||||
"print('Your loss = %3.3f, Correct loss = %3.3f'%(loss, 12.367))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Ty05UtEEg9tc"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's plot the whole loss function"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "F3trnavPiHpH"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def draw_loss_function(compute_loss, data, model, phi_iters = None):\n",
|
||||
" # Define pretty colormap\n",
|
||||
" my_colormap_vals_hex =('2a0902', '2b0a03', '2c0b04', '2d0c05', '2e0c06', '2f0d07', '300d08', '310e09', '320f0a', '330f0b', '34100b', '35110c', '36110d', '37120e', '38120f', '39130f', '3a1410', '3b1411', '3c1511', '3d1612', '3e1613', '3f1713', '401714', '411814', '421915', '431915', '451a16', '461b16', '471b17', '481c17', '491d18', '4a1d18', '4b1e19', '4c1f19', '4d1f1a', '4e201b', '50211b', '51211c', '52221c', '53231d', '54231d', '55241e', '56251e', '57261f', '58261f', '592720', '5b2821', '5c2821', '5d2922', '5e2a22', '5f2b23', '602b23', '612c24', '622d25', '632e25', '652e26', '662f26', '673027', '683027', '693128', '6a3229', '6b3329', '6c342a', '6d342a', '6f352b', '70362c', '71372c', '72372d', '73382e', '74392e', '753a2f', '763a2f', '773b30', '783c31', '7a3d31', '7b3e32', '7c3e33', '7d3f33', '7e4034', '7f4134', '804235', '814236', '824336', '834437', '854538', '864638', '874739', '88473a', '89483a', '8a493b', '8b4a3c', '8c4b3c', '8d4c3d', '8e4c3e', '8f4d3f', '904e3f', '924f40', '935041', '945141', '955242', '965343', '975343', '985444', '995545', '9a5646', '9b5746', '9c5847', '9d5948', '9e5a49', '9f5a49', 'a05b4a', 'a15c4b', 'a35d4b', 'a45e4c', 'a55f4d', 'a6604e', 'a7614e', 'a8624f', 'a96350', 'aa6451', 'ab6552', 'ac6552', 'ad6653', 'ae6754', 'af6855', 'b06955', 'b16a56', 'b26b57', 'b36c58', 'b46d59', 'b56e59', 'b66f5a', 'b7705b', 'b8715c', 'b9725d', 'ba735d', 'bb745e', 'bc755f', 'bd7660', 'be7761', 'bf7862', 'c07962', 'c17a63', 'c27b64', 'c27c65', 'c37d66', 'c47e67', 'c57f68', 'c68068', 'c78169', 'c8826a', 'c9836b', 'ca846c', 'cb856d', 'cc866e', 'cd876f', 'ce886f', 'ce8970', 'cf8a71', 'd08b72', 'd18c73', 'd28d74', 'd38e75', 'd48f76', 'd59077', 'd59178', 'd69279', 'd7937a', 'd8957b', 'd9967b', 'da977c', 'da987d', 'db997e', 'dc9a7f', 'dd9b80', 'de9c81', 'de9d82', 'df9e83', 'e09f84', 'e1a185', 'e2a286', 'e2a387', 'e3a488', 'e4a589', 'e5a68a', 'e5a78b', 'e6a88c', 'e7aa8d', 'e7ab8e', 'e8ac8f', 'e9ad90', 'eaae91', 'eaaf92', 'ebb093', 'ecb295', 'ecb396', 'edb497', 'eeb598', 'eeb699', 'efb79a', 'efb99b', 'f0ba9c', 'f1bb9d', 'f1bc9e', 'f2bd9f', 'f2bfa1', 'f3c0a2', 'f3c1a3', 'f4c2a4', 'f5c3a5', 'f5c5a6', 'f6c6a7', 'f6c7a8', 'f7c8aa', 'f7c9ab', 'f8cbac', 'f8ccad', 'f8cdae', 'f9ceb0', 'f9d0b1', 'fad1b2', 'fad2b3', 'fbd3b4', 'fbd5b6', 'fbd6b7', 'fcd7b8', 'fcd8b9', 'fcdaba', 'fddbbc', 'fddcbd', 'fddebe', 'fddfbf', 'fee0c1', 'fee1c2', 'fee3c3', 'fee4c5', 'ffe5c6', 'ffe7c7', 'ffe8c9', 'ffe9ca', 'ffebcb', 'ffeccd', 'ffedce', 'ffefcf', 'fff0d1', 'fff2d2', 'fff3d3', 'fff4d5', 'fff6d6', 'fff7d8', 'fff8d9', 'fffada', 'fffbdc', 'fffcdd', 'fffedf', 'ffffe0')\n",
|
||||
" my_colormap_vals_dec = np.array([int(element,base=16) for element in my_colormap_vals_hex])\n",
|
||||
" r = np.floor(my_colormap_vals_dec/(256*256))\n",
|
||||
" g = np.floor((my_colormap_vals_dec - r *256 *256)/256)\n",
|
||||
" b = np.floor(my_colormap_vals_dec - r * 256 *256 - g * 256)\n",
|
||||
" my_colormap = ListedColormap(np.vstack((r,g,b)).transpose()/255.0)\n",
|
||||
"\n",
|
||||
" # Make grid of intercept/slope values to plot\n",
|
||||
" intercepts_mesh, slopes_mesh = np.meshgrid(np.arange(0.0,2.0,0.02), np.arange(-1.0,1.0,0.002))\n",
|
||||
" loss_mesh = np.zeros_like(slopes_mesh)\n",
|
||||
" # Compute loss for every set of parameters\n",
|
||||
" for idslope, slope in np.ndenumerate(slopes_mesh):\n",
|
||||
" loss_mesh[idslope] = compute_loss(data[0,:], data[1,:], model, np.array([[intercepts_mesh[idslope]], [slope]]))\n",
|
||||
"\n",
|
||||
" fig,ax = plt.subplots()\n",
|
||||
" fig.set_size_inches(8,8)\n",
|
||||
" ax.contourf(intercepts_mesh,slopes_mesh,loss_mesh,256,cmap=my_colormap)\n",
|
||||
" ax.contour(intercepts_mesh,slopes_mesh,loss_mesh,40,colors=['#80808080'])\n",
|
||||
" if phi_iters is not None:\n",
|
||||
" ax.plot(phi_iters[0,:], phi_iters[1,:],'go-')\n",
|
||||
" ax.set_ylim([1,-1])\n",
|
||||
" ax.set_xlabel('Intercept $\\phi_{0}$'); ax.set_ylabel('Slope, $\\phi_{1}$')\n",
|
||||
" plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "K-NTHpAAHlCl"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"draw_loss_function(compute_loss, data, model)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "l8HbvIupnTME"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's compute the gradient vector for a given set of parameters:\n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
"\\frac{\\partial L}{\\partial \\boldsymbol\\phi} = \\begin{bmatrix}\\frac{\\partial L}{\\partial \\phi_0} \\\\\\frac{\\partial L}{\\partial \\phi_1} \\end{bmatrix}.\n",
|
||||
"\\end{equation}"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "s9Duf05WqqSC"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# These are in the lecture slides and notes, but worth trying to calculate them yourself to\n",
|
||||
"# check that you get them right. Write out the expression for the sum of squares loss and take the\n",
|
||||
"# derivative with respect to phi0 and phi1\n",
|
||||
"def compute_gradient(data_x, data_y, phi):\n",
|
||||
" # TODO -- write this function, replacing the lines below\n",
|
||||
" dl_dphi0 = 0.0\n",
|
||||
" dl_dphi1 = 0.0\n",
|
||||
"\n",
|
||||
" # Return the gradient\n",
|
||||
" return np.array([[dl_dphi0],[dl_dphi1]])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "UpswmkL2qwBT"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
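{
"cell_type": "markdown",
"source": [
"For the straight-line model, writing out the least-squares loss and differentiating gives\n",
"\n",
"\\begin{equation}\n",
"\\frac{\\partial L}{\\partial \\phi_0} = 2\\sum_{i}\\left(\\phi_0 + \\phi_1 x_i - y_i\\right), \\qquad \\frac{\\partial L}{\\partial \\phi_1} = 2\\sum_{i} x_i\\left(\\phi_0 + \\phi_1 x_i - y_i\\right).\n",
"\\end{equation}\n",
"\n",
"Try the derivation yourself first; the sketch below implements these expressions under a separate name so the exercise above stays intact."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Sketch of the least-squares gradient for the straight-line model (derived above)\n",
"def compute_gradient_example(data_x, data_y, phi):\n",
"  # Residual = prediction minus target at each data point\n",
"  residual = phi[0] + phi[1] * data_x - data_y\n",
"  dl_dphi0 = 2 * np.sum(residual)\n",
"  dl_dphi1 = 2 * np.sum(residual * data_x)\n",
"  # Return the gradient as a 2x1 column vector, like compute_gradient\n",
"  return np.array([[dl_dphi0],[dl_dphi1]])"
],
"metadata": {},
"execution_count": null,
"outputs": []
},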
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"We can check we got this right using a trick known as **finite differences**. If we evaluate the function and then change one of the parameters by a very small amount and normalize by that amount, we get an approximation to the gradient, so:\n",
|
||||
"\n",
|
||||
"\\begin{eqnarray}\n",
|
||||
"\\frac{\\partial L}{\\partial \\phi_{0}}&\\approx & \\frac{L[\\phi_0+\\delta, \\phi_1]-L[\\phi_0, \\phi_1]}{\\delta}\\\\\n",
|
||||
"\\frac{\\partial L}{\\partial \\phi_{1}}&\\approx & \\frac{L[\\phi_0, \\phi_1+\\delta]-L[\\phi_0, \\phi_1]}{\\delta}\n",
|
||||
"\\end{eqnarray}\n",
|
||||
"\n",
|
||||
"We can't do this when there are many parameters; for a million parameters, we would have to evaluate the loss function two million times, and usually computing the gradients directly is much more efficient."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "RS1nEcYVuEAM"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Compute the gradient using your function\n",
|
||||
"gradient = compute_gradient(data[0,:],data[1,:], phi)\n",
|
||||
"print(\"Your gradients: (%3.3f,%3.3f)\"%(gradient[0],gradient[1]))\n",
|
||||
"# Approximate the gradients with finite differences\n",
|
||||
"delta = 0.0001\n",
|
||||
"dl_dphi0_est = (compute_loss(data[0,:],data[1,:],model,phi+np.array([[delta],[0]])) - \\\n",
|
||||
" compute_loss(data[0,:],data[1,:],model,phi))/delta\n",
|
||||
"dl_dphi1_est = (compute_loss(data[0,:],data[1,:],model,phi+np.array([[0],[delta]])) - \\\n",
|
||||
" compute_loss(data[0,:],data[1,:],model,phi))/delta\n",
|
||||
"print(\"Approx gradients: (%3.3f,%3.3f)\"%(dl_dphi0_est,dl_dphi1_est))\n",
|
||||
"# There might be small differences in the last significant figure because finite gradients is an approximation\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "QuwAHN7yt-gi"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now we are ready to perform gradient descent. We'll need to use our line search routine from part I, which I've reproduced here plus the helper function loss_function_1D that converts from a 2D problem to a 1D problem"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "5EIjMM9Fw2eT"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def loss_function_1D(dist_prop, data, model, phi_start, gradient):\n",
|
||||
" # Return the loss after moving this far\n",
|
||||
" return compute_loss(data[0,:], data[1,:], model, phi_start+ gradient * dist_prop)\n",
|
||||
"\n",
|
||||
"def line_search(data, model, phi, gradient, thresh=.00001, max_dist = 0.1, max_iter = 15, verbose=False):\n",
|
||||
" # Initialize four points along the range we are going to search\n",
|
||||
" a = 0\n",
|
||||
" b = 0.33 * max_dist\n",
|
||||
" c = 0.66 * max_dist\n",
|
||||
" d = 1.0 * max_dist\n",
|
||||
" n_iter =0;\n",
|
||||
"\n",
|
||||
" # While we haven't found the minimum closely enough\n",
|
||||
" while np.abs(b-c) > thresh and n_iter < max_iter:\n",
|
||||
" # Increment iteration counter (just to prevent an infinite loop)\n",
|
||||
" n_iter = n_iter+1\n",
|
||||
" # Calculate all four points\n",
|
||||
" lossa = loss_function_1D(a, data, model, phi,gradient)\n",
|
||||
" lossb = loss_function_1D(b, data, model, phi,gradient)\n",
|
||||
" lossc = loss_function_1D(c, data, model, phi,gradient)\n",
|
||||
" lossd = loss_function_1D(d, data, model, phi,gradient)\n",
|
||||
"\n",
|
||||
" if verbose:\n",
|
||||
" print('Iter %d, a=%3.3f, b=%3.3f, c=%3.3f, d=%3.3f'%(n_iter, a,b,c,d))\n",
|
||||
" print('a %f, b%f, c%f, d%f'%(lossa,lossb,lossc,lossd))\n",
|
||||
"\n",
|
||||
" # Rule #1 If point A is less than points B, C, and D then halve points B,C, and D\n",
|
||||
" if np.argmin((lossa,lossb,lossc,lossd))==0:\n",
|
||||
" b = b/2\n",
|
||||
" c = c/2\n",
|
||||
" d = d/2\n",
|
||||
" continue;\n",
|
||||
"\n",
|
||||
" # Rule #2 If point b is less than point c then\n",
|
||||
" # then point d becomes point c, and\n",
|
||||
" # point b becomes 1/3 between a and new d\n",
|
||||
" # point c becomes 2/3 between a and new d\n",
|
||||
" if lossb < lossc:\n",
|
||||
" d = c\n",
|
||||
" b = a+ (d-a)/3\n",
|
||||
" c = a+ 2*(d-a)/3\n",
|
||||
" continue\n",
|
||||
"\n",
|
||||
" # Rule #2 If point c is less than point b then\n",
|
||||
" # then point a becomes point b, and\n",
|
||||
" # point b becomes 1/3 between new a and d\n",
|
||||
" # point c becomes 2/3 between new a and d\n",
|
||||
" a = b\n",
|
||||
" b = a+ (d-a)/3\n",
|
||||
" c = a+ 2*(d-a)/3\n",
|
||||
"\n",
|
||||
" # Return average of two middle points\n",
|
||||
" return (b+c)/2.0"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "XrJ2gQjfw1XP"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def gradient_descent_step(phi, data, model):\n",
|
||||
" # TODO -- update Phi with the gradient descent step (equation 6.3)\n",
|
||||
" # 1. Compute the gradient\n",
|
||||
" # 2. Find the best step size alpha (use negative gradient as going downhill)\n",
|
||||
" # 3. Update the parameters phi\n",
|
||||
"\n",
|
||||
" return phi"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YVq6rmaWRD2M"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
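{
"cell_type": "markdown",
"source": [
"A sketch of one possible step, following the same pattern notebook 6.3 uses: compute the gradient, line-search along the negative gradient, then update. The value max_dist = 2.0 is borrowed from notebook 6.3 rather than tuned here."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Sketch of a gradient descent step (mirrors the version in notebook 6.3)\n",
"def gradient_descent_step_example(phi, data, model):\n",
"  # 1. Compute the gradient\n",
"  gradient = compute_gradient(data[0,:], data[1,:], phi)\n",
"  # 2. Find the step size alpha by searching along the negative (downhill) gradient\n",
"  alpha = line_search(data, model, phi, gradient * -1, max_dist = 2.0)\n",
"  # 3. Update the parameters\n",
"  return phi - alpha * gradient"
],
"metadata": {},
"execution_count": null,
"outputs": []
},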
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Initialize the parameters and draw the model\n",
|
||||
"n_steps = 10\n",
|
||||
"phi_all = np.zeros((2,n_steps+1))\n",
|
||||
"phi_all[0,0] = 1.6\n",
|
||||
"phi_all[1,0] = -0.5\n",
|
||||
"\n",
|
||||
"# Measure loss and draw initial model\n",
|
||||
"loss = compute_loss(data[0,:], data[1,:], model, phi_all[:,0:1])\n",
|
||||
"draw_model(data,model,phi_all[:,0:1], \"Initial parameters, Loss = %f\"%(loss))\n",
|
||||
"\n",
|
||||
"# Repeatedly take gradient descent steps\n",
|
||||
"for c_step in range (n_steps):\n",
|
||||
" # Do gradient descent step\n",
|
||||
" phi_all[:,c_step+1:c_step+2] = gradient_descent_step(phi_all[:,c_step:c_step+1],data, model)\n",
|
||||
" # Measure loss and draw model\n",
|
||||
" loss = compute_loss(data[0,:], data[1,:], model, phi_all[:,c_step+1:c_step+2])\n",
|
||||
" draw_model(data,model,phi_all[:,c_step+1], \"Iteration %d, loss = %f\"%(c_step+1,loss))\n",
|
||||
"\n",
|
||||
"# Draw the trajectory on the loss function\n",
|
||||
"draw_loss_function(compute_loss, data, model,phi_all)\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "tOLd0gtdRLLS"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
|
||||
]
|
||||
}
586 Notebooks/Chap06/6_3_Stochastic_Gradient_Descent.ipynb Normal file
@@ -0,0 +1,586 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyNk5FN4qlw3pk8BwDVWw1jN",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap06/6_3_Stochastic_Gradient_Descent.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 6.3: Stochastic gradient descent**\n",
|
||||
"\n",
|
||||
"This notebook investigates gradient descent and stochastic gradient descent and recreates figure 6.5 from the book\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "el8l05WQEO46"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "xhmIOLiZELV_"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# import libraries\n",
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"from matplotlib import cm\n",
|
||||
"from matplotlib.colors import ListedColormap"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's create our training data 30 pairs {x_i, y_i}\n",
|
||||
"# We'll try to fit the Gabor model to these data\n",
|
||||
"data = np.array([[-1.920e+00,-1.422e+01,1.490e+00,-1.940e+00,-2.389e+00,-5.090e+00,\n",
|
||||
" -8.861e+00,3.578e+00,-6.010e+00,-6.995e+00,3.634e+00,8.743e-01,\n",
|
||||
" -1.096e+01,4.073e-01,-9.467e+00,8.560e+00,1.062e+01,-1.729e-01,\n",
|
||||
" 1.040e+01,-1.261e+01,1.574e-01,-1.304e+01,-2.156e+00,-1.210e+01,\n",
|
||||
" -1.119e+01,2.902e+00,-8.220e+00,-1.179e+01,-8.391e+00,-4.505e+00],\n",
|
||||
" [-1.051e+00,-2.482e-02,8.896e-01,-4.943e-01,-9.371e-01,4.306e-01,\n",
|
||||
" 9.577e-03,-7.944e-02 ,1.624e-01,-2.682e-01,-3.129e-01,8.303e-01,\n",
|
||||
" -2.365e-02,5.098e-01,-2.777e-01,3.367e-01,1.927e-01,-2.222e-01,\n",
|
||||
" 6.352e-02,6.888e-03,3.224e-02,1.091e-02,-5.706e-01,-5.258e-02,\n",
|
||||
" -3.666e-02,1.709e-01,-4.805e-02,2.008e-01,-1.904e-01,5.952e-01]])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "4cRkrh9MZ58Z"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's define our model\n",
|
||||
"def model(phi,x):\n",
|
||||
" sin_component = np.sin(phi[0] + 0.06 * phi[1] * x)\n",
|
||||
" gauss_component = np.exp(-(phi[0] + 0.06 * phi[1] * x) * (phi[0] + 0.06 * phi[1] * x) / 32)\n",
|
||||
" y_pred= sin_component * gauss_component\n",
|
||||
" return y_pred"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "WQUERmb2erAe"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Draw model\n",
|
||||
"def draw_model(data,model,phi,title=None):\n",
|
||||
" x_model = np.arange(-15,15,0.1)\n",
|
||||
" y_model = model(phi,x_model)\n",
|
||||
"\n",
|
||||
" fix, ax = plt.subplots()\n",
|
||||
" ax.plot(data[0,:],data[1,:],'bo')\n",
|
||||
" ax.plot(x_model,y_model,'m-')\n",
|
||||
" ax.set_xlim([-15,15]);ax.set_ylim([-1,1])\n",
|
||||
" ax.set_xlabel('x'); ax.set_ylabel('y')\n",
|
||||
" if title is not None:\n",
|
||||
" ax.set_title(title)\n",
|
||||
" plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "qFRe9POHF2le"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Initialize the parmaeters and draw the model\n",
|
||||
"phi = np.zeros((2,1))\n",
|
||||
"phi[0] = -5 # Horizontal offset\n",
|
||||
"phi[1] = 25 # Frequency\n",
|
||||
"draw_model(data,model,phi, \"Initial parameters\")\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "TXx1Tpd1Tl-I"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now lets create compute the sum of squares loss for the training data"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "QU5mdGvpTtEG"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def compute_loss(data_x, data_y, model, phi):\n",
|
||||
" # TODO -- Write this function -- replace the line below\n",
|
||||
" # TODO -- First make model predictions from data x\n",
|
||||
" # TODO -- Then compute the squared difference between the predictions and true y values\n",
|
||||
" # TODO -- Then sum them all and return\n",
|
||||
" loss = 0\n",
|
||||
"\n",
|
||||
" return loss"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "I7dqTY2Gg7CR"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
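{
"cell_type": "markdown",
"source": [
"As in notebook 6.2, a minimal least-squares sketch (the same form notebook 6.4 uses) is given below under a separate name, in case you want to check your answer."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Sketch: least-squares loss, identical in form to compute_loss in notebook 6.4\n",
"def compute_loss_example(data_x, data_y, model, phi):\n",
"  pred_y = model(phi, data_x)\n",
"  return np.sum((pred_y - data_y) ** 2)"
],
"metadata": {},
"execution_count": null,
"outputs": []
},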
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Let's just test that we got that right"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "eB5DQvU5hYNx"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"loss = compute_loss(data[0,:],data[1,:],model,np.array([[0.6],[-0.2]]))\n",
|
||||
"print('Your loss = %3.3f, Correct loss = %3.3f'%(loss, 16.419))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Ty05UtEEg9tc"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's plot the whole loss function"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "F3trnavPiHpH"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def draw_loss_function(compute_loss, data, model, phi_iters = None):\n",
|
||||
" # Define pretty colormap\n",
|
||||
" my_colormap_vals_hex =('2a0902', '2b0a03', '2c0b04', '2d0c05', '2e0c06', '2f0d07', '300d08', '310e09', '320f0a', '330f0b', '34100b', '35110c', '36110d', '37120e', '38120f', '39130f', '3a1410', '3b1411', '3c1511', '3d1612', '3e1613', '3f1713', '401714', '411814', '421915', '431915', '451a16', '461b16', '471b17', '481c17', '491d18', '4a1d18', '4b1e19', '4c1f19', '4d1f1a', '4e201b', '50211b', '51211c', '52221c', '53231d', '54231d', '55241e', '56251e', '57261f', '58261f', '592720', '5b2821', '5c2821', '5d2922', '5e2a22', '5f2b23', '602b23', '612c24', '622d25', '632e25', '652e26', '662f26', '673027', '683027', '693128', '6a3229', '6b3329', '6c342a', '6d342a', '6f352b', '70362c', '71372c', '72372d', '73382e', '74392e', '753a2f', '763a2f', '773b30', '783c31', '7a3d31', '7b3e32', '7c3e33', '7d3f33', '7e4034', '7f4134', '804235', '814236', '824336', '834437', '854538', '864638', '874739', '88473a', '89483a', '8a493b', '8b4a3c', '8c4b3c', '8d4c3d', '8e4c3e', '8f4d3f', '904e3f', '924f40', '935041', '945141', '955242', '965343', '975343', '985444', '995545', '9a5646', '9b5746', '9c5847', '9d5948', '9e5a49', '9f5a49', 'a05b4a', 'a15c4b', 'a35d4b', 'a45e4c', 'a55f4d', 'a6604e', 'a7614e', 'a8624f', 'a96350', 'aa6451', 'ab6552', 'ac6552', 'ad6653', 'ae6754', 'af6855', 'b06955', 'b16a56', 'b26b57', 'b36c58', 'b46d59', 'b56e59', 'b66f5a', 'b7705b', 'b8715c', 'b9725d', 'ba735d', 'bb745e', 'bc755f', 'bd7660', 'be7761', 'bf7862', 'c07962', 'c17a63', 'c27b64', 'c27c65', 'c37d66', 'c47e67', 'c57f68', 'c68068', 'c78169', 'c8826a', 'c9836b', 'ca846c', 'cb856d', 'cc866e', 'cd876f', 'ce886f', 'ce8970', 'cf8a71', 'd08b72', 'd18c73', 'd28d74', 'd38e75', 'd48f76', 'd59077', 'd59178', 'd69279', 'd7937a', 'd8957b', 'd9967b', 'da977c', 'da987d', 'db997e', 'dc9a7f', 'dd9b80', 'de9c81', 'de9d82', 'df9e83', 'e09f84', 'e1a185', 'e2a286', 'e2a387', 'e3a488', 'e4a589', 'e5a68a', 'e5a78b', 'e6a88c', 'e7aa8d', 'e7ab8e', 'e8ac8f', 'e9ad90', 'eaae91', 'eaaf92', 'ebb093', 'ecb295', 'ecb396', 'edb497', 'eeb598', 'eeb699', 'efb79a', 'efb99b', 'f0ba9c', 'f1bb9d', 'f1bc9e', 'f2bd9f', 'f2bfa1', 'f3c0a2', 'f3c1a3', 'f4c2a4', 'f5c3a5', 'f5c5a6', 'f6c6a7', 'f6c7a8', 'f7c8aa', 'f7c9ab', 'f8cbac', 'f8ccad', 'f8cdae', 'f9ceb0', 'f9d0b1', 'fad1b2', 'fad2b3', 'fbd3b4', 'fbd5b6', 'fbd6b7', 'fcd7b8', 'fcd8b9', 'fcdaba', 'fddbbc', 'fddcbd', 'fddebe', 'fddfbf', 'fee0c1', 'fee1c2', 'fee3c3', 'fee4c5', 'ffe5c6', 'ffe7c7', 'ffe8c9', 'ffe9ca', 'ffebcb', 'ffeccd', 'ffedce', 'ffefcf', 'fff0d1', 'fff2d2', 'fff3d3', 'fff4d5', 'fff6d6', 'fff7d8', 'fff8d9', 'fffada', 'fffbdc', 'fffcdd', 'fffedf', 'ffffe0')\n",
|
||||
" my_colormap_vals_dec = np.array([int(element,base=16) for element in my_colormap_vals_hex])\n",
|
||||
" r = np.floor(my_colormap_vals_dec/(256*256))\n",
|
||||
" g = np.floor((my_colormap_vals_dec - r *256 *256)/256)\n",
|
||||
" b = np.floor(my_colormap_vals_dec - r * 256 *256 - g * 256)\n",
|
||||
" my_colormap = ListedColormap(np.vstack((r,g,b)).transpose()/255.0)\n",
|
||||
"\n",
|
||||
" # Make grid of intercept/slope values to plot\n",
|
||||
" offsets_mesh, freqs_mesh = np.meshgrid(np.arange(-10,10.0,0.1), np.arange(2.5,22.5,0.1))\n",
|
||||
" loss_mesh = np.zeros_like(freqs_mesh)\n",
|
||||
" # Compute loss for every set of parameters\n",
|
||||
" for idslope, slope in np.ndenumerate(freqs_mesh):\n",
|
||||
" loss_mesh[idslope] = compute_loss(data[0,:], data[1,:], model, np.array([[offsets_mesh[idslope]], [slope]]))\n",
|
||||
"\n",
|
||||
" fig,ax = plt.subplots()\n",
|
||||
" fig.set_size_inches(8,8)\n",
|
||||
" ax.contourf(offsets_mesh,freqs_mesh,loss_mesh,256,cmap=my_colormap)\n",
|
||||
" ax.contour(offsets_mesh,freqs_mesh,loss_mesh,20,colors=['#80808080'])\n",
|
||||
" if phi_iters is not None:\n",
|
||||
" ax.plot(phi_iters[0,:], phi_iters[1,:],'go-')\n",
|
||||
" ax.set_ylim([2.5,22.5])\n",
|
||||
" ax.set_xlabel('Offset $\\phi_{0}$'); ax.set_ylabel('Frequency, $\\phi_{1}$')\n",
|
||||
" plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "K-NTHpAAHlCl"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"draw_loss_function(compute_loss, data, model)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "l8HbvIupnTME"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's compute the gradient vector for a given set of parameters:\n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
"\\frac{\\partial L}{\\partial \\boldsymbol\\phi} = \\begin{bmatrix}\\frac{\\partial L}{\\partial \\phi_0} \\\\\\frac{\\partial L}{\\partial \\phi_1} \\end{bmatrix}.\n",
|
||||
"\\end{equation}"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "s9Duf05WqqSC"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# These came from writing out the expression for the sum of squares loss and taking the\n",
|
||||
"# derivative with respect to phi0 and phi1. It was a lot of hassle to get it right!\n",
|
||||
"def gabor_deriv_phi0(data_x,data_y,phi0, phi1):\n",
|
||||
" x = 0.06 * phi1 * data_x + phi0\n",
|
||||
" y = data_y\n",
|
||||
" cos_component = np.cos(x)\n",
|
||||
" sin_component = np.sin(x)\n",
|
||||
" gauss_component = np.exp(-0.5 * x *x / 16)\n",
|
||||
" deriv = cos_component * gauss_component - sin_component * gauss_component * x / 16\n",
|
||||
" deriv = 2* deriv * (sin_component * gauss_component - y)\n",
|
||||
" return np.sum(deriv)\n",
|
||||
"\n",
|
||||
"def gabor_deriv_phi1(data_x, data_y,phi0, phi1):\n",
|
||||
" x = 0.06 * phi1 * data_x + phi0\n",
|
||||
" y = data_y\n",
|
||||
" cos_component = np.cos(x)\n",
|
||||
" sin_component = np.sin(x)\n",
|
||||
" gauss_component = np.exp(-0.5 * x *x / 16)\n",
|
||||
" deriv = 0.06 * data_x * cos_component * gauss_component - 0.06 * data_x*sin_component * gauss_component * x / 16\n",
|
||||
" deriv = 2*deriv * (sin_component * gauss_component - y)\n",
|
||||
" return np.sum(deriv)\n",
|
||||
"\n",
|
||||
"def compute_gradient(data_x, data_y, phi):\n",
|
||||
" dl_dphi0 = gabor_deriv_phi0(data_x, data_y, phi[0],phi[1])\n",
|
||||
" dl_dphi1 = gabor_deriv_phi1(data_x, data_y, phi[0],phi[1])\n",
|
||||
" # Return the gradient\n",
|
||||
" return np.array([[dl_dphi0],[dl_dphi1]])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "UpswmkL2qwBT"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"We can check we got this right using a trick known as **finite differences**. If we evaluate the function and then change one of the parameters by a very small amount and normalize by that amount, we get an approximation to the gradient, so:\n",
|
||||
"\n",
|
||||
"\\begin{eqnarray}\n",
|
||||
"\\frac{\\partial L}{\\partial \\phi_{0}}&\\approx & \\frac{L[\\phi_0+\\delta, \\phi_1]-L[\\phi_0, \\phi_1]}{\\delta}\\\\\n",
|
||||
"\\frac{\\partial L}{\\partial \\phi_{1}}&\\approx & \\frac{L[\\phi_0, \\phi_1+\\delta]-L[\\phi_0, \\phi_1]}{\\delta}\n",
|
||||
"\\end{eqnarray}\n",
|
||||
"\n",
|
||||
"We can't do this when there are many parameters; for a million parameters, we would have to evaluate the loss function two million times, and usually computing the gradients directly is much more efficient."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "RS1nEcYVuEAM"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Compute the gradient using your function\n",
|
||||
"gradient = compute_gradient(data[0,:],data[1,:], phi)\n",
|
||||
"print(\"Your gradients: (%3.3f,%3.3f)\"%(gradient[0],gradient[1]))\n",
|
||||
"# Approximate the gradients with finite differences\n",
|
||||
"delta = 0.0001\n",
|
||||
"dl_dphi0_est = (compute_loss(data[0,:],data[1,:],model,phi+np.array([[delta],[0]])) - \\\n",
|
||||
" compute_loss(data[0,:],data[1,:],model,phi))/delta\n",
|
||||
"dl_dphi1_est = (compute_loss(data[0,:],data[1,:],model,phi+np.array([[0],[delta]])) - \\\n",
|
||||
" compute_loss(data[0,:],data[1,:],model,phi))/delta\n",
|
||||
"print(\"Approx gradients: (%3.3f,%3.3f)\"%(dl_dphi0_est,dl_dphi1_est))\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "QuwAHN7yt-gi"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now we are ready to perform gradient descent. We'll need to use our line search routine from Notebook 6.1, which I've reproduced here plus the helper function loss_function_1D that converts from a 2D problem to a 1D problem"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "5EIjMM9Fw2eT"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def loss_function_1D(dist_prop, data, model, phi_start, gradient):\n",
|
||||
" # Return the loss after moving this far\n",
|
||||
" return compute_loss(data[0,:], data[1,:], model, phi_start+ gradient * dist_prop)\n",
|
||||
"\n",
|
||||
"def line_search(data, model, phi, gradient, thresh=.00001, max_dist = 0.1, max_iter = 15, verbose=False):\n",
|
||||
" # Initialize four points along the range we are going to search\n",
|
||||
" a = 0\n",
|
||||
" b = 0.33 * max_dist\n",
|
||||
" c = 0.66 * max_dist\n",
|
||||
" d = 1.0 * max_dist\n",
|
||||
" n_iter =0;\n",
|
||||
"\n",
|
||||
" # While we haven't found the minimum closely enough\n",
|
||||
" while np.abs(b-c) > thresh and n_iter < max_iter:\n",
|
||||
" # Increment iteration counter (just to prevent an infinite loop)\n",
|
||||
" n_iter = n_iter+1\n",
|
||||
" # Calculate all four points\n",
|
||||
" lossa = loss_function_1D(a, data, model, phi,gradient)\n",
|
||||
" lossb = loss_function_1D(b, data, model, phi,gradient)\n",
|
||||
" lossc = loss_function_1D(c, data, model, phi,gradient)\n",
|
||||
" lossd = loss_function_1D(d, data, model, phi,gradient)\n",
|
||||
"\n",
|
||||
" if verbose:\n",
|
||||
" print('Iter %d, a=%3.3f, b=%3.3f, c=%3.3f, d=%3.3f'%(n_iter, a,b,c,d))\n",
|
||||
" print('a %f, b%f, c%f, d%f'%(lossa,lossb,lossc,lossd))\n",
|
||||
"\n",
|
||||
" # Rule #1 If point A is less than points B, C, and D then halve points B,C, and D\n",
|
||||
" if np.argmin((lossa,lossb,lossc,lossd))==0:\n",
|
||||
" b = b/2\n",
|
||||
" c = c/2\n",
|
||||
" d = d/2\n",
|
||||
" continue;\n",
|
||||
"\n",
|
||||
" # Rule #2 If point b is less than point c then\n",
|
||||
" # then point d becomes point c, and\n",
|
||||
" # point b becomes 1/3 between a and new d\n",
|
||||
" # point c becomes 2/3 between a and new d\n",
|
||||
" if lossb < lossc:\n",
|
||||
" d = c\n",
|
||||
" b = a+ (d-a)/3\n",
|
||||
" c = a+ 2*(d-a)/3\n",
|
||||
" continue\n",
|
||||
"\n",
|
||||
" # Rule #2 If point c is less than point b then\n",
|
||||
" # then point a becomes point b, and\n",
|
||||
" # point b becomes 1/3 between new a and d\n",
|
||||
" # point c becomes 2/3 between new a and d\n",
|
||||
" a = b\n",
|
||||
" b = a+ (d-a)/3\n",
|
||||
" c = a+ 2*(d-a)/3\n",
|
||||
"\n",
|
||||
" # Return average of two middle points\n",
|
||||
" return (b+c)/2.0"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "XrJ2gQjfw1XP"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def gradient_descent_step(phi, data, model):\n",
|
||||
" # Step 1: Compute the gradient\n",
|
||||
" gradient = compute_gradient(data[0,:],data[1,:], phi)\n",
|
||||
" # Step 2: Update the parameters -- note we want to search in the negative (downhill direction)\n",
|
||||
" alpha = line_search(data, model, phi, gradient*-1, max_dist = 2.0)\n",
|
||||
" phi = phi - alpha * gradient\n",
|
||||
" return phi"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YVq6rmaWRD2M"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Initialize the parameters\n",
|
||||
"n_steps = 21\n",
|
||||
"phi_all = np.zeros((2,n_steps+1))\n",
|
||||
"phi_all[0,0] = -1.5\n",
|
||||
"phi_all[1,0] = 8.5\n",
|
||||
"\n",
|
||||
"# Measure loss and draw initial model\n",
|
||||
"loss = compute_loss(data[0,:], data[1,:], model, phi_all[:,0:1])\n",
|
||||
"draw_model(data,model,phi_all[:,0:1], \"Initial parameters, Loss = %f\"%(loss))\n",
|
||||
"\n",
|
||||
"for c_step in range (n_steps):\n",
|
||||
" # Do gradient descent step\n",
|
||||
" phi_all[:,c_step+1:c_step+2] = gradient_descent_step(phi_all[:,c_step:c_step+1],data, model)\n",
|
||||
" # Measure loss and draw model every 4th step\n",
|
||||
" if c_step % 4 == 0:\n",
|
||||
" loss = compute_loss(data[0,:], data[1,:], model, phi_all[:,c_step+1:c_step+2])\n",
|
||||
" draw_model(data,model,phi_all[:,c_step+1], \"Iteration %d, loss = %f\"%(c_step+1,loss))\n",
|
||||
"\n",
|
||||
"draw_loss_function(compute_loss, data, model,phi_all)\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "tOLd0gtdRLLS"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# TODO Experiment with starting the optimization in the previous cell in different places\n",
|
||||
"# and show that it heads to a local minimum if we don't start it in the right valley"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Oi8ZlH0ptLqA"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def gradient_descent_step_fixed_learning_rate(phi, data, alpha):\n",
|
||||
" # TODO -- fill in this routine so that we take a fixed size step of size alpha without using line search\n",
|
||||
"\n",
|
||||
" return phi"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "4l-ueLk-oAxV"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
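{
"cell_type": "markdown",
"source": [
"A sketch of the fixed-step version: reuse compute_gradient and replace the line search with the constant step size alpha."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Sketch: fixed-step gradient descent (no line search)\n",
"def gradient_descent_step_fixed_lr_example(phi, data, alpha):\n",
"  gradient = compute_gradient(data[0,:], data[1,:], phi)\n",
"  # Move a fixed distance alpha along the negative gradient\n",
"  return phi - alpha * gradient"
],
"metadata": {},
"execution_count": null,
"outputs": []
},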
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Initialize the parameters\n",
|
||||
"n_steps = 21\n",
|
||||
"phi_all = np.zeros((2,n_steps+1))\n",
|
||||
"phi_all[0,0] = -1.5\n",
|
||||
"phi_all[1,0] = 8.5\n",
|
||||
"\n",
|
||||
"# Measure loss and draw initial model\n",
|
||||
"loss = compute_loss(data[0,:], data[1,:], model, phi_all[:,0:1])\n",
|
||||
"draw_model(data,model,phi_all[:,0:1], \"Initial parameters, Loss = %f\"%(loss))\n",
|
||||
"\n",
|
||||
"for c_step in range (n_steps):\n",
|
||||
" # Do gradient descent step\n",
|
||||
" phi_all[:,c_step+1:c_step+2] = gradient_descent_step_fixed_learning_rate(phi_all[:,c_step:c_step+1],data, alpha =0.2)\n",
|
||||
" # Measure loss and draw model every 4th step\n",
|
||||
" if c_step % 4 == 0:\n",
|
||||
" loss = compute_loss(data[0,:], data[1,:], model, phi_all[:,c_step+1:c_step+2])\n",
|
||||
" draw_model(data,model,phi_all[:,c_step+1], \"Iteration %d, loss = %f\"%(c_step+1,loss))\n",
|
||||
"\n",
|
||||
"draw_loss_function(compute_loss, data, model,phi_all)\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "oi9MX_GRpM41"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# TODO Experiment with the learning rate, alpha.\n",
|
||||
"# What happens if you set it too large?\n",
|
||||
"# What happens if you set it too small?"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "In6sQ5YCpMqn"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def stochastic_gradient_descent_step(phi, data, alpha, batch_size):\n",
|
||||
" # TODO -- fill in this routine so that we take a fixed size step of size alpha but only using a subset (batch) of the data\n",
|
||||
" # at each step\n",
|
||||
" # You can use the function np.random.permutation to generate a random permutation of the n_data = data.shape[1] indices\n",
|
||||
" # and then just choose the first n=batch_size of these indices. Then compute the gradient update\n",
|
||||
" # from just the data with these indices. More properly, you should sample with replacement, but this will do for now.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" return phi"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "VKTC9-1Gpm3N"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
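{
"cell_type": "markdown",
"source": [
"One way to fill this in, following the hint above (and matching the inline loop notebook 6.4 uses): permute the indices, keep the first batch_size of them, and compute the gradient from just that subset."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Sketch: one SGD step on a random batch (mirrors the inline loop in notebook 6.4)\n",
"def stochastic_gradient_descent_step_example(phi, data, alpha, batch_size):\n",
"  # Choose batch_size random indices (without replacement, as the hint allows)\n",
"  batch_index = np.random.permutation(data.shape[1])[0:batch_size]\n",
"  # Gradient computed from the batch only\n",
"  gradient = compute_gradient(data[0,batch_index], data[1,batch_index], phi)\n",
"  return phi - alpha * gradient"
],
"metadata": {},
"execution_count": null,
"outputs": []
},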
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Set the random number generator so you always get same numbers (disable if you don't want this)\n",
|
||||
"np.random.seed(1)\n",
|
||||
"# Initialize the parameters\n",
|
||||
"n_steps = 41\n",
|
||||
"phi_all = np.zeros((2,n_steps+1))\n",
|
||||
"phi_all[0,0] = 3.5\n",
|
||||
"phi_all[1,0] = 6.5\n",
|
||||
"\n",
|
||||
"# Measure loss and draw initial model\n",
|
||||
"loss = compute_loss(data[0,:], data[1,:], model, phi_all[:,0:1])\n",
|
||||
"draw_model(data,model,phi_all[:,0:1], \"Initial parameters, Loss = %f\"%(loss))\n",
|
||||
"\n",
|
||||
"for c_step in range (n_steps):\n",
|
||||
" # Do gradient descent step\n",
|
||||
" phi_all[:,c_step+1:c_step+2] = stochastic_gradient_descent_step(phi_all[:,c_step:c_step+1],data, alpha =0.8, batch_size=5)\n",
|
||||
" # Measure loss and draw model every 8th step\n",
|
||||
" if c_step % 8 == 0:\n",
|
||||
" loss = compute_loss(data[0,:], data[1,:], model, phi_all[:,c_step+1:c_step+2])\n",
|
||||
" draw_model(data,model,phi_all[:,c_step+1], \"Iteration %d, loss = %f\"%(c_step+1,loss))\n",
|
||||
"\n",
|
||||
"draw_loss_function(compute_loss, data, model,phi_all)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "469OP_UHskJ4"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# TODO -- Experiment with different learning rates, starting points, batch sizes, number of steps. Get a feel for this."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "LxE2kTa3s29p"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# TODO -- Add a learning rate schedule. Reduce the learning rate by a factor of beta every M iterations"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "lw4QPOaQTh5e"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
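,
{
"cell_type": "markdown",
"source": [
"One possible learning rate schedule is sketched below, assuming the TODO means step decay; the names alpha_initial, beta_decay, and M are ours."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Sketch: step-decay schedule -- shrink alpha by a factor of beta_decay every M iterations\n",
"def scheduled_alpha_example(alpha_initial, beta_decay, M, c_step):\n",
"  # Integer division counts how many decay events have happened by step c_step\n",
"  return alpha_initial * (beta_decay ** (c_step // M))"
],
"metadata": {},
"execution_count": null,
"outputs": []
}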
|
||||
]
|
||||
}
382 Notebooks/Chap06/6_4_Momentum.ipynb Normal file
@@ -0,0 +1,382 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyMLS4qeqBTVHGdg9Sds9jND",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap06/6_4_Momentum.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 6.4: Momentum**\n",
|
||||
"\n",
|
||||
"This notebook investigates the use of momentum as illustrated in figure 6.7 from the book.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "el8l05WQEO46"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "xhmIOLiZELV_"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# import libraries\n",
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"from matplotlib import cm\n",
|
||||
"from matplotlib.colors import ListedColormap"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's create our training data 30 pairs {x_i, y_i}\n",
|
||||
"# We'll try to fit the Gabor model to these data\n",
|
||||
"data = np.array([[-1.920e+00,-1.422e+01,1.490e+00,-1.940e+00,-2.389e+00,-5.090e+00,\n",
|
||||
" -8.861e+00,3.578e+00,-6.010e+00,-6.995e+00,3.634e+00,8.743e-01,\n",
|
||||
" -1.096e+01,4.073e-01,-9.467e+00,8.560e+00,1.062e+01,-1.729e-01,\n",
|
||||
" 1.040e+01,-1.261e+01,1.574e-01,-1.304e+01,-2.156e+00,-1.210e+01,\n",
|
||||
" -1.119e+01,2.902e+00,-8.220e+00,-1.179e+01,-8.391e+00,-4.505e+00],\n",
|
||||
" [-1.051e+00,-2.482e-02,8.896e-01,-4.943e-01,-9.371e-01,4.306e-01,\n",
|
||||
" 9.577e-03,-7.944e-02 ,1.624e-01,-2.682e-01,-3.129e-01,8.303e-01,\n",
|
||||
" -2.365e-02,5.098e-01,-2.777e-01,3.367e-01,1.927e-01,-2.222e-01,\n",
|
||||
" 6.352e-02,6.888e-03,3.224e-02,1.091e-02,-5.706e-01,-5.258e-02,\n",
|
||||
" -3.666e-02,1.709e-01,-4.805e-02,2.008e-01,-1.904e-01,5.952e-01]])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "4cRkrh9MZ58Z"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's define our model\n",
|
||||
"def model(phi,x):\n",
|
||||
" sin_component = np.sin(phi[0] + 0.06 * phi[1] * x)\n",
|
||||
" gauss_component = np.exp(-(phi[0] + 0.06 * phi[1] * x) * (phi[0] + 0.06 * phi[1] * x) / 32)\n",
|
||||
" y_pred= sin_component * gauss_component\n",
|
||||
" return y_pred"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "WQUERmb2erAe"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Draw model\n",
|
||||
"def draw_model(data,model,phi,title=None):\n",
|
||||
" x_model = np.arange(-15,15,0.1)\n",
|
||||
" y_model = model(phi,x_model)\n",
|
||||
"\n",
|
||||
" fix, ax = plt.subplots()\n",
|
||||
" ax.plot(data[0,:],data[1,:],'bo')\n",
|
||||
" ax.plot(x_model,y_model,'m-')\n",
|
||||
" ax.set_xlim([-15,15]);ax.set_ylim([-1,1])\n",
|
||||
" ax.set_xlabel('x'); ax.set_ylabel('y')\n",
|
||||
" if title is not None:\n",
|
||||
" ax.set_title(title)\n",
|
||||
" plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "qFRe9POHF2le"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Initialize the parmaeters and draw the model\n",
|
||||
"phi = np.zeros((2,1))\n",
|
||||
"phi[0] = -5 # Horizontal offset\n",
|
||||
"phi[1] = 25 # Frequency\n",
|
||||
"draw_model(data,model,phi, \"Initial parameters\")\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "TXx1Tpd1Tl-I"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now lets compute the sum of squares loss for the training data and plot the loss function"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "QU5mdGvpTtEG"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def compute_loss(data_x, data_y, model, phi):\n",
|
||||
" pred_y = model(phi, data_x)\n",
|
||||
" loss = np.sum((pred_y-data_y)*(pred_y-data_y))\n",
|
||||
" return loss\n",
|
||||
"\n",
|
||||
"def draw_loss_function(compute_loss, data, model, phi_iters = None):\n",
|
||||
" # Define pretty colormap\n",
|
||||
" my_colormap_vals_hex =('2a0902', '2b0a03', '2c0b04', '2d0c05', '2e0c06', '2f0d07', '300d08', '310e09', '320f0a', '330f0b', '34100b', '35110c', '36110d', '37120e', '38120f', '39130f', '3a1410', '3b1411', '3c1511', '3d1612', '3e1613', '3f1713', '401714', '411814', '421915', '431915', '451a16', '461b16', '471b17', '481c17', '491d18', '4a1d18', '4b1e19', '4c1f19', '4d1f1a', '4e201b', '50211b', '51211c', '52221c', '53231d', '54231d', '55241e', '56251e', '57261f', '58261f', '592720', '5b2821', '5c2821', '5d2922', '5e2a22', '5f2b23', '602b23', '612c24', '622d25', '632e25', '652e26', '662f26', '673027', '683027', '693128', '6a3229', '6b3329', '6c342a', '6d342a', '6f352b', '70362c', '71372c', '72372d', '73382e', '74392e', '753a2f', '763a2f', '773b30', '783c31', '7a3d31', '7b3e32', '7c3e33', '7d3f33', '7e4034', '7f4134', '804235', '814236', '824336', '834437', '854538', '864638', '874739', '88473a', '89483a', '8a493b', '8b4a3c', '8c4b3c', '8d4c3d', '8e4c3e', '8f4d3f', '904e3f', '924f40', '935041', '945141', '955242', '965343', '975343', '985444', '995545', '9a5646', '9b5746', '9c5847', '9d5948', '9e5a49', '9f5a49', 'a05b4a', 'a15c4b', 'a35d4b', 'a45e4c', 'a55f4d', 'a6604e', 'a7614e', 'a8624f', 'a96350', 'aa6451', 'ab6552', 'ac6552', 'ad6653', 'ae6754', 'af6855', 'b06955', 'b16a56', 'b26b57', 'b36c58', 'b46d59', 'b56e59', 'b66f5a', 'b7705b', 'b8715c', 'b9725d', 'ba735d', 'bb745e', 'bc755f', 'bd7660', 'be7761', 'bf7862', 'c07962', 'c17a63', 'c27b64', 'c27c65', 'c37d66', 'c47e67', 'c57f68', 'c68068', 'c78169', 'c8826a', 'c9836b', 'ca846c', 'cb856d', 'cc866e', 'cd876f', 'ce886f', 'ce8970', 'cf8a71', 'd08b72', 'd18c73', 'd28d74', 'd38e75', 'd48f76', 'd59077', 'd59178', 'd69279', 'd7937a', 'd8957b', 'd9967b', 'da977c', 'da987d', 'db997e', 'dc9a7f', 'dd9b80', 'de9c81', 'de9d82', 'df9e83', 'e09f84', 'e1a185', 'e2a286', 'e2a387', 'e3a488', 'e4a589', 'e5a68a', 'e5a78b', 'e6a88c', 'e7aa8d', 'e7ab8e', 'e8ac8f', 'e9ad90', 'eaae91', 'eaaf92', 'ebb093', 'ecb295', 'ecb396', 'edb497', 'eeb598', 'eeb699', 'efb79a', 'efb99b', 'f0ba9c', 'f1bb9d', 'f1bc9e', 'f2bd9f', 'f2bfa1', 'f3c0a2', 'f3c1a3', 'f4c2a4', 'f5c3a5', 'f5c5a6', 'f6c6a7', 'f6c7a8', 'f7c8aa', 'f7c9ab', 'f8cbac', 'f8ccad', 'f8cdae', 'f9ceb0', 'f9d0b1', 'fad1b2', 'fad2b3', 'fbd3b4', 'fbd5b6', 'fbd6b7', 'fcd7b8', 'fcd8b9', 'fcdaba', 'fddbbc', 'fddcbd', 'fddebe', 'fddfbf', 'fee0c1', 'fee1c2', 'fee3c3', 'fee4c5', 'ffe5c6', 'ffe7c7', 'ffe8c9', 'ffe9ca', 'ffebcb', 'ffeccd', 'ffedce', 'ffefcf', 'fff0d1', 'fff2d2', 'fff3d3', 'fff4d5', 'fff6d6', 'fff7d8', 'fff8d9', 'fffada', 'fffbdc', 'fffcdd', 'fffedf', 'ffffe0')\n",
|
||||
" my_colormap_vals_dec = np.array([int(element,base=16) for element in my_colormap_vals_hex])\n",
|
||||
" r = np.floor(my_colormap_vals_dec/(256*256))\n",
|
||||
" g = np.floor((my_colormap_vals_dec - r *256 *256)/256)\n",
|
||||
" b = np.floor(my_colormap_vals_dec - r * 256 *256 - g * 256)\n",
|
||||
" my_colormap = ListedColormap(np.vstack((r,g,b)).transpose()/255.0)\n",
|
||||
"\n",
|
||||
" # Make grid of intercept/slope values to plot\n",
|
||||
" offsets_mesh, freqs_mesh = np.meshgrid(np.arange(-10,10.0,0.1), np.arange(2.5,22.5,0.1))\n",
|
||||
" loss_mesh = np.zeros_like(freqs_mesh)\n",
|
||||
" # Compute loss for every set of parameters\n",
|
||||
" for idslope, slope in np.ndenumerate(freqs_mesh):\n",
|
||||
" loss_mesh[idslope] = compute_loss(data[0,:], data[1,:], model, np.array([[offsets_mesh[idslope]], [slope]]))\n",
|
||||
"\n",
|
||||
" fig,ax = plt.subplots()\n",
|
||||
" fig.set_size_inches(8,8)\n",
|
||||
" ax.contourf(offsets_mesh,freqs_mesh,loss_mesh,256,cmap=my_colormap)\n",
|
||||
" ax.contour(offsets_mesh,freqs_mesh,loss_mesh,20,colors=['#80808080'])\n",
|
||||
" if phi_iters is not None:\n",
|
||||
" ax.plot(phi_iters[0,:], phi_iters[1,:],'go-')\n",
|
||||
" ax.set_ylim([2.5,22.5])\n",
|
||||
" ax.set_xlabel('Offset $\\phi_{0}$'); ax.set_ylabel('Frequency, $\\phi_{1}$')\n",
|
||||
" plt.show()\n",
|
||||
"\n",
|
||||
"draw_loss_function(compute_loss, data, model)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "I7dqTY2Gg7CR"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"As before, we compute the gradient vector for a given set of parameters:\n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
"\\frac{\\partial L}{\\partial \\boldsymbol\\phi} = \\begin{bmatrix}\\frac{\\partial L}{\\partial \\phi_0} \\\\\\frac{\\partial L}{\\partial \\phi_1} \\end{bmatrix}.\n",
|
||||
"\\end{equation}"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "s9Duf05WqqSC"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# These came from writing out the expression for the sum of squares loss and taking the\n",
|
||||
"# derivative with respect to phi0 and phi1. It was a lot of hassle to get it right!\n",
|
||||
"def gabor_deriv_phi0(data_x,data_y,phi0, phi1):\n",
|
||||
" x = 0.06 * phi1 * data_x + phi0\n",
|
||||
" y = data_y\n",
|
||||
" cos_component = np.cos(x)\n",
|
||||
" sin_component = np.sin(x)\n",
|
||||
" gauss_component = np.exp(-0.5 * x *x / 16)\n",
|
||||
" deriv = cos_component * gauss_component - sin_component * gauss_component * x / 16\n",
|
||||
" deriv = 2* deriv * (sin_component * gauss_component - y)\n",
|
||||
" return np.sum(deriv)\n",
|
||||
"\n",
|
||||
"def gabor_deriv_phi1(data_x, data_y,phi0, phi1):\n",
|
||||
" x = 0.06 * phi1 * data_x + phi0\n",
|
||||
" y = data_y\n",
|
||||
" cos_component = np.cos(x)\n",
|
||||
" sin_component = np.sin(x)\n",
|
||||
" gauss_component = np.exp(-0.5 * x *x / 16)\n",
|
||||
" deriv = 0.06 * data_x * cos_component * gauss_component - 0.06 * data_x*sin_component * gauss_component * x / 16\n",
|
||||
" deriv = 2*deriv * (sin_component * gauss_component - y)\n",
|
||||
" return np.sum(deriv)\n",
|
||||
"\n",
|
||||
"def compute_gradient(data_x, data_y, phi):\n",
|
||||
" dl_dphi0 = gabor_deriv_phi0(data_x, data_y, phi[0],phi[1])\n",
|
||||
" dl_dphi1 = gabor_deriv_phi1(data_x, data_y, phi[0],phi[1])\n",
|
||||
" # Return the gradient\n",
|
||||
" return np.array([[dl_dphi0],[dl_dphi1]])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "UpswmkL2qwBT"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Let's first run standard stochastic gradient descent."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "7Tv3d4zqAdZR"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Set the random number generator so you always get same numbers (disable if you don't want this)\n",
|
||||
"np.random.seed(1)\n",
|
||||
"# Initialize the parameters\n",
|
||||
"n_steps = 81\n",
|
||||
"batch_size = 5\n",
|
||||
"alpha = 0.6\n",
|
||||
"phi_all = np.zeros((2,n_steps+1))\n",
|
||||
"phi_all[0,0] = -1.5\n",
|
||||
"phi_all[1,0] = 6.5\n",
|
||||
"\n",
|
||||
"# Measure loss and draw initial model\n",
|
||||
"loss = compute_loss(data[0,:], data[1,:], model, phi_all[:,0:1])\n",
|
||||
"draw_model(data,model,phi_all[:,0:1], \"Initial parameters, Loss = %f\"%(loss))\n",
|
||||
"\n",
|
||||
"for c_step in range (n_steps):\n",
|
||||
" # Choose random batch indices\n",
|
||||
" batch_index = np.random.permutation(data.shape[1])[0:batch_size]\n",
|
||||
" # Compute the gradient\n",
|
||||
" gradient = compute_gradient(data[0,batch_index], data[1,batch_index], phi_all[:,c_step:c_step+1] )\n",
|
||||
" # Update the parameters\n",
|
||||
" phi_all[:,c_step+1:c_step+2] = phi_all[:,c_step:c_step+1] - alpha * gradient\n",
|
||||
"\n",
|
||||
"loss = compute_loss(data[0,:], data[1,:], model, phi_all[:,c_step+1:c_step+2])\n",
|
||||
"draw_model(data,model,phi_all[:,c_step+1], \"Iteration %d, loss = %f\"%(c_step+1,loss))\n",
|
||||
"draw_loss_function(compute_loss, data, model,phi_all)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "469OP_UHskJ4"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's add momentum (equation 6.11)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "nMILovgMFpdI"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Set the random number generator so you always get same numbers (disable if you don't want this)\n",
|
||||
"np.random.seed(1)\n",
|
||||
"# Initialize the parameters\n",
|
||||
"n_steps = 81\n",
|
||||
"batch_size = 5\n",
|
||||
"alpha = 0.6\n",
|
||||
"beta = 0.6\n",
|
||||
"momentum = np.zeros([2,1])\n",
|
||||
"phi_all = np.zeros((2,n_steps+1))\n",
|
||||
"phi_all[0,0] = -1.5\n",
|
||||
"phi_all[1,0] = 6.5\n",
|
||||
"\n",
|
||||
"# Measure loss and draw initial model\n",
|
||||
"loss = compute_loss(data[0,:], data[1,:], model, phi_all[:,0:1])\n",
|
||||
"draw_model(data,model,phi_all[:,0:1], \"Initial parameters, Loss = %f\"%(loss))\n",
|
||||
"\n",
|
||||
"for c_step in range (n_steps):\n",
|
||||
" # Choose random batch indices\n",
|
||||
" batch_index = np.random.permutation(data.shape[1])[0:batch_size]\n",
|
||||
" # Compute the gradient\n",
|
||||
" gradient = compute_gradient(data[0,batch_index], data[1,batch_index], phi_all[:,c_step:c_step+1])\n",
|
||||
" # TODO -- calculate momentum - replace the line below\n",
|
||||
" momentum = np.zeros([2,1])\n",
|
||||
"\n",
|
||||
" # Update the parameters\n",
|
||||
" phi_all[:,c_step+1:c_step+2] = phi_all[:,c_step:c_step+1] - alpha * momentum\n",
|
||||
"\n",
|
||||
"loss = compute_loss(data[0,:], data[1,:], model, phi_all[:,c_step+1:c_step+2])\n",
|
||||
"draw_model(data,model,phi_all[:,c_step+1], \"Iteration %d, loss = %f\"%(c_step+1,loss))\n",
|
||||
"draw_loss_function(compute_loss, data, model,phi_all)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "dWBU8ZbSFny9"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
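{
"cell_type": "markdown",
"source": [
"If equation 6.11 takes the usual exponential moving average form $\\mathbf{m} \\leftarrow \\beta \\mathbf{m} + (1-\\beta)\\,\\partial L/\\partial \\boldsymbol\\phi$ (check this against the book), the TODO line above becomes momentum = momentum_update_example(momentum, gradient, beta) with the helper sketched below."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Sketch, assuming equation 6.11 is m <- beta*m + (1-beta)*gradient (verify against the book)\n",
"def momentum_update_example(momentum, gradient, beta):\n",
"  # Exponential moving average of the (batch) gradients\n",
"  return beta * momentum + (1 - beta) * gradient"
],
"metadata": {},
"execution_count": null,
"outputs": []
},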
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Finally, we'll try Nesterov momentum"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "nYIAomA-KPkU"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Set the random number generator so you always get same numbers (disable if you don't want this)\n",
|
||||
"np.random.seed(1)\n",
|
||||
"# Initialize the parameters\n",
|
||||
"n_steps = 81\n",
|
||||
"batch_size = 5\n",
|
||||
"alpha = 0.6\n",
|
||||
"beta = 0.6\n",
|
||||
"momentum = np.zeros([2,1])\n",
|
||||
"phi_all = np.zeros((2,n_steps+1))\n",
|
||||
"phi_all[0,0] = -1.5\n",
|
||||
"phi_all[1,0] = 6.5\n",
|
||||
"\n",
|
||||
"# Measure loss and draw initial model\n",
|
||||
"loss = compute_loss(data[0,:], data[1,:], model, phi_all[:,0:1])\n",
|
||||
"draw_model(data,model,phi_all[:,0:1], \"Initial parameters, Loss = %f\"%(loss))\n",
|
||||
"\n",
|
||||
"for c_step in range (n_steps):\n",
|
||||
" # Choose random batch indices\n",
|
||||
" batch_index = np.random.permutation(data.shape[1])[0:batch_size]\n",
|
||||
" # TODO -- calculate Nesterov momentum - replace the lines below\n",
|
||||
" gradient = np.zeros([2,1])\n",
|
||||
" momentum = np.zeros([2,1])\n",
|
||||
"\n",
|
||||
" # Update the parameters\n",
|
||||
" phi_all[:,c_step+1:c_step+2] = phi_all[:,c_step:c_step+1] - alpha * momentum\n",
|
||||
" # Measure loss and draw model every 8th step\n",
|
||||
"\n",
|
||||
"loss = compute_loss(data[0,:], data[1,:], model, phi_all[:,c_step+1:c_step+2])\n",
|
||||
"draw_model(data,model,phi_all[:,c_step+1], \"Iteration %d, loss = %f\"%(c_step+1,loss))\n",
|
||||
"draw_loss_function(compute_loss, data, model,phi_all)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "XtwWeCZ5HLLh"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
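,
{
"cell_type": "markdown",
"source": [
"A sketch for the Nesterov TODO, assuming the standard lookahead form in which the gradient is evaluated at the point the momentum is about to carry the parameters to; check the exact convention against the book before relying on it. Usage would be momentum = nesterov_update_example(phi_all[:,c_step:c_step+1], momentum, data[0,batch_index], data[1,batch_index], alpha, beta)."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Sketch, assuming Nesterov momentum = gradient evaluated at the lookahead point\n",
"def nesterov_update_example(phi, momentum, data_x, data_y, alpha, beta):\n",
"  # Look ahead to where the current momentum would carry the parameters\n",
"  lookahead = phi - alpha * beta * momentum\n",
"  # Gradient at the lookahead point, then the usual moving average\n",
"  gradient = compute_gradient(data_x, data_y, lookahead)\n",
"  return beta * momentum + (1 - beta) * gradient"
],
"metadata": {},
"execution_count": null,
"outputs": []
}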
|
||||
]
|
||||
}
288 Notebooks/Chap06/6_5_Adam.ipynb Normal file
@@ -0,0 +1,288 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyNFsCOnucz1nQt7PBEnKeTV",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap06/6_5_Adam.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 6.5: Adam**\n",
|
||||
"\n",
|
||||
"This notebook investigates the Adam algorithm as illustrated in figure 6.9 from the book.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "ysg9OHZq07YC"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "Hi_t5nCk01tx"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# import libraries\n",
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"from matplotlib.colors import ListedColormap"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define function that we wish to find the minimum of (normally would be defined implicitly by data and loss)\n",
|
||||
"def loss(phi0, phi1):\n",
|
||||
" height = np.exp(-0.5 * (phi1 * phi1)*4.0)\n",
|
||||
" height = height * np. exp(-0.5* (phi0-0.7) *(phi0-0.7)/4.0)\n",
|
||||
" return 1.0-height\n",
|
||||
"\n",
|
||||
"# Compute the gradients of this function (for simplicity, I just used finite differences)\n",
|
||||
"def get_loss_gradient(phi0, phi1):\n",
|
||||
" delta_phi = 0.00001;\n",
|
||||
" gradient = np.zeros((2,1));\n",
|
||||
" gradient[0] = (loss(phi0+delta_phi/2.0, phi1) - loss(phi0-delta_phi/2.0, phi1))/delta_phi\n",
|
||||
" gradient[1] = (loss(phi0, phi1+delta_phi/2.0) - loss(phi0, phi1-delta_phi/2.0))/delta_phi\n",
|
||||
" return gradient[:,0];\n",
|
||||
"\n",
|
||||
"# Compute the loss function at a range of values of phi0 and phi1 for plotting\n",
|
||||
"def get_loss_function_for_plot():\n",
|
||||
" grid_values = np.arange(-1.0,1.0,0.01);\n",
|
||||
" phi0mesh, phi1mesh = np.meshgrid(grid_values, grid_values)\n",
|
||||
" loss_function = np.zeros((grid_values.size, grid_values.size))\n",
|
||||
" for idphi0, phi0 in enumerate(grid_values):\n",
|
||||
" for idphi1, phi1 in enumerate(grid_values):\n",
|
||||
" loss_function[idphi0, idphi1] = loss(phi1,phi0)\n",
|
||||
" return loss_function, phi0mesh, phi1mesh"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "GTrgOKhp16zw"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define fancy colormap\n",
|
||||
"my_colormap_vals_hex =('2a0902', '2b0a03', '2c0b04', '2d0c05', '2e0c06', '2f0d07', '300d08', '310e09', '320f0a', '330f0b', '34100b', '35110c', '36110d', '37120e', '38120f', '39130f', '3a1410', '3b1411', '3c1511', '3d1612', '3e1613', '3f1713', '401714', '411814', '421915', '431915', '451a16', '461b16', '471b17', '481c17', '491d18', '4a1d18', '4b1e19', '4c1f19', '4d1f1a', '4e201b', '50211b', '51211c', '52221c', '53231d', '54231d', '55241e', '56251e', '57261f', '58261f', '592720', '5b2821', '5c2821', '5d2922', '5e2a22', '5f2b23', '602b23', '612c24', '622d25', '632e25', '652e26', '662f26', '673027', '683027', '693128', '6a3229', '6b3329', '6c342a', '6d342a', '6f352b', '70362c', '71372c', '72372d', '73382e', '74392e', '753a2f', '763a2f', '773b30', '783c31', '7a3d31', '7b3e32', '7c3e33', '7d3f33', '7e4034', '7f4134', '804235', '814236', '824336', '834437', '854538', '864638', '874739', '88473a', '89483a', '8a493b', '8b4a3c', '8c4b3c', '8d4c3d', '8e4c3e', '8f4d3f', '904e3f', '924f40', '935041', '945141', '955242', '965343', '975343', '985444', '995545', '9a5646', '9b5746', '9c5847', '9d5948', '9e5a49', '9f5a49', 'a05b4a', 'a15c4b', 'a35d4b', 'a45e4c', 'a55f4d', 'a6604e', 'a7614e', 'a8624f', 'a96350', 'aa6451', 'ab6552', 'ac6552', 'ad6653', 'ae6754', 'af6855', 'b06955', 'b16a56', 'b26b57', 'b36c58', 'b46d59', 'b56e59', 'b66f5a', 'b7705b', 'b8715c', 'b9725d', 'ba735d', 'bb745e', 'bc755f', 'bd7660', 'be7761', 'bf7862', 'c07962', 'c17a63', 'c27b64', 'c27c65', 'c37d66', 'c47e67', 'c57f68', 'c68068', 'c78169', 'c8826a', 'c9836b', 'ca846c', 'cb856d', 'cc866e', 'cd876f', 'ce886f', 'ce8970', 'cf8a71', 'd08b72', 'd18c73', 'd28d74', 'd38e75', 'd48f76', 'd59077', 'd59178', 'd69279', 'd7937a', 'd8957b', 'd9967b', 'da977c', 'da987d', 'db997e', 'dc9a7f', 'dd9b80', 'de9c81', 'de9d82', 'df9e83', 'e09f84', 'e1a185', 'e2a286', 'e2a387', 'e3a488', 'e4a589', 'e5a68a', 'e5a78b', 'e6a88c', 'e7aa8d', 'e7ab8e', 'e8ac8f', 'e9ad90', 'eaae91', 'eaaf92', 'ebb093', 'ecb295', 'ecb396', 'edb497', 'eeb598', 'eeb699', 'efb79a', 'efb99b', 'f0ba9c', 'f1bb9d', 'f1bc9e', 'f2bd9f', 'f2bfa1', 'f3c0a2', 'f3c1a3', 'f4c2a4', 'f5c3a5', 'f5c5a6', 'f6c6a7', 'f6c7a8', 'f7c8aa', 'f7c9ab', 'f8cbac', 'f8ccad', 'f8cdae', 'f9ceb0', 'f9d0b1', 'fad1b2', 'fad2b3', 'fbd3b4', 'fbd5b6', 'fbd6b7', 'fcd7b8', 'fcd8b9', 'fcdaba', 'fddbbc', 'fddcbd', 'fddebe', 'fddfbf', 'fee0c1', 'fee1c2', 'fee3c3', 'fee4c5', 'ffe5c6', 'ffe7c7', 'ffe8c9', 'ffe9ca', 'ffebcb', 'ffeccd', 'ffedce', 'ffefcf', 'fff0d1', 'fff2d2', 'fff3d3', 'fff4d5', 'fff6d6', 'fff7d8', 'fff8d9', 'fffada', 'fffbdc', 'fffcdd', 'fffedf', 'ffffe0')\n",
|
||||
"my_colormap_vals_dec = np.array([int(element,base=16) for element in my_colormap_vals_hex])\n",
|
||||
"r = np.floor(my_colormap_vals_dec/(256*256))\n",
|
||||
"g = np.floor((my_colormap_vals_dec - r *256 *256)/256)\n",
|
||||
"b = np.floor(my_colormap_vals_dec - r * 256 *256 - g * 256)\n",
|
||||
"my_colormap_vals = np.vstack((r,g,b)).transpose()/255.0\n",
|
||||
"my_colormap = ListedColormap(my_colormap_vals)\n",
|
||||
"\n",
|
||||
"# Plotting function\n",
|
||||
"def draw_function(phi0mesh, phi1mesh, loss_function, my_colormap, opt_path):\n",
|
||||
" fig = plt.figure();\n",
|
||||
" ax = plt.axes();\n",
|
||||
" fig.set_size_inches(7,7)\n",
|
||||
" ax.contourf(phi0mesh, phi1mesh, loss_function, 256, cmap=my_colormap);\n",
|
||||
" ax.contour(phi0mesh, phi1mesh, loss_function, 20, colors=['#80808080'])\n",
|
||||
" ax.plot(opt_path[0,:], opt_path[1,:],'-', color='#a0d9d3ff')\n",
|
||||
" ax.plot(opt_path[0,:], opt_path[1,:],'.', color='#a0d9d3ff',markersize=10)\n",
|
||||
" ax.set_xlabel(\"$\\phi_{0}$\")\n",
|
||||
" ax.set_ylabel(\"$\\phi_1}$\")\n",
|
||||
" plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YKijFyuH4ZJD"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Simple fixed step size gradient descent\n",
|
||||
"def grad_descent(start_posn, n_steps, alpha):\n",
|
||||
" grad_path = np.zeros((2, n_steps+1));\n",
|
||||
" grad_path[:,0] = start_posn[:,0];\n",
|
||||
" for c_step in range(n_steps):\n",
|
||||
" this_grad = get_loss_gradient(grad_path[0,c_step], grad_path[1,c_step]);\n",
|
||||
" grad_path[:,c_step+1] = grad_path[:,c_step] - alpha * this_grad\n",
|
||||
" return grad_path;"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Afxr7RqR8s7Q"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"We'll start by running gradient descent with a fixed step size for this loss function."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "MXZL8lu3-EUF"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"loss_function, phi0mesh, phi1mesh = get_loss_function_for_plot() ;\n",
|
||||
"\n",
|
||||
"start_posn = np.zeros((2,1));\n",
|
||||
"start_posn[0,0] = -0.7; start_posn[1,0] = -0.9\n",
|
||||
"\n",
|
||||
"# Run gradient descent\n",
|
||||
"grad_path1 = grad_descent(start_posn, n_steps=200, alpha = 0.08)\n",
|
||||
"draw_function(phi0mesh, phi1mesh, loss_function, my_colormap, grad_path1)\n",
|
||||
"grad_path2 = grad_descent(start_posn, n_steps=40, alpha= 1.0)\n",
|
||||
"draw_function(phi0mesh, phi1mesh, loss_function, my_colormap, grad_path2)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "fgkwVEal8stH"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Because the function changes much faster in $\\phi_1$ than in $\\phi_0$, there is no great step size to choose. If we set the step size so that it makes sensible progress in the $\\phi_1$, then it takes many iterations to converge. If we set the step size tso that we make sensible progress in the $\\phi_{0}$ direction, then the path oscillates in the $\\phi_1$ direction. \n",
|
||||
"\n",
|
||||
"This motivates Adam. At the core of Adam is the idea that we should just determine which way is downhill along each axis (i.e. left/right for $\\phi_0$ or up/down for $\\phi_1$) and move a fixed distance in that direction."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "AN2uNxaa-bRX"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def normalized_gradients(start_posn, n_steps, alpha, epsilon=1e-20):\n",
|
||||
" grad_path = np.zeros((2, n_steps+1));\n",
|
||||
" grad_path[:,0] = start_posn[:,0];\n",
|
||||
" for c_step in range(n_steps):\n",
|
||||
" # Measure the gradient as in equation 6.13 (first line)\n",
|
||||
" m = get_loss_gradient(grad_path[0,c_step], grad_path[1,c_step]);\n",
|
||||
" # TO DO -- compute the squared gradient as in equation 6.13 (second line)\n",
|
||||
" # Replace this line:\n",
|
||||
" v = np.ones_like(grad_path[:,0])\n",
|
||||
"\n",
|
||||
" # TO DO -- apply the update rule (equation 6.14)\n",
|
||||
" # Replace this line:\n",
|
||||
" grad_path[:,c_step+1] = grad_path[:,c_step]\n",
|
||||
"\n",
|
||||
" return grad_path;"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "IqX2zP_29gLF"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
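A sketch of the two TODOs in `normalized_gradients`: the squared gradient is computed pointwise, and dividing the gradient by its own magnitude leaves only the sign of each component, so every step moves a fixed distance `alpha` along each axis. This assumes the pointwise reading of equations 6.13/6.14 referenced in the comments.

```python
# Sketch of the normalized-gradients TODOs (pointwise statistics)
v = m * m                                   # pointwise squared gradient
grad_path[:,c_step+1] = grad_path[:,c_step] - alpha * m / (np.sqrt(v) + epsilon)
```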
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's try out normalized gradients\n",
|
||||
"start_posn = np.zeros((2,1));\n",
|
||||
"start_posn[0,0] = -0.7; start_posn[1,0] = -0.9\n",
|
||||
"\n",
|
||||
"# Run gradient descent\n",
|
||||
"grad_path1 = normalized_gradients(start_posn, n_steps=40, alpha = 0.08)\n",
|
||||
"draw_function(phi0mesh, phi1mesh, loss_function, my_colormap, grad_path1)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "wxe-dKW5Chv3"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"This moves towards the minimum at a sensible speed, but we never actually converge -- the solution just bounces back and forth between the last two points. To make it converge, we add momentum to both the estimates of the gradient and the pointwise squared gradient. We also modify the statistics by a factor that depends on the time to make sure the progress is now slow to start with."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "_6KoKBJdGGI4"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def adam(start_posn, n_steps, alpha, beta=0.9, gamma=0.99, epsilon=1e-20):\n",
|
||||
" grad_path = np.zeros((2, n_steps+1));\n",
|
||||
" grad_path[:,0] = start_posn[:,0];\n",
|
||||
" m = np.zeros_like(grad_path[:,0])\n",
|
||||
" v = np.zeros_like(grad_path[:,0])\n",
|
||||
" for c_step in range(n_steps):\n",
|
||||
" # Measure the gradient\n",
|
||||
" grad = get_loss_gradient(grad_path[0,c_step], grad_path[1,c_step])\n",
|
||||
" # TODO -- Update the momentum based gradient estimate equation 6.15 (first line)\n",
|
||||
" # Replace this line:\n",
|
||||
" m = m;\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" # TODO -- update the momentum based squared gradient estimate as in equation 6.15 (second line)\n",
|
||||
" # Replace this line:\n",
|
||||
" v = v\n",
|
||||
"\n",
|
||||
" # TODO -- Modify the statistics according to euation 6.16\n",
|
||||
" # You will need the function np.power\n",
|
||||
" # Replace these lines\n",
|
||||
" m_tilde = m\n",
|
||||
" v_tilde = v\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" # TO DO -- apply the update rule (equation 6.17)\n",
|
||||
" # Replace this line:\n",
|
||||
" grad_path[:,c_step+1] = grad_path[:,c_step]\n",
|
||||
"\n",
|
||||
" return grad_path;"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "BKUhZSGgDEm0"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
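A sketch of the four TODOs in `adam`. The `+1` in the bias-correction exponents is an assumption that follows from `c_step` starting at zero; check the exact indexing against equations 6.15-6.17 in the book.

```python
# Sketch of the Adam TODOs
m = beta * m + (1.0 - beta) * grad                   # momentum of gradient
v = gamma * v + (1.0 - gamma) * grad * grad          # momentum of squared gradient
m_tilde = m / (1.0 - np.power(beta, c_step + 1))     # bias-corrected statistics
v_tilde = v / (1.0 - np.power(gamma, c_step + 1))
grad_path[:,c_step+1] = grad_path[:,c_step] - alpha * m_tilde / (np.sqrt(v_tilde) + epsilon)
```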
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's try out our Adam algorithm\n",
|
||||
"start_posn = np.zeros((2,1));\n",
|
||||
"start_posn[0,0] = -0.7; start_posn[1,0] = -0.9\n",
|
||||
"\n",
|
||||
"# Run gradient descent\n",
|
||||
"grad_path1 = adam(start_posn, n_steps=60, alpha = 0.05)\n",
|
||||
"draw_function(phi0mesh, phi1mesh, loss_function, my_colormap, grad_path1)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "sg5X18P3IbYo"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
|
||||
]
|
||||
}
|
||||
415
Notebooks/Chap07/7_1_Backpropagation_in_Toy_Model.ipynb
Normal file
@@ -0,0 +1,415 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyP5wHK5E7/el+vxU947K3q8",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap07/7_1_Backpropagation_in_Toy_Model.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 7.1: Backpropagation in Toy Model**\n",
|
||||
"\n",
|
||||
"This notebook computes the derivatives of the toy function discussed in section 7.3 of the book.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "pOZ6Djz0dhoy"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"We're going to investigate how to take the derivatives of functions where one operation is composed with another, which is composed with a third and so on. For example, consider the model:\n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
" \\mbox{f}[x,\\boldsymbol\\phi] = \\beta_3+\\omega_3\\cdot\\cos\\Bigl[\\beta_2+\\omega_2\\cdot\\exp\\bigl[\\beta_1+\\omega_1\\cdot\\sin[\\beta_0+\\omega_0x]\\bigr]\\Bigr],\n",
|
||||
"\\end{equation}\n",
|
||||
"\n",
|
||||
"with parameters $\\boldsymbol\\phi=\\{\\beta_0,\\omega_0,\\beta_1,\\omega_1,\\beta_2,\\omega_2,\\beta_3,\\omega_3\\}$.<br>\n",
|
||||
"\n",
|
||||
"This is a composition of the functions $\\cos[\\bullet],\\exp[\\bullet],\\sin[\\bullet]$. I chose these just because you probably already know the derivatives of these functions:\n",
|
||||
"\n",
|
||||
"\\begin{eqnarray*}\n",
|
||||
" \\frac{\\partial \\cos[z]}{\\partial z} = -\\sin[z] \\quad\\quad \\frac{\\partial \\exp[z]}{\\partial z} = \\exp[z] \\quad\\quad \\frac{\\partial \\sin[z]}{\\partial z} = \\cos[z].\n",
|
||||
"\\end{eqnarray*}\n",
|
||||
"\n",
|
||||
"Suppose that we have a least squares loss function:\n",
|
||||
"\n",
|
||||
"\\begin{equation*}\n",
|
||||
"\\ell_i = (\\mbox{f}[x_i,\\boldsymbol\\phi]-y_i)^2,\n",
|
||||
"\\end{equation*}\n",
|
||||
"\n",
|
||||
"Assume that we know the current values of $\\beta_{0},\\beta_{1},\\beta_{2},\\beta_{3},\\omega_{0},\\omega_{1},\\omega_{2},\\omega_{3}$, $x_i$ and $y_i$. We could obviously calculate $\\ell_i$. But we also want to know how $\\ell_i$ changes when we make a small change to $\\beta_{0},\\beta_{1},\\beta_{2},\\beta_{3},\\omega_{0},\\omega_{1},\\omega_{2}$, or $\\omega_{3}$. In other words, we want to compute the eight derivatives:\n",
|
||||
"\n",
|
||||
"\\begin{eqnarray*}\n",
|
||||
"\\frac{\\partial \\ell_i}{\\partial \\beta_{0}}, \\quad \\frac{\\partial \\ell_i}{\\partial \\beta_{1}}, \\quad \\frac{\\partial \\ell_i}{\\partial \\beta_{2}}, \\quad \\frac{\\partial \\ell_i }{\\partial \\beta_{3}}, \\quad \\frac{\\partial \\ell_i}{\\partial \\omega_{0}}, \\quad \\frac{\\partial \\ell_i}{\\partial \\omega_{1}}, \\quad \\frac{\\partial \\ell_i}{\\partial \\omega_{2}}, \\quad\\mbox{and} \\quad \\frac{\\partial \\ell_i}{\\partial \\omega_{3}}.\n",
|
||||
"\\end{eqnarray*}"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "1DmMo2w63CmT"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# import library\n",
|
||||
"import numpy as np"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "RIPaoVN834Lj"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Let's first define the original function for $y$ and the likelihood term:"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "32-ufWhc3v2c"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "AakK_qen3BpU"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def fn(x, beta0, beta1, beta2, beta3, omega0, omega1, omega2, omega3):\n",
|
||||
" return beta3+omega3 * np.cos(beta2 + omega2 * np.exp(beta1 + omega1 * np.sin(beta0 + omega0 * x)))\n",
|
||||
"\n",
|
||||
"def likelihood(x, y, beta0, beta1, beta2, beta3, omega0, omega1, omega2, omega3):\n",
|
||||
" diff = fn(x, beta0, beta1, beta2, beta3, omega0, omega1, omega2, omega3) - y\n",
|
||||
" return diff * diff"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now we'll choose some values for the betas and the omegas and x and compute the output of the function:"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "y7tf0ZMt5OXt"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"beta0 = 1.0; beta1 = 2.0; beta2 = -3.0; beta3 = 0.4\n",
|
||||
"omega0 = 0.1; omega1 = -0.4; omega2 = 2.0; omega3 = 3.0\n",
|
||||
"x = 2.3; y =2.0\n",
|
||||
"l_i_func = likelihood(x,y,beta0,beta1,beta2,beta3,omega0,omega1,omega2,omega3)\n",
|
||||
"print('l_i=%3.3f'%l_i_func)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "pwvOcCxr41X_"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Computing derivatives by hand\n",
|
||||
"\n",
|
||||
"We could compute expressions for the derivatives by hand and write code to compute them directly but some have very complex expressions, even for this relatively simple original equation. For example:\n",
|
||||
"\n",
|
||||
"\\begin{eqnarray*}\n",
|
||||
"\\frac{\\partial \\ell_i}{\\partial \\omega_{0}} &=& -2 \\left( \\beta_3+\\omega_3\\cdot\\cos\\Bigl[\\beta_2+\\omega_2\\cdot\\exp\\bigl[\\beta_1+\\omega_1\\cdot\\sin[\\beta_0+\\omega_0\\cdot x_i]\\bigr]\\Bigr]-y_i\\right)\\nonumber \\\\\n",
|
||||
"&&\\hspace{0.5cm}\\cdot \\omega_1\\omega_2\\omega_3\\cdot x_i\\cdot\\cos[\\beta_0+\\omega_0 \\cdot x_i]\\cdot\\exp\\Bigl[\\beta_1 + \\omega_1 \\cdot \\sin[\\beta_0+\\omega_0\\cdot x_i]\\Bigr]\\nonumber\\\\\n",
|
||||
"&& \\hspace{1cm}\\cdot \\sin\\biggl[\\beta_2+\\omega_2\\cdot \\exp\\Bigl[\\beta_1 + \\omega_1 \\cdot \\sin[\\beta_0+\\omega_0\\cdot x_i]\\Bigr]\\biggr].\n",
|
||||
"\\end{eqnarray*}"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "u5w69NeT64yV"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"dldbeta3_func = 2 * (beta3 +omega3 * np.cos(beta2 + omega2 * np.exp(beta1+omega1 * np.sin(beta0+omega0 * x)))-y)\n",
|
||||
"dldomega0_func = -2 *(beta3 +omega3 * np.cos(beta2 + omega2 * np.exp(beta1+omega1 * np.sin(beta0+omega0 * x)))-y) * \\\n",
|
||||
" omega1 * omega2 * omega3 * x * np.cos(beta0 + omega0 * x) * np.exp(beta1 +omega1 * np.sin(beta0 + omega0 * x)) *\\\n",
|
||||
" np.sin(beta2 + omega2 * np.exp(beta1+ omega1* np.sin(beta0+omega0 * x)))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "7t22hALp5zkq"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Let's make sure this is correct using finite differences:"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "iRh4hnu3-H3n"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"dldomega0_fd = (likelihood(x,y,beta0,beta1,beta2,beta3,omega0+0.00001,omega1,omega2,omega3)-likelihood(x,y,beta0,beta1,beta2,beta3,omega0,omega1,omega2,omega3))/0.00001\n",
|
||||
"\n",
|
||||
"print('dydomega0: Function value = %3.3f, Finite difference value = %3.3f'%(dldomega0_func,dldomega0_fd))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "1O3XmXMx-HlZ"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"The code to calculate $\\partial l_i/ \\partial \\omega_0$ is a bit of a nightmare. It's easy to make mistakes, and you can see that some parts of it are repeated (for example, the $\\sin[\\bullet]$ term), which suggests some kind of redundancy in the calculations. The goal of this practical is to compute the derivatives in a much simpler way. There will be three steps:"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "wS4IPjZAKWTN"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Step 1:** Write the original equations as a series of intermediate calculations.\n",
|
||||
"\n",
|
||||
"\\begin{eqnarray}\n",
|
||||
"f_{0} &=& \\beta_{0} + \\omega_{0} x_i\\nonumber\\\\\n",
|
||||
"h_{1} &=& \\sin[f_{0}]\\nonumber\\\\\n",
|
||||
"f_{1} &=& \\beta_{1} + \\omega_{1}h_{1}\\nonumber\\\\\n",
|
||||
"h_{2} &=& \\exp[f_{1}]\\nonumber\\\\\n",
|
||||
"f_{2} &=& \\beta_{2} + \\omega_{2} h_{2}\\nonumber\\\\\n",
|
||||
"h_{3} &=& \\cos[f_{2}]\\nonumber\\\\\n",
|
||||
"f_{3} &=& \\beta_{3} + \\omega_{3}h_{3}\\nonumber\\\\\n",
|
||||
"l_i &=& (f_3-y_i)^2\n",
|
||||
"\\end{eqnarray}\n",
|
||||
"\n",
|
||||
"and compute and store the values of all of these intermediate values. We'll need them to compute the derivatives.<br> This is called the **forward pass**."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "8UWhvDeNDudz"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# TODO compute all the f_k and h_k terms\n",
|
||||
"# Replace the code below\n",
|
||||
"\n",
|
||||
"f0 = 0\n",
|
||||
"h1 = 0\n",
|
||||
"f1 = 0\n",
|
||||
"h2 = 0\n",
|
||||
"f2 = 0\n",
|
||||
"h3 = 0\n",
|
||||
"f3 = 0\n",
|
||||
"l_i = 0\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "ZWKAq6HC90qV"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
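For reference, the forward pass follows directly from the list of intermediate calculations above; a sketch (using `beta0`...`omega3`, `x`, and `y` from the earlier cells) that should reproduce the true values printed in the next cell:

```python
# Sketch of the forward pass for the toy model
f0 = beta0 + omega0 * x
h1 = np.sin(f0)
f1 = beta1 + omega1 * h1
h2 = np.exp(f1)
f2 = beta2 + omega2 * h2
h3 = np.cos(f2)
f3 = beta3 + omega3 * h3
l_i = (f3 - y) * (f3 - y)
```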
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's check we got that right:\n",
|
||||
"print(\"f0: true value = %3.3f, your value = %3.3f\"%(1.230, f0))\n",
|
||||
"print(\"h1: true value = %3.3f, your value = %3.3f\"%(0.942, h1))\n",
|
||||
"print(\"f1: true value = %3.3f, your value = %3.3f\"%(1.623, f1))\n",
|
||||
"print(\"h2: true value = %3.3f, your value = %3.3f\"%(5.068, h2))\n",
|
||||
"print(\"f2: true value = %3.3f, your value = %3.3f\"%(7.137, f2))\n",
|
||||
"print(\"h3: true value = %3.3f, your value = %3.3f\"%(0.657, h3))\n",
|
||||
"print(\"f3: true value = %3.3f, your value = %3.3f\"%(2.372, f3))\n",
|
||||
"print(\"like original = %3.3f, like from forward pass = %3.3f\"%(l_i_func, l_i))\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "ibxXw7TUW4Sx"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Step 2:** Compute the derivatives of $y$ with respect to the intermediate quantities that we just calculated, but in reverse order:\n",
|
||||
"\n",
|
||||
"\\begin{eqnarray}\n",
|
||||
"\\quad \\frac{\\partial \\ell_i}{\\partial f_3}, \\quad \\frac{\\partial \\ell_i}{\\partial h_3}, \\quad \\frac{\\partial \\ell_i}{\\partial f_2}, \\quad\n",
|
||||
"\\frac{\\partial \\ell_i}{\\partial h_2}, \\quad \\frac{\\partial \\ell_i}{\\partial f_1}, \\quad \\frac{\\partial \\ell_i}{\\partial h_1}, \\quad\\mbox{and} \\quad \\frac{\\partial \\ell_i}{\\partial f_0}.\n",
|
||||
"\\end{eqnarray}\n",
|
||||
"\n",
|
||||
"The first of these derivatives is straightforward:\n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
"\\frac{\\partial \\ell_i}{\\partial f_{3}} = 2 (f_3-y).\n",
|
||||
"\\end{equation}\n",
|
||||
"\n",
|
||||
"The second derivative can be calculated using the chain rule:\n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
"\\frac{\\partial \\ell_i}{\\partial h_{3}} =\\frac{\\partial f_{3}}{\\partial h_{3}} \\frac{\\partial \\ell_i}{\\partial f_{3}} .\n",
|
||||
"\\end{equation}\n",
|
||||
"\n",
|
||||
"The left-hand side asks how $\\ell_i$ changes when $h_{3}$ changes. The right-hand side says we can decompose this into (i) how $ell_i$ changes when $f_{3}$ changes and how $f_{3}$ changes when $h_{3}$ changes. So you get a chain of events happening: $h_{3}$ changes $f_{3}$, which changes $\\ell_i$, and the derivatives represent the effects of this chain. Notice that we computed the first of these derivatives already and is $2 (f_3-y)$. We calculated $f_{3}$ in step 1. The second term is the derivative of $\\beta_{3} + \\omega_{3}h_{3}$ with respect to $h_3$ which is simply $\\omega_3$. \n",
|
||||
"\n",
|
||||
"We can continue in this way, computing the derivatives of the output with respect to these intermediate quantities:\n",
|
||||
"\n",
|
||||
"\\begin{eqnarray}\n",
|
||||
"\\frac{\\partial \\ell_i}{\\partial f_{2}} &=& \\frac{\\partial h_{3}}{\\partial f_{2}}\\left(\n",
|
||||
"\\frac{\\partial f_{3}}{\\partial h_{3}}\\frac{\\partial \\ell_i}{\\partial f_{3}} \\right)\n",
|
||||
"\\nonumber \\\\\n",
|
||||
"\\frac{\\partial \\ell_i}{\\partial h_{2}} &=& \\frac{\\partial f_{2}}{\\partial h_{2}}\\left(\\frac{\\partial h_{3}}{\\partial f_{2}}\\frac{\\partial f_{3}}{\\partial h_{3}}\\frac{\\partial \\ell_i}{\\partial f_{3}}\\right)\\nonumber \\\\\n",
|
||||
"\\frac{\\partial \\ell_i}{\\partial f_{1}} &=& \\frac{\\partial h_{2}}{\\partial f_{1}}\\left( \\frac{\\partial f_{2}}{\\partial h_{2}}\\frac{\\partial h_{3}}{\\partial f_{2}}\\frac{\\partial f_{3}}{\\partial h_{3}}\\frac{\\partial \\ell_i}{\\partial f_{3}} \\right)\\nonumber \\\\\n",
|
||||
"\\frac{\\partial \\ell_i}{\\partial h_{1}} &=& \\frac{\\partial f_{1}}{\\partial h_{1}}\\left(\\frac{\\partial h_{2}}{\\partial f_{1}} \\frac{\\partial f_{2}}{\\partial h_{2}}\\frac{\\partial h_{3}}{\\partial f_{2}}\\frac{\\partial f_{3}}{\\partial h_{3}}\\frac{\\partial \\ell_i}{\\partial f_{3}} \\right)\\nonumber \\\\\n",
|
||||
"\\frac{\\partial \\ell_i}{\\partial f_{0}} &=& \\frac{\\partial h_{1}}{\\partial f_{0}}\\left(\\frac{\\partial f_{1}}{\\partial h_{1}}\\frac{\\partial h_{2}}{\\partial f_{1}} \\frac{\\partial f_{2}}{\\partial h_{2}}\\frac{\\partial h_{3}}{\\partial f_{2}}\\frac{\\partial f_{3}}{\\partial h_{3}}\\frac{\\partial \\ell_i}{\\partial f_{3}} \\right).\n",
|
||||
"\\end{eqnarray}\n",
|
||||
"\n",
|
||||
"In each case, we have already computed all of the terms except the last one in the previous step, and the last term is simple to evaluate. This is called the **backward pass**."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "jay8NYWdFHuZ"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# TODO -- Compute the derivatives of the output with respect\n",
|
||||
"# to the intermediate computations h_k and f_k (i.e, run the backward pass)\n",
|
||||
"# I've done the first two for you. You replace the code below:\n",
|
||||
"dldf3 = 2* (f3 - y)\n",
|
||||
"dldh3 = omega3 * dldf3\n",
|
||||
"# Replace the code below\n",
|
||||
"dldf2 = 1\n",
|
||||
"dldh2 = 1\n",
|
||||
"dldf1 = 1\n",
|
||||
"dldh1 = 1\n",
|
||||
"dldf0 = 1\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "gCQJeI--Egdl"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
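A sketch of the remaining backward-pass lines: each derivative multiplies the derivative already computed by one new local term, exactly as in the chain of equations above.

```python
# Sketch of the backward pass (each line adds one local derivative)
dldf2 = -np.sin(f2) * dldh3    # d cos(f2)/d f2 = -sin(f2)
dldh2 = omega2 * dldf2         # d(beta2 + omega2*h2)/d h2 = omega2
dldf1 = np.exp(f1) * dldh2     # d exp(f1)/d f1 = exp(f1) (= h2)
dldh1 = omega1 * dldf1         # d(beta1 + omega1*h1)/d h1 = omega1
dldf0 = np.cos(f0) * dldh1     # d sin(f0)/d f0 = cos(f0)
```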
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's check we got that right\n",
|
||||
"print(\"dldf3: true value = %3.3f, your value = %3.3f\"%(0.745, dldf3))\n",
|
||||
"print(\"dldh3: true value = %3.3f, your value = %3.3f\"%(2.234, dldh3))\n",
|
||||
"print(\"dldf2: true value = %3.3f, your value = %3.3f\"%(-1.683, dldf2))\n",
|
||||
"print(\"dldh2: true value = %3.3f, your value = %3.3f\"%(-3.366, dldh2))\n",
|
||||
"print(\"dldf1: true value = %3.3f, your value = %3.3f\"%(-17.060, dldf1))\n",
|
||||
"print(\"dldh1: true value = %3.3f, your value = %3.3f\"%(6.824, dldh1))\n",
|
||||
"print(\"dldf0: true value = %3.3f, your value = %3.3f\"%(2.281, dldf0))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "dS1OrLtlaFr7"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"**Step 3:** Finally, we consider how the loss~$\\ell_{i}$ changes when we change the parameters $\\beta_{\\bullet}$ and $\\omega_{\\bullet}$. Once more, we apply the chain rule:\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\\begin{eqnarray}\n",
|
||||
"\\frac{\\partial \\ell_i}{\\partial \\beta_{k}} &=& \\frac{\\partial f_{k}}{\\partial \\beta_{k}}\\frac{\\partial \\ell_i}{\\partial f_{k}}\\nonumber \\\\\n",
|
||||
"\\frac{\\partial \\ell_i}{\\partial \\omega_{k}} &=& \\frac{\\partial f_{k}}{\\partial \\omega_{k}}\\frac{\\partial \\ell_i}{\\partial f_{k}}.\n",
|
||||
"\\end{eqnarray}\n",
|
||||
"\n",
|
||||
"\\noindent In each case, the second term on the right-hand side was computed in step 2. When $k>0$, we have~$f_{k}=\\beta_{k}+\\omega_k \\cdot h_{k}$, so:\n",
|
||||
"\n",
|
||||
"\\begin{eqnarray}\n",
|
||||
"\\frac{\\partial f_{k}}{\\partial \\beta_{k}} = 1 \\quad\\quad\\mbox{and}\\quad \\quad \\frac{\\partial f_{k}}{\\partial \\omega_{k}} &=& h_{k}.\n",
|
||||
"\\end{eqnarray}"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "FlzlThQPGpkU"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# TODO -- Calculate the final derivatives with respect to the beta and omega terms\n",
|
||||
"\n",
|
||||
"dldbeta3 = 1\n",
|
||||
"dldomega3 = 1\n",
|
||||
"dldbeta2 = 1\n",
|
||||
"dldomega2 = 1\n",
|
||||
"dldbeta1 = 1\n",
|
||||
"dldomega1 = 1\n",
|
||||
"dldbeta0 = 1\n",
|
||||
"dldomega0 = 1\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "1I2BhqZhGMK6"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
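A sketch for the final step: each $\partial f_k/\partial \beta_k$ is one and each $\partial f_k/\partial \omega_k$ is $h_k$, with the input $x$ playing the role of $h_0$.

```python
# Sketch: dl/dbeta_k = dl/df_k and dl/domega_k = h_k * dl/df_k
dldbeta3 = dldf3
dldomega3 = h3 * dldf3
dldbeta2 = dldf2
dldomega2 = h2 * dldf2
dldbeta1 = dldf1
dldomega1 = h1 * dldf1
dldbeta0 = dldf0
dldomega0 = x * dldf0    # for k=0 the "activation" feeding f_0 is the input x
```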
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's check we got them right\n",
|
||||
"print('dldbeta3: Your value = %3.3f, True value = %3.3f'%(dldbeta3, 0.745))\n",
|
||||
"print('dldomega3: Your value = %3.3f, True value = %3.3f'%(dldomega3, 0.489))\n",
|
||||
"print('dldbeta2: Your value = %3.3f, True value = %3.3f'%(dldbeta2, -1.683))\n",
|
||||
"print('dldomega2: Your value = %3.3f, True value = %3.3f'%(dldomega2, -8.530))\n",
|
||||
"print('dldbeta1: Your value = %3.3f, True value = %3.3f'%(dldbeta1, -17.060))\n",
|
||||
"print('dldomega1: Your value = %3.3f, True value = %3.3f'%(dldomega1, -16.079))\n",
|
||||
"print('dldbeta0: Your value = %3.3f, True value = %3.3f'%(dldbeta0, 2.281))\n",
|
||||
"print('dldomega0: Your value = %3.3f, Function value = %3.3f, Finite difference value = %3.3f'%(dldomega0, dldomega0_func, dldomega0_fd))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "38eiOn2aHgHI"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Using this method, we can compute the derivatives quite easily without needing to compute very complicated expressions. In the next practical, we'll apply this same method to a deep neural network."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "N2ZhrR-2fNa1"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
345
Notebooks/Chap07/7_2_Backpropagation.ipynb
Normal file
@@ -0,0 +1,345 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyN2nPVR0imZntgj4Oasyvmo",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap07/7_2_Backpropagation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 7.2: Backpropagation**\n",
|
||||
"\n",
|
||||
"This notebook runs the backpropagation algorithm on a deep neural network as described in section 7.4 of the book.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "L6chybAVFJW2"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "LdIDglk1FFcG"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"First let's define a neural network. We'll just choose the weights and biases randomly for now"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "nnUoI0m6GyjC"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Set seed so we always get the same random numbers\n",
|
||||
"np.random.seed(0)\n",
|
||||
"\n",
|
||||
"# Number of layers\n",
|
||||
"K = 5\n",
|
||||
"# Number of neurons per layer\n",
|
||||
"D = 6\n",
|
||||
"# Input layer\n",
|
||||
"D_i = 1\n",
|
||||
"# Output layer\n",
|
||||
"D_o = 1\n",
|
||||
"\n",
|
||||
"# Make empty lists\n",
|
||||
"all_weights = [None] * (K+1)\n",
|
||||
"all_biases = [None] * (K+1)\n",
|
||||
"\n",
|
||||
"# Create input and output layers\n",
|
||||
"all_weights[0] = np.random.normal(size=(D, D_i))\n",
|
||||
"all_weights[-1] = np.random.normal(size=(D_o, D))\n",
|
||||
"all_biases[0] = np.random.normal(size =(D,1))\n",
|
||||
"all_biases[-1]= np.random.normal(size =(D_o,1))\n",
|
||||
"\n",
|
||||
"# Create intermediate layers\n",
|
||||
"for layer in range(1,K):\n",
|
||||
" all_weights[layer] = np.random.normal(size=(D,D))\n",
|
||||
" all_biases[layer] = np.random.normal(size=(D,1))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "WVM4Tc_jGI0Q"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define the Rectified Linear Unit (ReLU) function\n",
|
||||
"def ReLU(preactivation):\n",
|
||||
" activation = preactivation.clip(0.0)\n",
|
||||
" return activation"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "jZh-7bPXIDq4"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's run our random network. The weight matrices $\\boldsymbol\\Omega_{1\\ldots K}$ are the entries of the list \"all_weights\" and the biases $\\boldsymbol\\beta_{1\\ldots k}$ are the entries of the list \"all_biases\"\n",
|
||||
"\n",
|
||||
"We know that we will need the activations $\\mathbf{f}_{0\\ldots K}$ and the activations $\\mathbf{h}_{1\\ldots K}$ for the forward pass of backpropagation, so we'll store and return these as well.\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "5irtyxnLJSGX"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def compute_network_output(net_input, all_weights, all_biases):\n",
|
||||
"\n",
|
||||
" # Retrieve number of layers\n",
|
||||
" K = len(all_weights) -1\n",
|
||||
"\n",
|
||||
" # We'll store the pre-activations at each layer in a list \"all_f\"\n",
|
||||
" # and the activations in a second list[all_h].\n",
|
||||
" all_f = [None] * (K+1)\n",
|
||||
" all_h = [None] * (K+1)\n",
|
||||
"\n",
|
||||
" #For convenience, we'll set\n",
|
||||
" # all_h[0] to be the input, and all_f[K] will be the output\n",
|
||||
" all_h[0] = net_input\n",
|
||||
"\n",
|
||||
" # Run through the layers, calculating all_f[0...K-1] and all_h[1...K]\n",
|
||||
" for layer in range(K):\n",
|
||||
" # Update preactivations and activations at this layer according to eqn 7.16\n",
|
||||
" # Remmember to use np.matmul for matrrix multiplications\n",
|
||||
" # TODO -- Replace the lines below\n",
|
||||
" all_f[layer] = all_h[layer]\n",
|
||||
" all_h[layer+1] = all_f[layer]\n",
|
||||
"\n",
|
||||
" # Compute the output from the last hidden layer\n",
|
||||
" # TO DO -- Replace the line below\n",
|
||||
" all_f[K] = np.zeros_like(all_biases[-1])\n",
|
||||
"\n",
|
||||
" # Retrieve the output\n",
|
||||
" net_output = all_f[K]\n",
|
||||
"\n",
|
||||
" return net_output, all_f, all_h"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "LgquJUJvJPaN"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
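A sketch of the three TODO lines in the forward pass; the same three updates appear in completed form in notebook 7.3 further below, so you can check against those.

```python
# Sketch of the forward-pass TODOs (preactivation, then ReLU activation)
all_f[layer] = all_biases[layer] + np.matmul(all_weights[layer], all_h[layer])
all_h[layer+1] = ReLU(all_f[layer])
# ...and the output from the last hidden layer:
all_f[K] = all_biases[K] + np.matmul(all_weights[K], all_h[K])
```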
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define in input\n",
|
||||
"net_input = np.ones((D_i,1)) * 1.2\n",
|
||||
"# Compute network output\n",
|
||||
"net_output, all_f, all_h = compute_network_output(net_input,all_weights, all_biases)\n",
|
||||
"print(\"True output = %3.3f, Your answer = %3.3f\"%(1.907, net_output[0,0]))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "IN6w5m2ZOhnB"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's define a loss function. We'll just use the least squares loss function. We'll also write a function to compute dloss_doutput"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "SxVTKp3IcoBF"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def least_squares_loss(net_output, y):\n",
|
||||
" return np.sum((net_output-y) * (net_output-y))\n",
|
||||
"\n",
|
||||
"def d_loss_d_output(net_output, y):\n",
|
||||
" return 2*(net_output -y);"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "6XqWSYWJdhQR"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"y = np.ones((D_o,1)) * 20.0\n",
|
||||
"loss = least_squares_loss(net_output, y)\n",
|
||||
"print(\"y = %3.3f Loss = %3.3f\"%(y, loss))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "njF2DUQmfttR"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's compute the derivatives of the network. We already computed the forward pass. Let's compute the backward pass."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "98WmyqFYWA-0"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# We'll need the indicator function\n",
|
||||
"def indicator_function(x):\n",
|
||||
" x_in = np.array(x)\n",
|
||||
" x_in[x_in>=0] = 1\n",
|
||||
" x_in[x_in<0] = 0\n",
|
||||
" return x_in\n",
|
||||
"\n",
|
||||
"# Main backward pass routine\n",
|
||||
"def backward_pass(all_weights, all_biases, all_f, all_h, y):\n",
|
||||
" # We'll store the derivatives dl_dweights and dl_dbiases in lists as well\n",
|
||||
" all_dl_dweights = [None] * (K+1)\n",
|
||||
" all_dl_dbiases = [None] * (K+1)\n",
|
||||
" # And we'll store the derivatives of the loss with respect to the activation and preactivations in lists\n",
|
||||
" all_dl_df = [None] * (K+1)\n",
|
||||
" all_dl_dh = [None] * (K+1)\n",
|
||||
" # Again for convenience we'll stick with the convention that all_h[0] is the net input and all_f[k] in the net output\n",
|
||||
"\n",
|
||||
" # Compute derivatives of net output with respect to loss\n",
|
||||
" all_dl_df[K] = np.array(d_loss_d_output(all_f[K],y))\n",
|
||||
"\n",
|
||||
" # Now work backwards through the network\n",
|
||||
" for layer in range(K,-1,-1):\n",
|
||||
" # TODO Calculate the derivatives of biases at layer this from all_dl_df[layer]. (eq 7.21)\n",
|
||||
" # NOTE! To take a copy of matrix X, use Z=np.array(X)\n",
|
||||
" # REPLACE THIS LINE\n",
|
||||
" all_dl_dbiases[layer] = np.zeros_like(all_biases[layer])\n",
|
||||
"\n",
|
||||
" # TODO Calculate the derivatives of weight at layer from all_dl_df[K] and all_h[K] (eq 7.22)\n",
|
||||
" # Don't forget to use np.matmul\n",
|
||||
" # REPLACE THIS LINE\n",
|
||||
" all_dl_dweights[layer] = np.zeros_like(all_weights[layer])\n",
|
||||
"\n",
|
||||
" # TODO: calculate the derivatives of activations from weight and derivatives of next preactivations (eq 7.20)\n",
|
||||
" # REPLACE THIS LINE\n",
|
||||
" all_dl_dh[layer] = np.zeros_like(all_h[layer])\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" if layer > 0:\n",
|
||||
" # TODO Calculate the derivatives of the pre-activation f with respect to activation h (deriv of ReLu function)\n",
|
||||
" # REPLACE THIS LINE\n",
|
||||
" all_dl_df[layer-1] = np.zeros_like(all_f[layer-1])\n",
|
||||
"\n",
|
||||
" return all_dl_dweights, all_dl_dbiases"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "LJng7WpRPLMz"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
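A sketch of the four TODOs in the backward pass; notebook 7.3 below contains the same four updates in completed form, which you can use to verify your answer.

```python
# Sketch of the backward-pass TODOs
all_dl_dbiases[layer] = np.array(all_dl_df[layer])                              # eq 7.21
all_dl_dweights[layer] = np.matmul(all_dl_df[layer], all_h[layer].transpose())  # eq 7.22
all_dl_dh[layer] = np.matmul(all_weights[layer].transpose(), all_dl_df[layer])  # eq 7.20
if layer > 0:
    # ReLU derivative: pass the gradient only where the preactivation was positive
    all_dl_df[layer-1] = indicator_function(all_f[layer-1]) * all_dl_dh[layer]
```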
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"all_dl_dweights, all_dl_dbiases = backward_pass(all_weights, all_biases, all_f, all_h, y)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "9A9MHc4sQvbp"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"np.set_printoptions(precision=3)\n",
|
||||
"# Make space for derivatives computed by finite differences\n",
|
||||
"all_dl_dweights_fd = [None] * (K+1)\n",
|
||||
"all_dl_dbiases_fd = [None] * (K+1)\n",
|
||||
"\n",
|
||||
"# Let's test if we have the derivatives right using finite differences\n",
|
||||
"delta_fd = 0.000001\n",
|
||||
"\n",
|
||||
"# Test the dervatives of the bias vectors\n",
|
||||
"for layer in range(K):\n",
|
||||
" dl_dbias = np.zeros_like(all_dl_dbiases[layer])\n",
|
||||
" # For every element in the bias\n",
|
||||
" for row in range(all_biases[layer].shape[0]):\n",
|
||||
" # Take copy of biases We'll change one element each time\n",
|
||||
" all_biases_copy = [np.array(x) for x in all_biases]\n",
|
||||
" all_biases_copy[layer][row] += delta_fd\n",
|
||||
" network_output_1, *_ = compute_network_output(net_input, all_weights, all_biases_copy)\n",
|
||||
" network_output_2, *_ = compute_network_output(net_input, all_weights, all_biases)\n",
|
||||
" dl_dbias[row] = (least_squares_loss(network_output_1, y) - least_squares_loss(network_output_2,y))/delta_fd\n",
|
||||
" all_dl_dbiases_fd[layer] = np.array(dl_dbias)\n",
|
||||
" print(\"Bias %d, derivatives from backprop:\"%(layer))\n",
|
||||
" print(all_dl_dbiases[layer])\n",
|
||||
" print(\"Bias %d, derivatives from finite differences\"%(layer))\n",
|
||||
" print(all_dl_dbiases_fd[layer])\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Test the derivatives of the weights matrices\n",
|
||||
"for layer in range(K):\n",
|
||||
" dl_dweight = np.zeros_like(all_dl_dweights[layer])\n",
|
||||
" # For every element in the bias\n",
|
||||
" for row in range(all_weights[layer].shape[0]):\n",
|
||||
" for col in range(all_weights[layer].shape[1]):\n",
|
||||
" # Take copy of biases We'll change one element each time\n",
|
||||
" all_weights_copy = [np.array(x) for x in all_weights]\n",
|
||||
" all_weights_copy[layer][row][col] += delta_fd\n",
|
||||
" network_output_1, *_ = compute_network_output(net_input, all_weights_copy, all_biases)\n",
|
||||
" network_output_2, *_ = compute_network_output(net_input, all_weights, all_biases)\n",
|
||||
" dl_dweight[row][col] = (least_squares_loss(network_output_1, y) - least_squares_loss(network_output_2,y))/delta_fd\n",
|
||||
" all_dl_dweights_fd[layer] = np.array(dl_dweight)\n",
|
||||
" print(\"Weight %d, derivatives from backprop:\"%(layer))\n",
|
||||
" print(all_dl_dweights[layer])\n",
|
||||
" print(\"Weight %d, derivatives from finite differences\"%(layer))\n",
|
||||
" print(all_dl_dweights_fd[layer])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "PK-UtE3hreAK"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
|
||||
]
|
||||
}
|
||||
354
Notebooks/Chap07/7_3_Initialization.ipynb
Normal file
@@ -0,0 +1,354 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyNHLXFpiSnUzAbzhtOk+bxu",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap07/7_3_Initialization.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 7.3: Initialization**\n",
|
||||
"\n",
|
||||
"This notebook explores weight initialization in deep neural networks as described in section 7.5 of the book.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "L6chybAVFJW2"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "LdIDglk1FFcG"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"First let's define a neural network. We'll just choose the weights and biases randomly for now"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "nnUoI0m6GyjC"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def init_params(K, D, sigma_sq_omega):\n",
|
||||
" # Set seed so we always get the same random numbers\n",
|
||||
" np.random.seed(0)\n",
|
||||
"\n",
|
||||
" # Input layer\n",
|
||||
" D_i = 1\n",
|
||||
" # Output layer\n",
|
||||
" D_o = 1\n",
|
||||
"\n",
|
||||
" # Make empty lists\n",
|
||||
" all_weights = [None] * (K+1)\n",
|
||||
" all_biases = [None] * (K+1)\n",
|
||||
"\n",
|
||||
" # Create input and output layers\n",
|
||||
" all_weights[0] = np.random.normal(size=(D, D_i))*np.sqrt(sigma_sq_omega)\n",
|
||||
" all_weights[-1] = np.random.normal(size=(D_o, D)) * np.sqrt(sigma_sq_omega)\n",
|
||||
" all_biases[0] = np.zeros((D,1))\n",
|
||||
" all_biases[-1]= np.zeros((D_o,1))\n",
|
||||
"\n",
|
||||
" # Create intermediate layers\n",
|
||||
" for layer in range(1,K):\n",
|
||||
" all_weights[layer] = np.random.normal(size=(D,D))*np.sqrt(sigma_sq_omega)\n",
|
||||
" all_biases[layer] = np.zeros((D,1))\n",
|
||||
"\n",
|
||||
" return all_weights, all_biases"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "WVM4Tc_jGI0Q"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define the Rectified Linear Unit (ReLU) function\n",
|
||||
"def ReLU(preactivation):\n",
|
||||
" activation = preactivation.clip(0.0)\n",
|
||||
" return activation"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "jZh-7bPXIDq4"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def compute_network_output(net_input, all_weights, all_biases):\n",
|
||||
"\n",
|
||||
" # Retrieve number of layers\n",
|
||||
" K = len(all_weights) -1\n",
|
||||
"\n",
|
||||
" # We'll store the pre-activations at each layer in a list \"all_f\"\n",
|
||||
" # and the activations in a second list[all_h].\n",
|
||||
" all_f = [None] * (K+1)\n",
|
||||
" all_h = [None] * (K+1)\n",
|
||||
"\n",
|
||||
" #For convenience, we'll set\n",
|
||||
" # all_h[0] to be the input, and all_f[K] will be the output\n",
|
||||
" all_h[0] = net_input\n",
|
||||
"\n",
|
||||
" # Run through the layers, calculating all_f[0...K-1] and all_h[1...K]\n",
|
||||
" for layer in range(K):\n",
|
||||
" # Update preactivations and activations at this layer according to eqn 7.5\n",
|
||||
" all_f[layer] = all_biases[layer] + np.matmul(all_weights[layer], all_h[layer])\n",
|
||||
" all_h[layer+1] = ReLU(all_f[layer])\n",
|
||||
"\n",
|
||||
" # Compute the output from the last hidden layer\n",
|
||||
" all_f[K] = all_biases[K] + np.matmul(all_weights[K], all_h[K])\n",
|
||||
"\n",
|
||||
" # Retrieve the output\n",
|
||||
" net_output = all_f[K]\n",
|
||||
"\n",
|
||||
" return net_output, all_f, all_h"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "LgquJUJvJPaN"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's investigate how this the size of the outputs vary as we change the initialization variance:\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "bIUrcXnOqChl"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Number of layers\n",
|
||||
"K = 5\n",
|
||||
"# Number of neurons per layer\n",
|
||||
"D = 8\n",
|
||||
" # Input layer\n",
|
||||
"D_i = 1\n",
|
||||
"# Output layer\n",
|
||||
"D_o = 1\n",
|
||||
"# Set variance of initial weights to 1\n",
|
||||
"sigma_sq_omega = 1.0\n",
|
||||
"# Initialize parameters\n",
|
||||
"all_weights, all_biases = init_params(K,D,sigma_sq_omega)\n",
|
||||
"\n",
|
||||
"n_data = 1000\n",
|
||||
"data_in = np.random.normal(size=(1,n_data))\n",
|
||||
"net_output, all_f, all_h = compute_network_output(data_in, all_weights, all_biases)\n",
|
||||
"\n",
|
||||
"for layer in range(K):\n",
|
||||
" print(\"Layer %d, std of hidden units = %3.3f\"%(layer, np.std(all_h[layer])))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "A55z3rKBqO7M"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# You can see that the values of the hidden units are increasing on average (the variance is across all hidden units at the layer\n",
|
||||
"# and the 1000 training examples\n",
|
||||
"\n",
|
||||
"# TO DO\n",
|
||||
"# Change this to 50 layers with 80 hidden units per layer\n",
|
||||
"\n",
|
||||
"# TO DO\n",
|
||||
"# Now experiment with sigma_sq_omega to try to stop the variance of the forward computation explode"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "VL_SO4tar3DC"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
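A sketch for the two TO DOs above (the concrete values are assumptions to experiment with). With ReLU activations, He initialization sets the weight variance to $2/D_h$, where $D_h$ is the number of hidden units feeding each layer; this keeps the variance of the forward computation roughly constant from layer to layer.

```python
# Sketch (assumed experiment values): deeper/wider network with He initialization
K = 50                      # layers
D = 80                      # hidden units per layer
sigma_sq_omega = 2.0 / D    # He initialization for ReLU networks
all_weights, all_biases = init_params(K, D, sigma_sq_omega)
```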
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's define a loss function. We'll just use the least squares loss function. We'll also write a function to compute dloss_doutput\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "SxVTKp3IcoBF"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def least_squares_loss(net_output, y):\n",
|
||||
" return np.sum((net_output-y) * (net_output-y))\n",
|
||||
"\n",
|
||||
"def d_loss_d_output(net_output, y):\n",
|
||||
" return 2*(net_output -y);"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "6XqWSYWJdhQR"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Here's the code for the backward pass"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "98WmyqFYWA-0"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# We'll need the indicator function\n",
|
||||
"def indicator_function(x):\n",
|
||||
" x_in = np.array(x)\n",
|
||||
" x_in[x_in>=0] = 1\n",
|
||||
" x_in[x_in<0] = 0\n",
|
||||
" return x_in\n",
|
||||
"\n",
|
||||
"# Main backward pass routine\n",
|
||||
"def backward_pass(all_weights, all_biases, all_f, all_h, y):\n",
|
||||
" # We'll store the derivatives dl_dweights and dl_dbiases in lists as well\n",
|
||||
" all_dl_dweights = [None] * (K+1)\n",
|
||||
" all_dl_dbiases = [None] * (K+1)\n",
|
||||
" # And we'll store the derivatives of the loss with respect to the activation and preactivations in lists\n",
|
||||
" all_dl_df = [None] * (K+1)\n",
|
||||
" all_dl_dh = [None] * (K+1)\n",
|
||||
" # Again for convenience we'll stick with the convention that all_h[0] is the net input and all_f[k] in the net output\n",
|
||||
"\n",
|
||||
" # Compute derivatives of net output with respect to loss\n",
|
||||
" all_dl_df[K] = np.array(d_loss_d_output(all_f[K],y))\n",
|
||||
"\n",
|
||||
" # Now work backwards through the network\n",
|
||||
" for layer in range(K,-1,-1):\n",
|
||||
" # Calculate the derivatives of biases at layer from all_dl_df[K]. (eq 7.13, line 1)\n",
|
||||
" all_dl_dbiases[layer] = np.array(all_dl_df[layer])\n",
|
||||
" # Calculate the derivatives of weight at layer from all_dl_df[K] and all_h[K] (eq 7.13, line 2)\n",
|
||||
" all_dl_dweights[layer] = np.matmul(all_dl_df[layer], all_h[layer].transpose())\n",
|
||||
"\n",
|
||||
" # Calculate the derivatives of activations from weight and derivatives of next preactivations (eq 7.13, line 3 second part)\n",
|
||||
" all_dl_dh[layer] = np.matmul(all_weights[layer].transpose(), all_dl_df[layer])\n",
|
||||
" # Calculate the derivatives of the pre-activation f with respect to activation h (eq 7.13, line 3, first part)\n",
|
||||
" if layer > 0:\n",
|
||||
" all_dl_df[layer-1] = indicator_function(all_f[layer-1]) * all_dl_dh[layer]\n",
|
||||
"\n",
|
||||
" return all_dl_dweights, all_dl_dbiases, all_dl_dh, all_dl_df"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "LJng7WpRPLMz"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's look at what happens to the magnitude of the gradients on the way back."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "phFnbthqwhFi"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Number of layers\n",
|
||||
"K = 5\n",
|
||||
"# Number of neurons per layer\n",
|
||||
"D = 8\n",
|
||||
" # Input layer\n",
|
||||
"D_i = 1\n",
|
||||
"# Output layer\n",
|
||||
"D_o = 1\n",
|
||||
"# Set variance of initial weights to 1\n",
|
||||
"sigma_sq_omega = 1.0\n",
|
||||
"# Initialize parameters\n",
|
||||
"all_weights, all_biases = init_params(K,D,sigma_sq_omega)\n",
|
||||
"\n",
|
||||
"# For simplicity we'll just consider the gradients of the weights and biases between the first and last hidden layer\n",
|
||||
"n_data = 100\n",
|
||||
"aggregate_dl_df = [None] * (K+1)\n",
|
||||
"for layer in range(1,K):\n",
|
||||
" # These 3D arrays will store the gradients for every data point\n",
|
||||
" aggregate_dl_df[layer] = np.zeros((D,n_data))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# We'll have to compute the derivatives of the parameters for each data point separately\n",
|
||||
"for c_data in range(n_data):\n",
|
||||
" data_in = np.random.normal(size=(1,1))\n",
|
||||
" y = np.zeros((1,1))\n",
|
||||
" net_output, all_f, all_h = compute_network_output(data_in, all_weights, all_biases)\n",
|
||||
" all_dl_dweights, all_dl_dbiases, all_dl_dh, all_dl_df = backward_pass(all_weights, all_biases, all_f, all_h, y)\n",
|
||||
" for layer in range(1,K):\n",
|
||||
" aggregate_dl_df[layer][:,c_data] = np.squeeze(all_dl_df[layer])\n",
|
||||
"\n",
|
||||
"for layer in range(1,K):\n",
|
||||
" print(\"Layer %d, std of dl_dh = %3.3f\"%(layer, np.std(aggregate_dl_df[layer].ravel())))\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "9A9MHc4sQvbp"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# You can see that the values of the hidden units are increasing on average (the variance is across all hidden units at the layer\n",
|
||||
"# and the 1000 training examples\n",
|
||||
"\n",
|
||||
"# TO DO\n",
|
||||
"# Change this to 50 layers with 80 hidden units per layer\n",
|
||||
"\n",
|
||||
"# TO DO\n",
|
||||
"# Now experiment with sigma_sq_omega to try to stop the variance of the gradients exploding\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "gtokc0VX0839"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
|
||||
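{
"cell_type": "markdown",
"source": [
"The cell below is an added sketch of one possible direction for the experiment above (an assumption, not the book's stated answer): He initialization suggests setting the weight variance to $2/D$, which roughly keeps the gradient magnitudes constant from layer to layer."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Added sketch: rerun with 50 layers, 80 hidden units, and He-style\n",
"# weight variance sigma_sq_omega = 2/D (an assumed choice)\n",
"K = 50\n",
"D = 80\n",
"sigma_sq_omega = 2.0 / D\n",
"all_weights, all_biases = init_params(K, D, sigma_sq_omega)\n",
"n_data = 100\n",
"aggregate_dl_df = [None] * (K+1)\n",
"for layer in range(1, K):\n",
"  aggregate_dl_df[layer] = np.zeros((D, n_data))\n",
"for c_data in range(n_data):\n",
"  data_in = np.random.normal(size=(1,1))\n",
"  y = np.zeros((1,1))\n",
"  net_output, all_f, all_h = compute_network_output(data_in, all_weights, all_biases)\n",
"  _, _, _, all_dl_df = backward_pass(all_weights, all_biases, all_f, all_h, y)\n",
"  for layer in range(1, K):\n",
"    aggregate_dl_df[layer][:, c_data] = np.squeeze(all_dl_df[layer])\n",
"# Print every 10th layer to keep the output short\n",
"for layer in range(1, K, 10):\n",
"  print(\"Layer %d, std of dl_dh = %3.3f\" % (layer, np.std(aggregate_dl_df[layer].ravel())))"
],
"metadata": {},
"execution_count": null,
"outputs": []
}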
]
}
238
Notebooks/Chap08/8_1_MNIST_1D_Performance.ipynb
Normal file
@@ -0,0 +1,238 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"gpuType": "T4",
"authorship_tag": "ABX9TyNLj3HOpVB87nRu7oSLuBaU",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap08/8_1_MNIST_1D_Performance.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"source": [
"# **Notebook 8.1: MNIST_1D_Performance**\n",
"\n",
"This notebook runs a simple neural network on the MNIST1D dataset as in figure 8.2a. It uses code from https://github.com/greydanus/mnist1d to generate the data.\n",
"\n",
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
"\n",
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
],
"metadata": {
"id": "L6chybAVFJW2"
}
},
{
"cell_type": "code",
"source": [
"# Run this if you're in a Colab to make a local copy of the MNIST 1D repository\n",
"!git clone https://github.com/greydanus/mnist1d"
],
"metadata": {
"id": "ifVjS4cTOqKz"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"import torch, torch.nn as nn\n",
"from torch.utils.data import TensorDataset, DataLoader\n",
"from torch.optim.lr_scheduler import StepLR\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"import mnist1d"
],
"metadata": {
"id": "qyE7G1StPIqO"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"Let's generate a training and test dataset using the MNIST1D code. The dataset gets saved as a .pkl file so it doesn't have to be regenerated each time."
],
"metadata": {
"id": "F7LNq72SP6jO"
}
},
{
"cell_type": "code",
"source": [
"args = mnist1d.data.get_dataset_args()\n",
"data = mnist1d.data.get_dataset(args, path='./mnist1d_data.pkl', download=False, regenerate=False)\n",
"\n",
"# The training and test inputs and outputs are in\n",
"# data['x'], data['y'], data['x_test'], and data['y_test']\n",
"print(\"Examples in training set: {}\".format(len(data['y'])))\n",
"print(\"Examples in test set: {}\".format(len(data['y_test'])))\n",
"print(\"Length of each example: {}\".format(data['x'].shape[-1]))"
],
"metadata": {
"id": "YLxf7dJfPaqw"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"D_i = 40 # Input dimensions\n",
"D_k = 100 # Hidden dimensions\n",
"D_o = 10 # Output dimensions\n",
"# TO DO:\n",
"# Define a model with two hidden layers of size 100\n",
"# And ReLU activations between them\n",
"# Replace this line (see Figure 7.8 of book for help):\n",
"model = torch.nn.Sequential(torch.nn.Linear(D_i, D_o))\n",
"\n",
"\n",
"def weights_init(layer_in):\n",
"  # TO DO:\n",
"  # Initialize the parameters with He initialization\n",
"  # Replace this line (see figure 7.8 of book for help)\n",
"  print(\"Initializing layer\")\n",
"\n",
"\n",
"# Call the function you just defined\n",
"model.apply(weights_init)\n",
"# (one possible solution is sketched in the cell below)"
],
"metadata": {
"id": "FxaB5vc0uevl"
},
"execution_count": null,
"outputs": []
},
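{
"cell_type": "markdown",
"source": [
"The next cell is an added sketch of one possible solution to the TO DOs above; the same pattern appears in notebook 8.3 later in this set, so this is a reasonable guess rather than the only answer."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Added sketch: a possible solution for the TO DOs above\n",
"model = torch.nn.Sequential(\n",
"  torch.nn.Linear(D_i, D_k),\n",
"  torch.nn.ReLU(),\n",
"  torch.nn.Linear(D_k, D_k),\n",
"  torch.nn.ReLU(),\n",
"  torch.nn.Linear(D_k, D_o))\n",
"\n",
"def weights_init(layer_in):\n",
"  # He initialization for the weights; biases set to zero\n",
"  if isinstance(layer_in, nn.Linear):\n",
"    nn.init.kaiming_uniform_(layer_in.weight)\n",
"    layer_in.bias.data.fill_(0.0)\n",
"\n",
"model.apply(weights_init)"
],
"metadata": {},
"execution_count": null,
"outputs": []
},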
{
"cell_type": "code",
"source": [
"# choose cross entropy loss function (equation 5.24)\n",
"loss_function = torch.nn.CrossEntropyLoss()\n",
"# construct SGD optimizer and initialize learning rate and momentum\n",
"optimizer = torch.optim.SGD(model.parameters(), lr = 0.05, momentum=0.9)\n",
"# object that decreases learning rate by half every 10 epochs\n",
"scheduler = StepLR(optimizer, step_size=10, gamma=0.5)\n",
"# convert the training and test data to tensors\n",
"x_train = torch.tensor(data['x'].astype('float32'))\n",
"y_train = torch.tensor(data['y'].transpose().astype('long'))\n",
"x_test = torch.tensor(data['x_test'].astype('float32'))\n",
"y_test = torch.tensor(data['y_test'].astype('long'))\n",
"\n",
"# load the data into a class that creates the batches\n",
"data_loader = DataLoader(TensorDataset(x_train,y_train), batch_size=100, shuffle=True, worker_init_fn=np.random.seed(1))\n",
"\n",
"# Initialize model weights\n",
"model.apply(weights_init)\n",
"\n",
"# loop over the dataset n_epoch times\n",
"n_epoch = 50\n",
"# store the loss and the % error at each epoch\n",
"losses_train = np.zeros((n_epoch))\n",
"errors_train = np.zeros((n_epoch))\n",
"losses_test = np.zeros((n_epoch))\n",
"errors_test = np.zeros((n_epoch))\n",
"\n",
"for epoch in range(n_epoch):\n",
"  # loop over batches\n",
"  for i, batch in enumerate(data_loader):\n",
"    # retrieve inputs and labels for this batch\n",
"    x_batch, y_batch = batch\n",
"    # zero the parameter gradients\n",
"    optimizer.zero_grad()\n",
"    # forward pass -- calculate model output\n",
"    pred = model(x_batch)\n",
"    # compute the loss\n",
"    loss = loss_function(pred, y_batch)\n",
"    # backward pass\n",
"    loss.backward()\n",
"    # SGD update\n",
"    optimizer.step()\n",
"\n",
"  # Run whole dataset to get statistics -- normally wouldn't do this\n",
"  pred_train = model(x_train)\n",
"  pred_test = model(x_test)\n",
"  _, predicted_train_class = torch.max(pred_train.data, 1)\n",
"  _, predicted_test_class = torch.max(pred_test.data, 1)\n",
"  errors_train[epoch] = 100 - 100 * (predicted_train_class == y_train).float().sum() / len(y_train)\n",
"  errors_test[epoch] = 100 - 100 * (predicted_test_class == y_test).float().sum() / len(y_test)\n",
"  losses_train[epoch] = loss_function(pred_train, y_train).item()\n",
"  losses_test[epoch] = loss_function(pred_test, y_test).item()\n",
"  print(f'Epoch {epoch:5d}, train loss {losses_train[epoch]:.6f}, train error {errors_train[epoch]:3.2f}, test loss {losses_test[epoch]:.6f}, test error {errors_test[epoch]:3.2f}')\n",
"\n",
"  # tell scheduler to consider updating learning rate\n",
"  scheduler.step()"
],
"metadata": {
"id": "_rX6N3VyyQTY"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Plot the errors\n",
"fig, ax = plt.subplots()\n",
"ax.plot(errors_train,'r-',label='train')\n",
"ax.plot(errors_test,'b-',label='test')\n",
"ax.set_ylim(0,100); ax.set_xlim(0,n_epoch)\n",
"ax.set_xlabel('Epoch'); ax.set_ylabel('Error')\n",
"ax.set_title('Train Error %3.2f, Test Error %3.2f'%(errors_train[-1],errors_test[-1]))\n",
"ax.legend()\n",
"plt.show()\n",
"\n",
"# Plot the losses\n",
"fig, ax = plt.subplots()\n",
"ax.plot(losses_train,'r-',label='train')\n",
"ax.plot(losses_test,'b-',label='test')\n",
"ax.set_xlim(0,n_epoch)\n",
"ax.set_xlabel('Epoch'); ax.set_ylabel('Loss')\n",
"ax.set_title('Train loss %3.2f, Test loss %3.2f'%(losses_train[-1],losses_test[-1]))\n",
"ax.legend()\n",
"plt.show()"
],
"metadata": {
"id": "yI-l6kA_EH9G"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"**TO DO**\n",
"\n",
"Play with the model -- try changing the number of layers, hidden units, learning rate, batch size, momentum or anything else you like. See if you can improve the test results.\n",
"\n",
"Is it a good idea to optimize the hyperparameters in this way? Will the final result be a good estimate of the true test performance?"
],
"metadata": {
"id": "q-yT6re6GZS4"
}
}
]
}
350
Notebooks/Chap08/8_2_Bias_Variance_Trade_Off.ipynb
Normal file
@@ -0,0 +1,350 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyPz1B8kFc21JvGTDwqniloA",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap08/8_2_Bias_Variance_Trade_Off.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"source": [
"# **Notebook 8.2: Bias-Variance Trade-Off**\n",
"\n",
"This notebook investigates the bias-variance trade-off for the toy model used throughout chapter 8 and reproduces the bias/variance trade-off curves seen in figure 8.9.\n",
"\n",
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
"\n",
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
],
"metadata": {
"id": "L6chybAVFJW2"
}
},
{
"cell_type": "code",
"source": [
"import numpy as np\n",
"import matplotlib.pyplot as plt"
],
"metadata": {
"id": "01Cu4SGZOVAi"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# The true function that we are trying to estimate, defined on [0,1]\n",
"def true_function(x):\n",
"  y = np.exp(np.sin(x*(2*3.1413)))\n",
"  return y"
],
"metadata": {
"id": "bSK2_EGyOgHu"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Generate some data points with or without noise\n",
"def generate_data(n_data, sigma_y=0.3):\n",
"  # Generate x values quasi-uniformly\n",
"  x = np.ones(n_data)\n",
"  for i in range(n_data):\n",
"    x[i] = np.random.uniform(i/n_data, (i+1)/n_data, 1)\n",
"\n",
"  # y values from running x through the function and adding noise\n",
"  y = np.ones(n_data)\n",
"  for i in range(n_data):\n",
"    y[i] = true_function(x[i])\n",
"    y[i] += np.random.normal(0, sigma_y, 1)\n",
"  return x,y\n"
],
"metadata": {
"id": "yzZr2tcJO5pq"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Draw the fitted function, together with the uncertainty used to generate the points\n",
"def plot_function(x_func, y_func, x_data=None, y_data=None, x_model=None, y_model=None, sigma_func=None, sigma_model=None):\n",
"\n",
"  fig,ax = plt.subplots()\n",
"  ax.plot(x_func, y_func, 'k-')\n",
"  if sigma_func is not None:\n",
"    ax.fill_between(x_func, y_func-2*sigma_func, y_func+2*sigma_func, color='lightgray')\n",
"\n",
"  if x_data is not None:\n",
"    ax.plot(x_data, y_data, 'o', color='#d18362')\n",
"\n",
"  if x_model is not None:\n",
"    ax.plot(x_model, y_model, '-', color='#7fe7de')\n",
"\n",
"  if sigma_model is not None:\n",
"    ax.fill_between(x_model, y_model-2*sigma_model, y_model+2*sigma_model, color='lightgray')\n",
"\n",
"  ax.set_xlim(0,1)\n",
"  ax.set_xlabel('Input, $x$')\n",
"  ax.set_ylabel('Output, $y$')\n",
"  plt.show()"
],
"metadata": {
"id": "xfq1SD_ZOi6G"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Generate true function\n",
"x_func = np.linspace(0, 1.0, 100)\n",
"y_func = true_function(x_func)\n",
"\n",
"# Generate some data points\n",
"np.random.seed(1)\n",
"sigma_func = 0.3\n",
"n_data = 15\n",
"x_data,y_data = generate_data(n_data, sigma_func)\n",
"\n",
"# Plot the function, data and uncertainty\n",
"plot_function(x_func, y_func, x_data, y_data, sigma_func=sigma_func)"
],
"metadata": {
"id": "2tP-p7B6Qnuf"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Define model -- beta is a scalar and omega has size n_hidden,1\n",
"def network(x, beta, omega):\n",
"  # Retrieve number of hidden units\n",
"  n_hidden = omega.shape[0]\n",
"\n",
"  y = np.zeros_like(x)\n",
"  for c_hidden in range(n_hidden):\n",
"    # Evaluate activations based on shifted lines (figure 8.4b-d)\n",
"    line_vals = x - c_hidden/n_hidden\n",
"    h = line_vals * (line_vals > 0)\n",
"    # Weight activations by omega parameters and sum\n",
"    y = y + omega[c_hidden] * h\n",
"  # Add bias, beta\n",
"  y = y + beta\n",
"\n",
"  return y"
],
"metadata": {
"id": "zYMLtS3nT-0y"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# This fits the n_hidden+1 parameters (see fig 8.4a) in closed form.\n",
"# If you have studied linear algebra, then you will know it is a least\n",
"# squares solution of the form (A^TA)^-1A^Tb. If you don't recognize that,\n",
"# then just take it on trust that this gives you the best possible solution.\n",
"def fit_model_closed_form(x,y,n_hidden):\n",
"  n_data = len(x)\n",
"  A = np.ones((n_data, n_hidden+1))\n",
"  for i in range(n_data):\n",
"    for j in range(1,n_hidden+1):\n",
"      A[i,j] = x[i]-(j-1)/n_hidden\n",
"      if A[i,j] < 0:\n",
"        A[i,j] = 0\n",
"\n",
"  ATA = np.matmul(np.transpose(A), A)\n",
"  ATAInv = np.linalg.inv(ATA)\n",
"  ATAInvAT = np.matmul(ATAInv, np.transpose(A))\n",
"  beta_omega = np.matmul(ATAInvAT,y)\n",
"  beta = beta_omega[0]\n",
"  omega = beta_omega[1:]\n",
"\n",
"  return beta, omega\n"
],
"metadata": {
"id": "MinJxLh1XTHx"
},
"execution_count": null,
"outputs": []
},
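{
"cell_type": "markdown",
"source": [
"A side note (added, not in the original notebook): forming and inverting $A^TA$ can be numerically fragile when it is close to singular; `np.linalg.lstsq` computes the same least-squares solution more robustly. The sketch below shows an equivalent fit."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Added sketch: equivalent least-squares fit via np.linalg.lstsq\n",
"def fit_model_lstsq(x, y, n_hidden):\n",
"  n_data = len(x)\n",
"  A = np.ones((n_data, n_hidden+1))\n",
"  for i in range(n_data):\n",
"    for j in range(1, n_hidden+1):\n",
"      A[i,j] = max(x[i] - (j-1)/n_hidden, 0.0)\n",
"  beta_omega, _, _, _ = np.linalg.lstsq(A, y, rcond=None)\n",
"  return beta_omega[0], beta_omega[1:]"
],
"metadata": {},
"execution_count": null,
"outputs": []
},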
{
"cell_type": "code",
"source": [
"# Closed form solution\n",
"beta, omega = fit_model_closed_form(x_data,y_data,n_hidden=3)\n",
"\n",
"# Get prediction from model across the graph range\n",
"x_model = np.linspace(0,1,100)\n",
"y_model = network(x_model, beta, omega)\n",
"\n",
"# Draw the function and the model\n",
"plot_function(x_func, y_func, x_data,y_data, x_model, y_model)"
],
"metadata": {
"id": "HP7fiwNFSfWz"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Run the model many times with different datasets and return the mean and variance\n",
"def get_model_mean_variance(n_data, n_datasets, n_hidden, sigma_func):\n",
"\n",
"  # Create array that stores model results in rows\n",
"  y_model_all = np.zeros((n_datasets, x_model.shape[0]))\n",
"\n",
"  for c_dataset in range(n_datasets):\n",
"    # TODO -- Generate n_data (x,y) pairs with noise standard deviation sigma_func\n",
"    # Replace this line\n",
"    x_data,y_data = np.zeros([1,n_data]),np.zeros([1,n_data])\n",
"\n",
"    # TODO -- Fit the model\n",
"    # Replace this line:\n",
"    beta = 0; omega = np.zeros([n_hidden,1])\n",
"\n",
"    # TODO -- Run the fitted model on x_model\n",
"    # Replace this line\n",
"    y_model = np.zeros_like(x_model)\n",
"\n",
"    # Store the model results\n",
"    y_model_all[c_dataset,:] = y_model\n",
"\n",
"  # Get mean and standard deviation of model\n",
"  mean_model = np.mean(y_model_all,axis=0)\n",
"  std_model = np.std(y_model_all,axis=0)\n",
"\n",
"  # Return the mean and standard deviation of the fitted model\n",
"  # (a possible completion of the TODOs is sketched in the cell below)\n",
"  return mean_model, std_model"
],
"metadata": {
"id": "bL553uSaYidy"
},
"execution_count": null,
"outputs": []
},
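{
"cell_type": "markdown",
"source": [
"The cell below is an added sketch of one possible completion of the three TODOs above; it relies on `generate_data`, `fit_model_closed_form`, `network`, and the global `x_model` defined earlier."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Added sketch: one possible completion of the TODOs above\n",
"def get_model_mean_variance_solution(n_data, n_datasets, n_hidden, sigma_func):\n",
"  y_model_all = np.zeros((n_datasets, x_model.shape[0]))\n",
"  for c_dataset in range(n_datasets):\n",
"    # Generate n_data (x,y) pairs with noise standard deviation sigma_func\n",
"    x_data, y_data = generate_data(n_data, sigma_func)\n",
"    # Fit the model in closed form\n",
"    beta, omega = fit_model_closed_form(x_data, y_data, n_hidden)\n",
"    # Run the fitted model on x_model and store the result\n",
"    y_model_all[c_dataset,:] = network(x_model, beta, omega)\n",
"  return np.mean(y_model_all, axis=0), np.std(y_model_all, axis=0)"
],
"metadata": {},
"execution_count": null,
"outputs": []
},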
{
"cell_type": "code",
"source": [
"# Let's generate N random datasets, fit the model N times and look at the mean and variance\n",
"n_datasets = 100\n",
"n_data = 15\n",
"sigma_func = 0.3\n",
"n_hidden = 5\n",
"\n",
"# Get mean and variance of fitted model\n",
"np.random.seed(1)\n",
"mean_model, std_model = get_model_mean_variance(n_data, n_datasets, n_hidden, sigma_func)\n",
"\n",
"# Plot the results\n",
"plot_function(x_func, y_func, x_data,y_data, x_model, mean_model, sigma_model=std_model)"
],
"metadata": {
"id": "Wxk64t2SoX9c"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# TODO -- Experiment with changing the number of data points and the number of hidden variables\n",
"# in the model. Get a feeling for what happens in terms of the bias (squared deviation between cyan and black lines)\n",
"# and the variance (gray region) as we manipulate these quantities."
],
"metadata": {
"id": "QO6mFaKNJ3J_"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Plot the bias and variance as a function of capacity\n",
"hidden_variables = [1,2,3,4,5,6,7,8,9,10,11,12]\n",
"bias = np.zeros((len(hidden_variables),1))\n",
"variance = np.zeros((len(hidden_variables),1))\n",
"\n",
"n_datasets = 100\n",
"n_data = 15\n",
"sigma_func = 0.3\n",
"n_hidden = 5\n",
"\n",
"# Set random seed so that we get the same result every time\n",
"np.random.seed(1)\n",
"\n",
"for c_hidden in range(len(hidden_variables)):\n",
"  # Get mean and variance of fitted model\n",
"  mean_model, std_model = get_model_mean_variance(n_data, n_datasets, hidden_variables[c_hidden], sigma_func)\n",
"  # TODO -- Estimate bias and variance\n",
"  # Replace these lines (one possible estimate is sketched after this cell)\n",
"\n",
"  # Compute variance -- average of the model variance (average squared deviation of fitted models around mean fitted model)\n",
"  variance[c_hidden] = 0\n",
"  # Compute bias (average squared deviation of mean fitted model around true function)\n",
"  bias[c_hidden] = 0\n",
"\n",
"# Plot the results\n",
"fig,ax = plt.subplots()\n",
"ax.plot(hidden_variables, variance, 'k-')\n",
"ax.plot(hidden_variables, bias, 'r-')\n",
"ax.plot(hidden_variables, variance+bias, 'g-')\n",
"ax.set_xlim(0,12)\n",
"ax.set_ylim(0,0.5)\n",
"ax.set_xlabel(\"Model capacity\")\n",
"ax.set_ylabel(\"Bias and variance\")\n",
"ax.legend(['Variance', 'Bias', 'Bias + Variance'])\n",
"plt.show()\n"
],
"metadata": {
"id": "ICKjqAlx3Ka9"
},
"execution_count": null,
"outputs": []
},
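{
"cell_type": "markdown",
"source": [
"The cell below is an added sketch of one way to fill in the bias and variance estimates above: the variance term averages `std_model**2` across positions, and the bias term averages the squared deviation of the mean fitted model from the true function."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Added sketch: one possible estimate of the two terms, using the\n",
"# mean_model and std_model from the last call above\n",
"y_true_grid = true_function(x_model)\n",
"print(\"variance estimate: %3.3f\" % np.mean(std_model ** 2))\n",
"print(\"bias estimate: %3.3f\" % np.mean((mean_model - y_true_grid) ** 2))"
],
"metadata": {},
"execution_count": null,
"outputs": []
},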
{
"cell_type": "code",
"source": [],
"metadata": {
"id": "WKUyOAywL_b2"
},
"execution_count": null,
"outputs": []
}
]
}
270
Notebooks/Chap08/8_3_Double_Descent.ipynb
Normal file
@@ -0,0 +1,270 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"gpuType": "T4",
"authorship_tag": "ABX9TyN/KUpEObCKnHZ/4Onp5sHG",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap08/8_3_Double_Descent.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"source": [
"# **Notebook 8.3: Double Descent**\n",
"\n",
"This notebook investigates double descent as described in section 8.4 of the book.\n",
"\n",
"It uses the MNIST-1D database which can be found at https://github.com/greydanus/mnist1d\n",
"\n",
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
"\n",
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
],
"metadata": {
"id": "L6chybAVFJW2"
}
},
{
"cell_type": "code",
"source": [
"# Run this if you're in a Colab to make a local copy of the MNIST 1D repository\n",
"!git clone https://github.com/greydanus/mnist1d"
],
"metadata": {
"id": "fn9BP5N5TguP"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"import torch, torch.nn as nn\n",
"from torch.utils.data import TensorDataset, DataLoader\n",
"from torch.optim.lr_scheduler import StepLR\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"import mnist1d\n",
"import random\n",
"random.seed(0)\n",
"\n",
"# Try attaching to GPU -- use \"Change runtime type\" to switch to a GPU\n",
"DEVICE = str(torch.device('cuda' if torch.cuda.is_available() else 'cpu'))\n",
"print('Using:', DEVICE)"
],
"metadata": {
"id": "hFxuHpRqTgri"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"args = mnist1d.data.get_dataset_args()\n",
"args.num_samples = 8000\n",
"args.train_split = 0.5\n",
"args.corr_noise_scale = 0.25\n",
"args.iid_noise_scale = 2e-2\n",
"data = mnist1d.data.get_dataset(args, path='./mnist1d_data.pkl', download=False, regenerate=True)\n",
"\n",
"# Add 15% noise to training labels\n",
"for c_y in range(len(data['y'])):\n",
"  random_number = random.random()\n",
"  if random_number < 0.15:\n",
"    random_int = int(random.random() * 10)\n",
"    data['y'][c_y] = random_int\n",
"\n",
"# The training and test inputs and outputs are in\n",
"# data['x'], data['y'], data['x_test'], and data['y_test']\n",
"print(\"Examples in training set: {}\".format(len(data['y'])))\n",
"print(\"Examples in test set: {}\".format(len(data['y_test'])))\n",
"print(\"Length of each example: {}\".format(data['x'].shape[-1]))"
],
"metadata": {
"id": "PW2gyXL5UkLU"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Initialize the parameters with He initialization\n",
"def weights_init(layer_in):\n",
"  if isinstance(layer_in, nn.Linear):\n",
"    nn.init.kaiming_uniform_(layer_in.weight)\n",
"    layer_in.bias.data.fill_(0.0)\n",
"\n",
"# Return an initialized model with two hidden layers and n_hidden hidden units at each\n",
"def get_model(n_hidden):\n",
"\n",
"  D_i = 40 # Input dimensions\n",
"  D_k = n_hidden # Hidden dimensions\n",
"  D_o = 10 # Output dimensions\n",
"\n",
"  # Define a model with two hidden layers of size n_hidden\n",
"  # and ReLU activations between them\n",
"  model = nn.Sequential(\n",
"    nn.Linear(D_i, D_k),\n",
"    nn.ReLU(),\n",
"    nn.Linear(D_k, D_k),\n",
"    nn.ReLU(),\n",
"    nn.Linear(D_k, D_o))\n",
"\n",
"  # Initialize the weights\n",
"  model.apply(weights_init)\n",
"\n",
"  # Return the model\n",
"  return model"
],
"metadata": {
"id": "hAIvZOAlTnk9"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"def fit_model(model, data):\n",
"\n",
"  # choose cross entropy loss function (equation 5.24)\n",
"  loss_function = torch.nn.CrossEntropyLoss()\n",
"  # construct SGD optimizer and initialize learning rate and momentum\n",
"  # optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n",
"  optimizer = torch.optim.SGD(model.parameters(), lr = 0.01, momentum=0.9)\n",
"\n",
"\n",
"  # convert the training and test data to tensors\n",
"  x_train = torch.tensor(data['x'].astype('float32'))\n",
"  y_train = torch.tensor(data['y'].transpose().astype('long'))\n",
"  x_test = torch.tensor(data['x_test'].astype('float32'))\n",
"  y_test = torch.tensor(data['y_test'].astype('long'))\n",
"\n",
"  # load the data into a class that creates the batches\n",
"  data_loader = DataLoader(TensorDataset(x_train,y_train), batch_size=100, shuffle=True, worker_init_fn=np.random.seed(1))\n",
"\n",
"  # loop over the dataset n_epoch times\n",
"  n_epoch = 1000\n",
"\n",
"  for epoch in range(n_epoch):\n",
"    # loop over batches\n",
"    for i, batch in enumerate(data_loader):\n",
"      # retrieve inputs and labels for this batch\n",
"      x_batch, y_batch = batch\n",
"      # zero the parameter gradients\n",
"      optimizer.zero_grad()\n",
"      # forward pass -- calculate model output\n",
"      pred = model(x_batch)\n",
"      # compute the loss\n",
"      loss = loss_function(pred, y_batch)\n",
"      # backward pass\n",
"      loss.backward()\n",
"      # SGD update\n",
"      optimizer.step()\n",
"\n",
"    # Run whole dataset to get statistics -- normally wouldn't do this\n",
"    pred_train = model(x_train)\n",
"    pred_test = model(x_test)\n",
"    _, predicted_train_class = torch.max(pred_train.data, 1)\n",
"    _, predicted_test_class = torch.max(pred_test.data, 1)\n",
"    errors_train = 100 - 100 * (predicted_train_class == y_train).float().sum() / len(y_train)\n",
"    errors_test = 100 - 100 * (predicted_test_class == y_test).float().sum() / len(y_test)\n",
"    losses_train = loss_function(pred_train, y_train).item()\n",
"    losses_test = loss_function(pred_test, y_test).item()\n",
"    if epoch % 100 == 0:\n",
"      print(f'Epoch {epoch:5d}, train loss {losses_train:.6f}, train error {errors_train:3.2f}, test loss {losses_test:.6f}, test error {errors_test:3.2f}')\n",
"\n",
"  return errors_train, errors_test\n"
],
"metadata": {
"id": "AazlQhheWmHk"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"The following code produces the double descent curve by training the model with different numbers of hidden units and plotting the test error.\n",
"\n",
"TO DO:\n",
"\n",
"*Before* you run the code, and considering that there are 4000 training examples, predict:<br>\n",
"\n",
"1. At what capacity do you think the training error will become zero?\n",
"2. At what capacity do you expect the first minimum of the double descent curve to appear?\n",
"3. At what capacity do you expect the maximum of the double descent curve to appear?\n",
"\n",
"(A parameter-counting aid for these predictions is sketched in the cell below.)"
],
"metadata": {
"id": "IcP4UPMudxPS"
}
},
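{
"cell_type": "markdown",
"source": [
"As a rough aid for these predictions (an added sketch, not part of the original notebook): the training error typically reaches zero near the interpolation threshold, where the number of parameters is comparable to the number of training examples (4000 here). The cell below tabulates the parameter count for a few widths."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Added sketch: parameter counts for the two-hidden-layer model\n",
"for n_hidden in [50, 100, 150, 200, 400]:\n",
"  n_params = sum(p.numel() for p in get_model(n_hidden).parameters())\n",
"  print(\"width %3d: %6d parameters\" % (n_hidden, n_params))"
],
"metadata": {},
"execution_count": null,
"outputs": []
},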
{
"cell_type": "code",
"source": [
"# This code will take a while (~30 mins on GPU) to run! Go and make a cup of coffee!\n",
"\n",
"hidden_variables = np.array([2,4,6,8,10,14,18,22,26,30,35,40,45,50,55,60,70,80,90,100,120,140,160,180,200,250,300,400])\n",
"# Use float arrays here -- np.zeros_like(hidden_variables) would be integer-typed and truncate the errors\n",
"errors_train_all = np.zeros(len(hidden_variables))\n",
"errors_test_all = np.zeros(len(hidden_variables))\n",
"\n",
"# For each hidden variable size\n",
"for c_hidden in range(len(hidden_variables)):\n",
"  print(f'Training model with {hidden_variables[c_hidden]:3d} hidden variables')\n",
"  # Get a model\n",
"  model = get_model(hidden_variables[c_hidden])\n",
"  # Train the model\n",
"  errors_train, errors_test = fit_model(model, data)\n",
"  # Store the results\n",
"  errors_train_all[c_hidden] = errors_train\n",
"  errors_test_all[c_hidden] = errors_test"
],
"metadata": {
"id": "K4OmBZGHWXpk"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Plot the results\n",
"fig, ax = plt.subplots()\n",
"ax.plot(hidden_variables, errors_train_all,'r-',label='train')\n",
"ax.plot(hidden_variables, errors_test_all,'b-',label='test')\n",
"ax.set_ylim(0,100)\n",
"ax.set_xlabel('No. hidden variables'); ax.set_ylabel('Error')\n",
"ax.legend()\n",
"plt.show()\n"
],
"metadata": {
"id": "Rw-iRboTXbck"
},
"execution_count": null,
"outputs": []
}
]
}
236
Notebooks/Chap08/8_4_High_Dimensional_Spaces.ipynb
Normal file
@@ -0,0 +1,236 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyPXPDEQiwNw+kYhWfg4kjz6",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap08/8_4_High_Dimensional_Spaces.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"source": [
"# **Notebook 8.4: High-dimensional spaces**\n",
"\n",
"This notebook investigates the strange properties of high-dimensional spaces as discussed in the notes at the end of chapter 8.\n",
"\n",
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
"\n",
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
],
"metadata": {
"id": "EjLK-kA1KnYX"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "4ESMmnkYEVAb"
},
"outputs": [],
"source": [
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"import scipy.special as sci"
]
},
{
"cell_type": "markdown",
"source": [
"# How close are points in high dimensions?\n",
"\n",
"In this part of the notebook, we investigate how close random points are in 2D, 100D, and 1000D. In each case, we generate 1000 points and calculate the Euclidean distance between each pair."
],
"metadata": {
"id": "MonbPEitLNgN"
}
},
{
"cell_type": "code",
"source": [
"# Fix the random seed so we all have the same random numbers\n",
"np.random.seed(0)\n",
"n_data = 1000\n",
"# Create 1000 data examples (columns) each with 2 dimensions (rows)\n",
"n_dim = 2\n",
"x_2D = np.random.normal(size=(n_dim,n_data))\n",
"# Create 1000 data examples (columns) each with 100 dimensions (rows)\n",
"n_dim = 100\n",
"x_100D = np.random.normal(size=(n_dim,n_data))\n",
"# Create 1000 data examples (columns) each with 1000 dimensions (rows)\n",
"n_dim = 1000\n",
"x_1000D = np.random.normal(size=(n_dim,n_data))"
],
"metadata": {
"id": "vZSHVmcWEk14"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"def distance_ratio(x):\n",
"  # TODO -- replace the two lines below to calculate the largest and smallest Euclidean distance between\n",
"  # the data points in the columns of x. DO NOT include the distance between the data point\n",
"  # and itself (which is obviously zero). One possible solution is sketched in the cell below.\n",
"  smallest_dist = 1.0\n",
"  largest_dist = 1.0\n",
"\n",
"  # Calculate the ratio and return\n",
"  dist_ratio = largest_dist / smallest_dist\n",
"  return dist_ratio"
],
"metadata": {
"id": "PhVmnUs8ErD9"
},
"execution_count": null,
"outputs": []
},
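{
"cell_type": "markdown",
"source": [
"The cell below is an added sketch of one possible completion of `distance_ratio`, using the identity $\\|a-b\\|^2 = \\|a\\|^2 + \\|b\\|^2 - 2a^{T}b$ to get all pairwise distances at once."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Added sketch: pairwise Euclidean distances between columns of x,\n",
"# excluding the zero self-distances on the diagonal\n",
"def distance_ratio_solution(x):\n",
"  sq_norms = np.sum(x * x, axis=0)\n",
"  sq_dist = sq_norms[:,None] + sq_norms[None,:] - 2 * (x.T @ x)\n",
"  sq_dist = np.maximum(sq_dist, 0.0)   # guard against tiny negative values\n",
"  np.fill_diagonal(sq_dist, np.nan)    # ignore self-distances\n",
"  return np.sqrt(np.nanmax(sq_dist)) / np.sqrt(np.nanmin(sq_dist))"
],
"metadata": {},
"execution_count": null,
"outputs": []
},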
{
"cell_type": "code",
"source": [
"print('Ratio of largest to smallest distance 2D: %3.3f'%(distance_ratio(x_2D)))\n",
"print('Ratio of largest to smallest distance 100D: %3.3f'%(distance_ratio(x_100D)))\n",
"print('Ratio of largest to smallest distance 1000D: %3.3f'%(distance_ratio(x_1000D)))\n"
],
"metadata": {
"id": "0NdPxfn5GQuJ"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"If you did this right, you will see that the distances between the nearest and farthest pairs of points in high dimensions are almost the same."
],
"metadata": {
"id": "uT68c0k8uBxs"
}
},
{
"cell_type": "markdown",
"source": [
"# Volume of a hypersphere\n",
"\n",
"In the second part of this notebook we calculate the volume of a hypersphere of radius 0.5 (i.e., of diameter 1) as a function of the dimension. Note that you can check your answer by doing the calculation for 2D using the standard formula for the area of a circle and making sure it matches."
],
"metadata": {
"id": "b2FYKV1SL4Z7"
}
},
{
"cell_type": "code",
"source": [
"def volume_of_hypersphere(diameter, dimensions):\n",
"  # Formula given in Problem 8.7 of the book\n",
"  # You will need sci.gamma() (scipy.special was imported as sci above)\n",
"  # Check out: https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.gamma.html\n",
"  # Also use this value for pi\n",
"  pi = np.pi\n",
"  # TODO replace this code with the formula for the volume of a hypersphere\n",
"  # (one possible solution is sketched in the cell below)\n",
"  volume = 1.0\n",
"\n",
"  return volume\n"
],
"metadata": {
"id": "CZoNhD8XJaHR"
},
"execution_count": null,
"outputs": []
},
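{
"cell_type": "markdown",
"source": [
"The cell below is an added sketch of one possible completion, using the standard formula $V = \\pi^{D/2} r^{D} / \\Gamma(D/2+1)$ with $r$ equal to half the diameter; the 2D case can be checked against the area of a circle."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Added sketch: standard hypersphere volume formula\n",
"def volume_of_hypersphere_solution(diameter, dimensions):\n",
"  radius = diameter / 2.0\n",
"  return (np.pi ** (dimensions/2.0)) * (radius ** dimensions) / sci.gamma(dimensions/2.0 + 1.0)\n",
"\n",
"# Check against the area of a circle of diameter 1: pi * 0.5^2\n",
"print(volume_of_hypersphere_solution(1.0, 2), np.pi * 0.5 ** 2)"
],
"metadata": {},
"execution_count": null,
"outputs": []
},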
{
"cell_type": "code",
"source": [
"diameter = 1.0\n",
"for c_dim in range(1,11):\n",
"  print(\"Volume of unit diameter hypersphere in %d dimensions is %3.3f\"%(c_dim, volume_of_hypersphere(diameter, c_dim)))"
],
"metadata": {
"id": "fNTBlg_GPEUh"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"You should see that the volume decreases to almost nothing in high dimensions. All of the volume is in the corners of the unit hypercube (which always has volume 1)."
],
"metadata": {
"id": "PtaeGSNBunJl"
}
},
{
"cell_type": "markdown",
"source": [
"# Proportion of hypersphere in outer shell\n",
"\n",
"In the third part of the notebook you will calculate what proportion of the volume of a hypersphere is in the outer 1% of the radius/diameter. Calculate the volume of a hypersphere, then the volume of a hypersphere with 0.99 of the radius, and then figure out the ratio."
],
"metadata": {
"id": "GdyMeOBmoXyF"
}
},
{
"cell_type": "code",
"source": [
"def get_prop_of_volume_in_outer_1_percent(dimension):\n",
"  # TODO -- replace this line (one possible solution is sketched in the cell below)\n",
"  proportion = 1.0\n",
"\n",
"  return proportion"
],
"metadata": {
"id": "8_CxZ2AIpQ8w"
},
"execution_count": null,
"outputs": []
},
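{
"cell_type": "markdown",
"source": [
"The cell below is an added sketch of one possible completion: volumes scale as $r^{D}$, so the inner sphere with 0.99 of the radius holds $0.99^{D}$ of the volume and the outer shell holds the rest."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Added sketch: proportion of volume in the outer 1% of the radius\n",
"def get_prop_of_volume_in_outer_1_percent_solution(dimension):\n",
"  return 1.0 - 0.99 ** dimension"
],
"metadata": {},
"execution_count": null,
"outputs": []
},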
{
"cell_type": "code",
"source": [
"# While we're here, let's look at how much of the volume is in the outer 1% of the radius\n",
"for c_dim in [1,2,10,20,50,100,150,200,250,300]:\n",
"  print('Proportion of volume in outer 1 percent of radius in %d dimensions = %3.3f'%(c_dim, get_prop_of_volume_in_outer_1_percent(c_dim)))"
],
"metadata": {
"id": "LtMDIn2qPVfJ"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"You should see that by the time we get to 300 dimensions most of the volume is in the outer 1 percent.<br><br>\n",
"\n",
"The conclusion of all of this is that in high dimensions you should be sceptical of your intuitions about how things work. I have tried to visualize many things in one or two dimensions in the book, but you should also be sceptical about these visualizations!"
],
"metadata": {
"id": "n_FVeb-ZwzxD"
}
}
]
}
538
Notebooks/Chap09/9_1_L2_Regularization.ipynb
Normal file
@@ -0,0 +1,538 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyPJzymRTuvoWggIskM2Kamc",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap09/9_1_L2_Regularization.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"source": [
"# **Notebook 9.1: L2 Regularization**\n",
"\n",
"This notebook investigates adding L2 regularization to the loss function for the Gabor model as in figure 9.1.\n",
"\n",
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
"\n",
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n"
],
"metadata": {
"id": "el8l05WQEO46"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "xhmIOLiZELV_"
},
"outputs": [],
"source": [
"# import libraries\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"from matplotlib import cm\n",
"from matplotlib.colors import ListedColormap"
]
},
{
"cell_type": "code",
"source": [
"# Let's create our training data of 30 pairs {x_i, y_i}\n",
"# We'll try to fit the Gabor model to these data\n",
"data = np.array([[-1.920e+00,-1.422e+01,1.490e+00,-1.940e+00,-2.389e+00,-5.090e+00,\n",
" -8.861e+00,3.578e+00,-6.010e+00,-6.995e+00,3.634e+00,8.743e-01,\n",
" -1.096e+01,4.073e-01,-9.467e+00,8.560e+00,1.062e+01,-1.729e-01,\n",
" 1.040e+01,-1.261e+01,1.574e-01,-1.304e+01,-2.156e+00,-1.210e+01,\n",
" -1.119e+01,2.902e+00,-8.220e+00,-1.179e+01,-8.391e+00,-4.505e+00],\n",
" [-1.051e+00,-2.482e-02,8.896e-01,-4.943e-01,-9.371e-01,4.306e-01,\n",
" 9.577e-03,-7.944e-02,1.624e-01,-2.682e-01,-3.129e-01,8.303e-01,\n",
" -2.365e-02,5.098e-01,-2.777e-01,3.367e-01,1.927e-01,-2.222e-01,\n",
" 6.352e-02,6.888e-03,3.224e-02,1.091e-02,-5.706e-01,-5.258e-02,\n",
" -3.666e-02,1.709e-01,-4.805e-02,2.008e-01,-1.904e-01,5.952e-01]])"
],
"metadata": {
"id": "4cRkrh9MZ58Z"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Gabor model definition\n",
"def model(phi,x):\n",
"  sin_component = np.sin(phi[0] + 0.06 * phi[1] * x)\n",
"  gauss_component = np.exp(-(phi[0] + 0.06 * phi[1] * x) * (phi[0] + 0.06 * phi[1] * x) / 32)\n",
"  y_pred = sin_component * gauss_component\n",
"  return y_pred"
],
"metadata": {
"id": "WQUERmb2erAe"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Draw model\n",
"def draw_model(data,model,phi,title=None):\n",
"  x_model = np.arange(-15,15,0.1)\n",
"  y_model = model(phi,x_model)\n",
"\n",
"  fig, ax = plt.subplots()\n",
"  ax.plot(data[0,:],data[1,:],'bo')\n",
"  ax.plot(x_model,y_model,'m-')\n",
"  ax.set_xlim([-15,15]); ax.set_ylim([-1,1])\n",
"  ax.set_xlabel('x'); ax.set_ylabel('y')\n",
"  if title is not None:\n",
"    ax.set_title(title)\n",
"  plt.show()"
],
"metadata": {
"id": "qFRe9POHF2le"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Initialize the parameters and draw the model\n",
"phi = np.zeros((2,1))\n",
"phi[0] = -5 # Horizontal offset\n",
"phi[1] = 25 # Frequency\n",
"draw_model(data,model,phi, \"Initial parameters\")\n"
],
"metadata": {
"id": "TXx1Tpd1Tl-I"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"Now let's compute the sum of squares loss for the training data."
],
"metadata": {
"id": "QU5mdGvpTtEG"
}
},
{
"cell_type": "code",
"source": [
"def compute_loss(data_x, data_y, model, phi):\n",
"  pred_y = model(phi, data_x)\n",
"  loss = np.sum((pred_y-data_y)*(pred_y-data_y))\n",
"  return loss"
],
"metadata": {
"id": "I7dqTY2Gg7CR"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"Now let's plot the whole loss function."
],
"metadata": {
"id": "F3trnavPiHpH"
}
},
{
"cell_type": "code",
"source": [
"# Define pretty colormap\n",
"my_colormap_vals_hex =('2a0902', '2b0a03', '2c0b04', '2d0c05', '2e0c06', '2f0d07', '300d08', '310e09', '320f0a', '330f0b', '34100b', '35110c', '36110d', '37120e', '38120f', '39130f', '3a1410', '3b1411', '3c1511', '3d1612', '3e1613', '3f1713', '401714', '411814', '421915', '431915', '451a16', '461b16', '471b17', '481c17', '491d18', '4a1d18', '4b1e19', '4c1f19', '4d1f1a', '4e201b', '50211b', '51211c', '52221c', '53231d', '54231d', '55241e', '56251e', '57261f', '58261f', '592720', '5b2821', '5c2821', '5d2922', '5e2a22', '5f2b23', '602b23', '612c24', '622d25', '632e25', '652e26', '662f26', '673027', '683027', '693128', '6a3229', '6b3329', '6c342a', '6d342a', '6f352b', '70362c', '71372c', '72372d', '73382e', '74392e', '753a2f', '763a2f', '773b30', '783c31', '7a3d31', '7b3e32', '7c3e33', '7d3f33', '7e4034', '7f4134', '804235', '814236', '824336', '834437', '854538', '864638', '874739', '88473a', '89483a', '8a493b', '8b4a3c', '8c4b3c', '8d4c3d', '8e4c3e', '8f4d3f', '904e3f', '924f40', '935041', '945141', '955242', '965343', '975343', '985444', '995545', '9a5646', '9b5746', '9c5847', '9d5948', '9e5a49', '9f5a49', 'a05b4a', 'a15c4b', 'a35d4b', 'a45e4c', 'a55f4d', 'a6604e', 'a7614e', 'a8624f', 'a96350', 'aa6451', 'ab6552', 'ac6552', 'ad6653', 'ae6754', 'af6855', 'b06955', 'b16a56', 'b26b57', 'b36c58', 'b46d59', 'b56e59', 'b66f5a', 'b7705b', 'b8715c', 'b9725d', 'ba735d', 'bb745e', 'bc755f', 'bd7660', 'be7761', 'bf7862', 'c07962', 'c17a63', 'c27b64', 'c27c65', 'c37d66', 'c47e67', 'c57f68', 'c68068', 'c78169', 'c8826a', 'c9836b', 'ca846c', 'cb856d', 'cc866e', 'cd876f', 'ce886f', 'ce8970', 'cf8a71', 'd08b72', 'd18c73', 'd28d74', 'd38e75', 'd48f76', 'd59077', 'd59178', 'd69279', 'd7937a', 'd8957b', 'd9967b', 'da977c', 'da987d', 'db997e', 'dc9a7f', 'dd9b80', 'de9c81', 'de9d82', 'df9e83', 'e09f84', 'e1a185', 'e2a286', 'e2a387', 'e3a488', 'e4a589', 'e5a68a', 'e5a78b', 'e6a88c', 'e7aa8d', 'e7ab8e', 'e8ac8f', 'e9ad90', 'eaae91', 'eaaf92', 'ebb093', 'ecb295', 'ecb396', 'edb497', 'eeb598', 'eeb699', 'efb79a', 'efb99b', 'f0ba9c', 'f1bb9d', 'f1bc9e', 'f2bd9f', 'f2bfa1', 'f3c0a2', 'f3c1a3', 'f4c2a4', 'f5c3a5', 'f5c5a6', 'f6c6a7', 'f6c7a8', 'f7c8aa', 'f7c9ab', 'f8cbac', 'f8ccad', 'f8cdae', 'f9ceb0', 'f9d0b1', 'fad1b2', 'fad2b3', 'fbd3b4', 'fbd5b6', 'fbd6b7', 'fcd7b8', 'fcd8b9', 'fcdaba', 'fddbbc', 'fddcbd', 'fddebe', 'fddfbf', 'fee0c1', 'fee1c2', 'fee3c3', 'fee4c5', 'ffe5c6', 'ffe7c7', 'ffe8c9', 'ffe9ca', 'ffebcb', 'ffeccd', 'ffedce', 'ffefcf', 'fff0d1', 'fff2d2', 'fff3d3', 'fff4d5', 'fff6d6', 'fff7d8', 'fff8d9', 'fffada', 'fffbdc', 'fffcdd', 'fffedf', 'ffffe0')\n",
"my_colormap_vals_dec = np.array([int(element,base=16) for element in my_colormap_vals_hex])\n",
"r = np.floor(my_colormap_vals_dec/(256*256))\n",
"g = np.floor((my_colormap_vals_dec - r * 256 * 256)/256)\n",
"b = np.floor(my_colormap_vals_dec - r * 256 * 256 - g * 256)\n",
"my_colormap = ListedColormap(np.vstack((r,g,b)).transpose()/255.0)\n",
"\n",
"def draw_loss_function(compute_loss, data, model, my_colormap, phi_iters = None):\n",
"\n",
"  # Make grid of offset/frequency values to plot\n",
"  offsets_mesh, freqs_mesh = np.meshgrid(np.arange(-10,10.0,0.1), np.arange(2.5,22.5,0.1))\n",
"  loss_mesh = np.zeros_like(freqs_mesh)\n",
"  # Compute loss for every set of parameters\n",
"  for idslope, slope in np.ndenumerate(freqs_mesh):\n",
"    loss_mesh[idslope] = compute_loss(data[0,:], data[1,:], model, np.array([[offsets_mesh[idslope]], [slope]]))\n",
"\n",
"  fig,ax = plt.subplots()\n",
"  fig.set_size_inches(8,8)\n",
"  ax.contourf(offsets_mesh,freqs_mesh,loss_mesh,256,cmap=my_colormap)\n",
"  ax.contour(offsets_mesh,freqs_mesh,loss_mesh,20,colors=['#80808080'])\n",
"  if phi_iters is not None:\n",
"    ax.plot(phi_iters[0,:], phi_iters[1,:],'go-')\n",
"  ax.set_ylim([2.5,22.5])\n",
"  ax.set_xlabel('Offset $\\phi_{0}$'); ax.set_ylabel('Frequency, $\\phi_{1}$')\n",
"  plt.show()"
],
"metadata": {
"id": "K-NTHpAAHlCl"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"draw_loss_function(compute_loss, data, model, my_colormap)"
],
"metadata": {
"id": "l8HbvIupnTME"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"Now let's compute the gradient vector for a given set of parameters:\n",
"\n",
"\\begin{equation}\n",
"\\frac{\\partial L}{\\partial \\boldsymbol\\phi} = \\begin{bmatrix}\\frac{\\partial L}{\\partial \\phi_0} \\\\\\frac{\\partial L}{\\partial \\phi_1} \\end{bmatrix}.\n",
"\\end{equation}"
],
"metadata": {
"id": "s9Duf05WqqSC"
}
},
{
"cell_type": "code",
"source": [
"# These came from writing out the expression for the sum of squares loss and taking the\n",
"# derivative with respect to phi0 and phi1. It was a lot of hassle to get it right!\n",
"def gabor_deriv_phi0(data_x,data_y,phi0, phi1):\n",
"  x = 0.06 * phi1 * data_x + phi0\n",
"  y = data_y\n",
"  cos_component = np.cos(x)\n",
"  sin_component = np.sin(x)\n",
"  gauss_component = np.exp(-0.5 * x * x / 16)\n",
"  deriv = cos_component * gauss_component - sin_component * gauss_component * x / 16\n",
"  deriv = 2 * deriv * (sin_component * gauss_component - y)\n",
"  return np.sum(deriv)\n",
"\n",
"def gabor_deriv_phi1(data_x, data_y,phi0, phi1):\n",
"  x = 0.06 * phi1 * data_x + phi0\n",
"  y = data_y\n",
"  cos_component = np.cos(x)\n",
"  sin_component = np.sin(x)\n",
"  gauss_component = np.exp(-0.5 * x * x / 16)\n",
"  deriv = 0.06 * data_x * cos_component * gauss_component - 0.06 * data_x * sin_component * gauss_component * x / 16\n",
"  deriv = 2 * deriv * (sin_component * gauss_component - y)\n",
"  return np.sum(deriv)\n",
"\n",
"def compute_gradient(data_x, data_y, phi):\n",
"  dl_dphi0 = gabor_deriv_phi0(data_x, data_y, phi[0],phi[1])\n",
"  dl_dphi1 = gabor_deriv_phi1(data_x, data_y, phi[0],phi[1])\n",
"  # Return the gradient\n",
"  return np.array([[dl_dphi0],[dl_dphi1]])"
],
"metadata": {
"id": "UpswmkL2qwBT"
},
"execution_count": null,
"outputs": []
},
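{
"cell_type": "markdown",
"source": [
"Since these derivatives were \"a lot of hassle to get right\", the added sketch below double-checks them against a finite-difference estimate at an arbitrary test point (not part of the original notebook)."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Added sketch: finite-difference check of the analytic Gabor gradient\n",
"phi_check = np.array([[0.5],[10.0]])\n",
"eps = 1e-6\n",
"analytic = compute_gradient(data[0,:], data[1,:], phi_check)\n",
"for d in range(2):\n",
"  phi_plus = np.copy(phi_check)\n",
"  phi_plus[d] += eps\n",
"  fd = (compute_loss(data[0,:], data[1,:], model, phi_plus) - compute_loss(data[0,:], data[1,:], model, phi_check)) / eps\n",
"  print(\"phi_%d: analytic %f, finite difference %f\" % (d, analytic[d,0], fd))"
],
"metadata": {},
"execution_count": null,
"outputs": []
},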
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now we are ready to find the minimum. For simplicity, we'll just use regular (non-stochastic) gradient descent with a fixed learning rate."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "5EIjMM9Fw2eT"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def gradient_descent_step(phi, data, model):\n",
|
||||
" # Step 1: Compute the gradient\n",
|
||||
" gradient = compute_gradient(data[0,:],data[1,:], phi)\n",
|
||||
" # Step 2: Update the parameters -- note we want to search in the negative (downhill direction)\n",
|
||||
" alpha = 0.1\n",
|
||||
" phi = phi - alpha * gradient\n",
|
||||
" return phi"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YVq6rmaWRD2M"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Initialize the parameters\n",
|
||||
"n_steps = 41\n",
|
||||
"phi_all = np.zeros((2,n_steps+1))\n",
|
||||
"phi_all[0,0] = 2.6\n",
|
||||
"phi_all[1,0] = 8.5\n",
|
||||
"\n",
|
||||
"# Measure loss and draw initial model\n",
|
||||
"loss = compute_loss(data[0,:], data[1,:], model, phi_all[:,0:1])\n",
|
||||
"draw_model(data,model,phi_all[:,0:1], \"Initial parameters, Loss = %f\"%(loss))\n",
|
||||
"\n",
|
||||
"for c_step in range (n_steps):\n",
|
||||
" # Do gradient descent step\n",
|
||||
" phi_all[:,c_step+1:c_step+2] = gradient_descent_step(phi_all[:,c_step:c_step+1],data, model)\n",
|
||||
" # Measure loss and draw model every 4th step\n",
|
||||
" if c_step % 8 == 0:\n",
|
||||
" loss = compute_loss(data[0,:], data[1,:], model, phi_all[:,c_step+1:c_step+2])\n",
|
||||
" draw_model(data,model,phi_all[:,c_step+1], \"Iteration %d, loss = %f\"%(c_step+1,loss))\n",
|
||||
"\n",
|
||||
"draw_loss_function(compute_loss, data, model, my_colormap, phi_all)\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "tOLd0gtdRLLS"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Unfortunately, when we start from this position, the solution descends to a local minimum and the final model doesn't fit well.<br><br>\n",
|
||||
"\n",
|
||||
"But what if we had some weak knowledge that the solution was in the vicinity of $\\phi_0=0.0$, $\\phi_{1} = 12.5$ (the center of the plot)?\n",
|
||||
"\n",
|
||||
"Let's add a term to the loss function that penalizes solutions that deviate from this point. \n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
"L'[\\boldsymbol\\phi] = L[\\boldsymbol\\phi]+ \\lambda\\cdot \\Bigl(\\phi_{0}^2+(\\phi_1-12.5)^2\\Bigr)\n",
|
||||
"\\end{equation}\n",
|
||||
"\n",
|
||||
"where $\\lambda$ controls the relative importance of the original loss and the regularization term"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "3kKW2D5vEwhA"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Computes the regularization term\n",
|
||||
"def compute_reg_term(phi0,phi1):\n",
|
||||
" # TODO compute the regularization term (term in large brackets in the above equstion)\n",
|
||||
" # Replace this line\n",
|
||||
" reg_term = 0.0\n",
|
||||
"\n",
|
||||
" return reg_term ;\n",
|
||||
"\n",
|
||||
"# Define the loss function\n",
|
||||
"# Note I called the weighting lambda_ to avoid confusing it with python lambda functions\n",
|
||||
"def compute_loss2(data_x, data_y, model, phi, lambda_):\n",
|
||||
" pred_y = model(phi, data_x)\n",
|
||||
" loss = np.sum((pred_y-data_y)*(pred_y-data_y))\n",
|
||||
" # Add the new term to the loss\n",
|
||||
" loss = loss + lambda_ * compute_reg_term(phi[0],phi[1])\n",
|
||||
"\n",
|
||||
" return loss"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "4IgsQelgDdQ-"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Code to draw the regularization function\n",
|
||||
"def draw_reg_function():\n",
|
||||
"\n",
|
||||
" # Make grid of intercept/slope values to plot\n",
|
||||
" offsets_mesh, freqs_mesh = np.meshgrid(np.arange(-10,10.0,0.1), np.arange(2.5,22.5,0.1))\n",
|
||||
" loss_mesh = np.zeros_like(freqs_mesh)\n",
|
||||
" # Compute loss for every set of parameters\n",
|
||||
" for idslope, slope in np.ndenumerate(freqs_mesh):\n",
|
||||
" loss_mesh[idslope] = compute_reg_term(offsets_mesh[idslope], slope)\n",
|
||||
"\n",
|
||||
" fig,ax = plt.subplots()\n",
|
||||
" fig.set_size_inches(8,8)\n",
|
||||
" ax.contourf(offsets_mesh,freqs_mesh,loss_mesh,256,cmap=my_colormap)\n",
|
||||
" ax.contour(offsets_mesh,freqs_mesh,loss_mesh,20,colors=['#80808080'])\n",
|
||||
" ax.set_ylim([2.5,22.5])\n",
|
||||
" ax.set_xlabel('Offset $\\phi_{0}$'); ax.set_ylabel('Frequency, $\\phi_{1}$')\n",
|
||||
" plt.show()\n",
|
||||
"\n",
|
||||
"# Draw the regularization function. It should look similar to figure 9.1b\n",
|
||||
"draw_reg_function()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "PFl9zWzLNjuK"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Code to draw loss function with regularization\n",
|
||||
"def draw_loss_function_reg(data, model, lambda_, my_colormap, phi_iters = None):\n",
|
||||
"\n",
|
||||
" # Make grid of intercept/slope values to plot\n",
|
||||
" offsets_mesh, freqs_mesh = np.meshgrid(np.arange(-10,10.0,0.1), np.arange(2.5,22.5,0.1))\n",
|
||||
" loss_mesh = np.zeros_like(freqs_mesh)\n",
|
||||
" # Compute loss for every set of parameters\n",
|
||||
" for idslope, slope in np.ndenumerate(freqs_mesh):\n",
|
||||
" loss_mesh[idslope] = compute_loss2(data[0,:], data[1,:], model, np.array([[offsets_mesh[idslope]], [slope]]), lambda_)\n",
|
||||
"\n",
|
||||
" fig,ax = plt.subplots()\n",
|
||||
" fig.set_size_inches(8,8)\n",
|
||||
" ax.contourf(offsets_mesh,freqs_mesh,loss_mesh,256,cmap=my_colormap)\n",
|
||||
" ax.contour(offsets_mesh,freqs_mesh,loss_mesh,20,colors=['#80808080'])\n",
|
||||
" if phi_iters is not None:\n",
|
||||
" ax.plot(phi_iters[0,:], phi_iters[1,:],'go-')\n",
|
||||
" ax.set_ylim([2.5,22.5])\n",
|
||||
" ax.set_xlabel('Offset $\\phi_{0}$'); ax.set_ylabel('Frequency, $\\phi_{1}$')\n",
|
||||
" plt.show()\n",
|
||||
"\n",
|
||||
"# This should look something like figure 9.1c\n",
|
||||
"draw_loss_function_reg(data, model, 0.2, my_colormap)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "mQdEWCQdN5Mt"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# TODO -- Experiment with different values of the regularization weight lambda_\n",
|
||||
"# What do you predict will happen when it is very small (e.g. 0.01)?\n",
|
||||
"# What do you predict will happen when it is large (e.g, 1.0)?\n",
|
||||
"# What happens to the loss at the global minimum when we add the regularization term?\n",
|
||||
"# Does it go up? Go down? Stay the same?"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "da047xjZQqj6"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now we'll compute the derivatives $\\frac{\\partial L'}{\\partial\\phi_0}$ and $\\frac{\\partial L'}{\\partial\\phi_1}$ of the regularized loss function:\n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
"L'[\\boldsymbol\\phi] = L[\\boldsymbol\\phi]+ \\lambda\\cdot \\Bigl(\\phi_{0}^2+(\\phi_1-12.5)^2\\Bigr)\n",
|
||||
"\\end{equation}\n",
|
||||
"\n",
|
||||
"so that we can perform gradient descent."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "z7k0QHRNRwtD"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def dldphi0(phi, lambda_):\n",
|
||||
" # TODO compute the derivative with respect to phi0\n",
|
||||
" # Replace this line:]\n",
|
||||
" deriv = 0\n",
|
||||
"\n",
|
||||
" return deriv\n",
|
||||
"\n",
|
||||
"def dldphi1(phi, lambda_):\n",
|
||||
" # TODO compute the derivative with respect to phi1\n",
|
||||
" # Replace this line:]\n",
|
||||
" deriv = 0\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" return deriv\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def compute_gradient2(data_x, data_y, phi, lambda_):\n",
|
||||
" dl_dphi0 = gabor_deriv_phi0(data_x, data_y, phi[0],phi[1])+dldphi0(np.squeeze(phi), lambda_)\n",
|
||||
" dl_dphi1 = gabor_deriv_phi1(data_x, data_y, phi[0],phi[1])+dldphi1(np.squeeze(phi), lambda_)\n",
|
||||
" # Return the gradient\n",
|
||||
" return np.array([[dl_dphi0],[dl_dphi1]])\n",
|
||||
"\n",
|
||||
"def gradient_descent_step2(phi, lambda_, data, model):\n",
|
||||
" # Step 1: Compute the gradient\n",
|
||||
" gradient = compute_gradient2(data[0,:],data[1,:], phi, lambda_)\n",
|
||||
" # Step 2: Update the parameters -- note we want to search in the negative (downhill direction)\n",
|
||||
" alpha = 0.1\n",
|
||||
" phi = phi - alpha * gradient\n",
|
||||
" return phi"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "0OStdqo3Rv0a"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Finally, let's run gradient descent and draw the result\n",
|
||||
"# Initialize the parameters\n",
|
||||
"n_steps = 41\n",
|
||||
"phi_all = np.zeros((2,n_steps+1))\n",
|
||||
"phi_all[0,0] = 2.6\n",
|
||||
"phi_all[1,0] = 8.5\n",
|
||||
"lambda_ = 0.2\n",
|
||||
"\n",
|
||||
"# Measure loss and draw initial model\n",
|
||||
"loss = compute_loss2(data[0,:], data[1,:], model, phi_all[:,0:1], lambda_)\n",
|
||||
"draw_model(data,model,phi_all[:,0:1], \"Initial parameters, Loss = %f\"%(loss))\n",
|
||||
"\n",
|
||||
"for c_step in range (n_steps):\n",
|
||||
" # Do gradient descent step\n",
|
||||
" phi_all[:,c_step+1:c_step+2] = gradient_descent_step2(phi_all[:,c_step:c_step+1],lambda_, data, model)\n",
|
||||
" # Measure loss and draw model every 4th step\n",
|
||||
" if c_step % 8 == 0:\n",
|
||||
" loss = compute_loss2(data[0,:], data[1,:], model, phi_all[:,c_step+1:c_step+2], lambda_)\n",
|
||||
" draw_model(data,model,phi_all[:,c_step+1], \"Iteration %d, loss = %f\"%(c_step+1,loss))\n",
|
||||
"\n",
|
||||
"draw_loss_function_reg(data, model, lambda_, my_colormap, phi_all)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "c_V-Gv5hWgTE"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"You should see that the gradient descent algorithm now finds the correct minimum. By applying a tiny bit of domain knowledge (the parameter phi0 tends to be near zero and the parameters phi1 tends to be near 12.5), we get a better solution. However, the cost is that this solution is slightly biased towards this prior knowledge."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "wrszSLrqZG4k"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
402
Notebooks/Chap09/9_2_Implicit_Regularization.ipynb
Normal file
File diff suppressed because one or more lines are too long
326
Notebooks/Chap09/9_3_Ensembling.ipynb
Normal file
326
Notebooks/Chap09/9_3_Ensembling.ipynb
Normal file
@@ -0,0 +1,326 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyNuR7X+PMWRddy+WQr4gr5f",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap09/9_3_Ensembling.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 9.3: Ensembling**\n",
|
||||
"\n",
|
||||
"This notebook investigates how ensembling can improve the performance of models. We'll work with the simplified neural network model (figure 8.4 of book) which we can fit in closed form, and so we can eliminate any errors due to not finding the global maximum.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "el8l05WQEO46"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "xhmIOLiZELV_"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# import libraries\n",
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"# Define seed so get same results each time\n",
|
||||
"np.random.seed(1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# The true function that we are trying to estimate, defined on [0,1]\n",
|
||||
"def true_function(x):\n",
|
||||
" y = np.exp(np.sin(x*(2*3.1413)))\n",
|
||||
" return y"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "3hpqmFyQNrbt"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Generate some data points with or without noise\n",
|
||||
"def generate_data(n_data, sigma_y=0.3):\n",
|
||||
" # Generate x values quasi uniformly\n",
|
||||
" x = np.ones(n_data)\n",
|
||||
" for i in range(n_data):\n",
|
||||
" x[i] = np.random.uniform(i/n_data, (i+1)/n_data, 1)\n",
|
||||
"\n",
|
||||
" # y value from running through functoin and adding noise\n",
|
||||
" y = np.ones(n_data)\n",
|
||||
" for i in range(n_data):\n",
|
||||
" y[i] = true_function(x[i])\n",
|
||||
" y[i] += np.random.normal(0, sigma_y, 1)\n",
|
||||
" return x,y"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "skZMM5TbNwq4"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Draw the fitted function, together win uncertainty used to generate points\n",
|
||||
"def plot_function(x_func, y_func, x_data=None,y_data=None, x_model = None, y_model =None, sigma_func = None, sigma_model=None):\n",
|
||||
"\n",
|
||||
" fig,ax = plt.subplots()\n",
|
||||
" ax.plot(x_func, y_func, 'k-')\n",
|
||||
" if sigma_func is not None:\n",
|
||||
" ax.fill_between(x_func, y_func-2*sigma_func, y_func+2*sigma_func, color='lightgray')\n",
|
||||
"\n",
|
||||
" if x_data is not None:\n",
|
||||
" ax.plot(x_data, y_data, 'o', color='#d18362')\n",
|
||||
"\n",
|
||||
" if x_model is not None:\n",
|
||||
" ax.plot(x_model, y_model, '-', color='#7fe7de')\n",
|
||||
"\n",
|
||||
" if sigma_model is not None:\n",
|
||||
" ax.fill_between(x_model, y_model-2*sigma_model, y_model+2*sigma_model, color='lightgray')\n",
|
||||
"\n",
|
||||
" ax.set_xlim(0,1)\n",
|
||||
" ax.set_xlabel('Input, $x$')\n",
|
||||
" ax.set_ylabel('Output, $y$')\n",
|
||||
" plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "ziwD_R7lN0DY"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Generate true function\n",
|
||||
"x_func = np.linspace(0, 1.0, 100)\n",
|
||||
"y_func = true_function(x_func);\n",
|
||||
"\n",
|
||||
"# Generate some data points\n",
|
||||
"np.random.seed(1)\n",
|
||||
"sigma_func = 0.3\n",
|
||||
"n_data = 15\n",
|
||||
"x_data,y_data = generate_data(n_data, sigma_func)\n",
|
||||
"\n",
|
||||
"# Plot the functinon, data and uncertainty\n",
|
||||
"plot_function(x_func, y_func, x_data, y_data, sigma_func=sigma_func)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "2CgKanwaN3NM"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define model -- beta is a scalar and omega has size n_hidden,1\n",
|
||||
"def network(x, beta, omega):\n",
|
||||
" # Retrieve number of hidden units\n",
|
||||
" n_hidden = omega.shape[0]\n",
|
||||
"\n",
|
||||
" y = np.zeros_like(x)\n",
|
||||
" for c_hidden in range(n_hidden):\n",
|
||||
" # Evaluate activations based on shifted lines (figure 8.4b-d)\n",
|
||||
" line_vals = x - c_hidden/n_hidden\n",
|
||||
" h = line_vals * (line_vals > 0)\n",
|
||||
" # Weight activations by omega parameters and sum\n",
|
||||
" y = y + omega[c_hidden] * h\n",
|
||||
" # Add bias, beta\n",
|
||||
" y = y + beta\n",
|
||||
"\n",
|
||||
" return y"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "gorZ6i97N7AR"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# This fits the n_hidden+1 parameters (see fig 8.4a) in closed form.\n",
|
||||
"# If you have studied linear algebra, then you will know it is a least\n",
|
||||
"# squares solution of the form (A^TA)^-1A^Tb. If you don't recognize that,\n",
|
||||
"# then just take it on trust that this gives you the best possible solution.\n",
|
||||
"def fit_model_closed_form(x,y,n_hidden):\n",
|
||||
" n_data = len(x)\n",
|
||||
" A = np.ones((n_data, n_hidden+1))\n",
|
||||
" for i in range(n_data):\n",
|
||||
" for j in range(1,n_hidden+1):\n",
|
||||
" A[i,j] = x[i]-(j-1)/n_hidden\n",
|
||||
" if A[i,j] < 0:\n",
|
||||
" A[i,j] = 0;\n",
|
||||
"\n",
|
||||
" # Add a tiny bit of regularization\n",
|
||||
" reg_value = 0.00001\n",
|
||||
" regMat = reg_value * np.identity(n_hidden+1)\n",
|
||||
" regMat[0,0] = 0\n",
|
||||
"\n",
|
||||
" ATA = np.matmul(np.transpose(A), A) +regMat\n",
|
||||
" ATAInv = np.linalg.inv(ATA)\n",
|
||||
" ATAInvAT = np.matmul(ATAInv, np.transpose(A))\n",
|
||||
" beta_omega = np.matmul(ATAInvAT,y)\n",
|
||||
" beta = beta_omega[0]\n",
|
||||
" omega = beta_omega[1:]\n",
|
||||
"\n",
|
||||
" return beta, omega"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "bMrLZUIqOwiM"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Closed form solution\n",
|
||||
"beta, omega = fit_model_closed_form(x_data,y_data,n_hidden=14)\n",
|
||||
"\n",
|
||||
"# Get prediction for model across graph grange\n",
|
||||
"x_model = np.linspace(0,1,100);\n",
|
||||
"y_model = network(x_model, beta, omega)\n",
|
||||
"\n",
|
||||
"# Draw the function and the model\n",
|
||||
"plot_function(x_func, y_func, x_data,y_data, x_model, y_model)\n",
|
||||
"\n",
|
||||
"# Compute the mean squared error between the fitted model (cyan) and the true curve (black)\n",
|
||||
"mean_sq_error = np.mean((y_model-y_func) * (y_model-y_func))\n",
|
||||
"print(f\"Mean square error = {mean_sq_error:3.3f}\")"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "mzmtdY8DOz16"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Now let's resample the data with replacement four times.\n",
|
||||
"n_model = 4\n",
|
||||
"# Array to store the prediction from all of our models\n",
|
||||
"all_y_model = np.zeros((n_model, len(y_model)))\n",
|
||||
"\n",
|
||||
"# For each model\n",
|
||||
"for c_model in range(n_model):\n",
|
||||
" # TODO Sample data indices with replacement (use np.random.choice)\n",
|
||||
" # Replace this line\n",
|
||||
" resampled_indices = np.arange(0,n_data,1);\n",
|
||||
"\n",
|
||||
" # Extract the resampled x and y data\n",
|
||||
" x_data_resampled = x_data[resampled_indices]\n",
|
||||
" y_data_resampled = y_data[resampled_indices]\n",
|
||||
"\n",
|
||||
" # Fit the model\n",
|
||||
" beta, omega = fit_model_closed_form(x_data_resampled,y_data_resampled,n_hidden=14)\n",
|
||||
"\n",
|
||||
" # Run the model\n",
|
||||
" y_model_resampled = network(x_model, beta, omega)\n",
|
||||
"\n",
|
||||
" # Store the results\n",
|
||||
" all_y_model[c_model,:] = y_model_resampled\n",
|
||||
"\n",
|
||||
" # Draw the function and the model\n",
|
||||
" plot_function(x_func, y_func, x_data,y_data, x_model, y_model_resampled)\n",
|
||||
"\n",
|
||||
" # Compute the mean squared error between the fitted model (cyan) and the true curve (black)\n",
|
||||
" mean_sq_error = np.mean((y_model_resampled-y_func) * (y_model_resampled-y_func))\n",
|
||||
" print(f\"Mean square error = {mean_sq_error:3.3f}\")"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "UKrAOEiKO8Go"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Plot the median of the results\n",
|
||||
"# TODO -- find the median prediction\n",
|
||||
"# Replace this line\n",
|
||||
"y_model_median = all_y_model[0,:]\n",
|
||||
"\n",
|
||||
"# Draw the function and the model\n",
|
||||
"plot_function(x_func, y_func, x_data,y_data, x_model, y_model_median)\n",
|
||||
"\n",
|
||||
"# Compute the mean squared error between the fitted model (cyan) and the true curve (black)\n",
|
||||
"mean_sq_error = np.mean((y_model_median-y_func) * (y_model_median-y_func))\n",
|
||||
"print(f\"Mean square error = {mean_sq_error:3.3f}\")"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "cUTaW_GMS6nb"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Plot the median of the results\n",
|
||||
"# TODO -- find the mean prediction\n",
|
||||
"# Replace this line\n",
|
||||
"y_model_mean = all_y_model[0,:]\n",
|
||||
"\n",
|
||||
"# Draw the function and the model\n",
|
||||
"plot_function(x_func, y_func, x_data,y_data, x_model, y_model_mean)\n",
|
||||
"\n",
|
||||
"# Compute the mean squared error between the fitted model (cyan) and the true curve (black)\n",
|
||||
"mean_sq_error = np.mean((y_model_mean-y_func) * (y_model_mean-y_func))\n",
|
||||
"print(f\"Mean square error = {mean_sq_error:3.3f}\")"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "2MKxwVxuRvCx"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"You should see that both the median and mean models are better than any of the individual models. We have improved our performance at the cost of four times as much training time, storage, and inference time."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "K-jDZrfjWwBa"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
424
Notebooks/Chap09/9_4_Bayesian_Approach.ipynb
Normal file
@@ -0,0 +1,424 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyMB8B4269DVmrcLoCWrhzKF",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap09/9_4_Bayesian_Approach.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 9.4: Bayesian approach**\n",
|
||||
"\n",
|
||||
"This notebook investigates the Bayesian approach to model fitting and reproduces figure 9.11 from the book.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "el8l05WQEO46"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "xhmIOLiZELV_"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# import libraries\n",
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"# Define seed so get same results each time\n",
|
||||
"np.random.seed(1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# The true function that we are trying to estimate, defined on [0,1]\n",
|
||||
"def true_function(x):\n",
|
||||
" y = np.exp(np.sin(x*(2*3.1413)))\n",
|
||||
" return y"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "3hpqmFyQNrbt"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Generate some data points with or without noise\n",
|
||||
"def generate_data(n_data, sigma_y=0.3):\n",
|
||||
" # Generate x values quasi uniformly\n",
|
||||
" x = np.ones(n_data)\n",
|
||||
" for i in range(n_data):\n",
|
||||
" x[i] = np.random.uniform(i/n_data, (i+1)/n_data, 1)\n",
|
||||
"\n",
|
||||
" # y value from running through functoin and adding noise\n",
|
||||
" y = np.ones(n_data)\n",
|
||||
" for i in range(n_data):\n",
|
||||
" y[i] = true_function(x[i])\n",
|
||||
" y[i] += np.random.normal(0, sigma_y, 1)\n",
|
||||
" return x,y"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "skZMM5TbNwq4"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Draw the fitted function, together win uncertainty used to generate points\n",
|
||||
"def plot_function(x_func, y_func, x_data=None,y_data=None, x_model = None, y_model =None, sigma_func = None, sigma_model=None):\n",
|
||||
"\n",
|
||||
" fig,ax = plt.subplots()\n",
|
||||
" ax.plot(x_func, y_func, 'k-')\n",
|
||||
" if sigma_func is not None:\n",
|
||||
" ax.fill_between(x_func, y_func-2*sigma_func, y_func+2*sigma_func, color='lightgray')\n",
|
||||
"\n",
|
||||
" if x_data is not None:\n",
|
||||
" ax.plot(x_data, y_data, 'o', color='#d18362')\n",
|
||||
"\n",
|
||||
" if x_model is not None:\n",
|
||||
" ax.plot(x_model, y_model, '-', color='#7fe7de')\n",
|
||||
"\n",
|
||||
" if sigma_model is not None:\n",
|
||||
" ax.fill_between(x_model, y_model-2*sigma_model, y_model+2*sigma_model, color='lightgray')\n",
|
||||
"\n",
|
||||
" ax.set_xlim(0,1)\n",
|
||||
" ax.set_xlabel('Input, $x$')\n",
|
||||
" ax.set_ylabel('Output, $y$')\n",
|
||||
" plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "ziwD_R7lN0DY"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Generate true function\n",
|
||||
"x_func = np.linspace(0, 1.0, 100)\n",
|
||||
"y_func = true_function(x_func);\n",
|
||||
"\n",
|
||||
"# Generate some data points\n",
|
||||
"np.random.seed(1)\n",
|
||||
"sigma_func = 0.3\n",
|
||||
"n_data = 15\n",
|
||||
"x_data,y_data = generate_data(n_data, sigma_func)\n",
|
||||
"\n",
|
||||
"# Plot the functinon, data and uncertainty\n",
|
||||
"plot_function(x_func, y_func, x_data, y_data, sigma_func=sigma_func)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "2CgKanwaN3NM"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define model -- beta is a scalar and omega has size n_hidden,1\n",
|
||||
"def network(x, beta, omega):\n",
|
||||
" # Retrieve number of hidden units\n",
|
||||
" n_hidden = omega.shape[0]\n",
|
||||
"\n",
|
||||
" y = np.zeros_like(x)\n",
|
||||
" for c_hidden in range(n_hidden):\n",
|
||||
" # Evaluate activations based on shifted lines (figure 8.4b-d)\n",
|
||||
" line_vals = x - c_hidden/n_hidden\n",
|
||||
" h = line_vals * (line_vals > 0)\n",
|
||||
" # Weight activations by omega parameters and sum\n",
|
||||
" y = y + omega[c_hidden] * h\n",
|
||||
" # Add bias, beta\n",
|
||||
" y = y + beta\n",
|
||||
"\n",
|
||||
" return y"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "gorZ6i97N7AR"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's compute a probability distribution over the model parameters using Bayes's rule:\n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
" Pr(\\boldsymbol\\phi|\\{\\mathbf{x}_{i},\\mathbf{y}_{i}\\}) = \\frac{\\prod_{i=1}^{I} Pr(\\mathbf{y}_{i}|\\mathbf{x}_{i},\\boldsymbol\\phi) Pr(\\boldsymbol\\phi)}{\\int \\prod_{i=1}^{I} Pr(\\mathbf{y}_{i}|\\mathbf{x}_{i},\\boldsymbol\\phi) Pr(\\boldsymbol\\phi)d\\boldsymbol\\phi } ,\n",
|
||||
"\\end{equation}\n",
|
||||
"\n",
|
||||
"We'll define the prior $Pr(\\boldsymbol\\phi)$ as:\n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
"Pr(\\boldsymbol\\phi) = \\mbox{Norm}_{\\boldsymbol\\phi}\\bigl[\\mathbf{0},\\sigma^2_p\\mathbf{I}\\bigr]\n",
|
||||
"\\end{equation}\n",
|
||||
"\n",
|
||||
"where $\\phi=[\\omega_1,\\omega_2\\ldots \\omega_n, \\beta]^T$ and $\\sigma^2_{p}$ is the prior variance.\n",
|
||||
"\n",
|
||||
"The likelihood term $\\prod_{i=1}^{I} Pr(\\mathbf{y}_{i}|\\mathbf{x}_{i},\\boldsymbol\\phi)$ is given by:\n",
|
||||
"\n",
|
||||
"\\begin{eqnarray}\n",
|
||||
"\\prod_{i=1}^{I} Pr(\\mathbf{y}_{i}|\\mathbf{x}_{i},\\boldsymbol\\phi) &=& \\prod_{i=1}^{I} \\mbox{Norm}_{y_i}\\bigl[\\mbox{f}[\\mathbf{x}_{i},\\boldsymbol\\phi],\\sigma_d^2\\bigr]\\\\\n",
|
||||
"&=& \\prod_{i=1}^{I} \\mbox{Norm}_{y_i}\\bigl[\\boldsymbol\\omega\\mathbf{h}_i+\\beta,\\sigma_d^2\\bigr]\\\\\n",
|
||||
"&=& \\mbox{Norm}_{\\mathbf{y}}\\bigl[\\mathbf{H}^T\\boldsymbol\\phi,\\sigma^2\\mathbf{I}\\bigr].\n",
|
||||
"\\end{eqnarray}\n",
|
||||
"\n",
|
||||
"where $\\sigma^2$ is the measurement noise and $\\mathbf{h}_{i}$ is the column vector of hidden variables for the $i^{th}$ input. Here the vector $\\mathbf{y}$ and matrix $\\mathbf{H}$ are defined as:\n",
|
||||
"\n",
|
||||
"\\begin{equation}\n",
|
||||
"\\mathbf{y} = \\begin{bmatrix}y_1\\\\y_2\\\\\\vdots\\\\y_{I}\\end{bmatrix}\\quad\\mbox{and}\\quad \\mathbf{H} = \\begin{bmatrix}\\mathbf{h}_{1}&\\mathbf{h}_{2}&\\cdots&\\mathbf{h}_{I}\\\\1&1&\\cdots &1\\end{bmatrix}.\n",
|
||||
"\\end{equation}\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "i8T_QduzeBmM"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"To make progress we use the change of variable relation (Appendix C.3.4 of the book) to rewrite the likelihood term as a normal distribution in the parameters $\\boldsymbol\\phi$:\n",
|
||||
"\n",
|
||||
"\\begin{eqnarray}\n",
|
||||
"\\prod_{i=1}^{I} Pr(\\mathbf{y}_{i}|\\mathbf{x}_{i},\\boldsymbol\\phi+\\beta)\n",
|
||||
"&=& \\mbox{Norm}_{\\mathbf{y}}\\bigl[\\mathbf{H}^T\\boldsymbol\\phi,\\sigma^2\\bigr]\\\\\n",
|
||||
"&\\propto& \\mbox{Norm}_{\\boldsymbol\\phi}\\bigl[(\\mathbf{H}\\mathbf{H}^T)^{-1}\\mathbf{H}\\mathbf{y},\\sigma^2(\\mathbf{H}\\mathbf{H}^t)^{-1}\\bigr]\n",
|
||||
"\\end{eqnarray}\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "JojV6ueRk49G"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Finally, we can combine the likelihood and prior terms using the product of two normal distributions relation (Appendix C.3.3).\n",
|
||||
"\n",
|
||||
"\\begin{eqnarray}\n",
|
||||
" Pr(\\boldsymbol\\phi|\\{\\mathbf{x}_{i},\\mathbf{y}_{i}\\}) &\\propto& \\prod_{i=1}^{I} Pr(\\mathbf{y}_{i}|\\mathbf{x}_{i},\\boldsymbol\\phi) Pr(\\boldsymbol\\phi)\\\\\n",
|
||||
" &\\propto&\\mbox{Norm}_{\\boldsymbol\\phi}\\bigl[(\\mathbf{H}\\mathbf{H}^T)^{-1}\\mathbf{H}\\mathbf{y},\\sigma^2(\\mathbf{H}\\mathbf{H}^T)^{-1}\\bigr] \\mbox{Norm}_{\\boldsymbol\\phi}\\bigl[\\mathbf{0},\\sigma^2_p\\mathbf{I}\\bigr]\\\\\n",
|
||||
" &\\propto&\\mbox{Norm}_{\\boldsymbol\\phi}\\biggl[\\frac{1}{\\sigma^2}\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\mathbf{H}\\mathbf{y},\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\biggr].\n",
|
||||
"\\end{eqnarray}\n",
|
||||
"\n",
|
||||
"In fact, since this already a normal distribution, the constant of proportionality must be one and we can write\n",
|
||||
"\n",
|
||||
"\\begin{eqnarray}\n",
|
||||
" Pr(\\boldsymbol\\phi|\\{\\mathbf{x}_{i},\\mathbf{y}_{i}\\}) &=& \\mbox{Norm}_{\\boldsymbol\\phi}\\biggl[\\frac{1}{\\sigma^2}\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\mathbf{H}\\mathbf{y},\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\biggr].\n",
|
||||
"\\end{eqnarray}\n",
|
||||
"\n",
|
||||
"TODO -- On a piece of paper, use the relations in Appendix C.3.3 and C.3.4 to fill in the missing steps and establish that this is the correct formula for the posterior."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YX0O_Ciwp4W1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def compute_H(x_data, n_hidden):\n",
|
||||
" psi1 = np.ones((n_hidden+1,1));\n",
|
||||
" psi0 = np.linspace(0.0, 1.0, num=n_hidden, endpoint=False) * -1\n",
|
||||
"\n",
|
||||
" n_data = x_data.size\n",
|
||||
" # First compute the hidden variables\n",
|
||||
" H = np.ones((n_hidden+1, n_data))\n",
|
||||
" for i in range(n_hidden):\n",
|
||||
" for j in range(n_data):\n",
|
||||
" # Compute preactivation\n",
|
||||
" H[i,j] = psi1[i] * x_data[j]+psi0[i]\n",
|
||||
" # Apply ReLU to get activation\n",
|
||||
" if H[i,j] < 0:\n",
|
||||
" H[i,j] = 0;\n",
|
||||
"\n",
|
||||
" return H\n",
|
||||
"\n",
|
||||
"def compute_param_mean_covar(x_data, y_data, n_hidden, sigma_sq, sigma_p_sq):\n",
|
||||
" # Retrieve the matrix containing the hidden variables\n",
|
||||
" H = compute_H(x_data, n_hidden) ;\n",
|
||||
"\n",
|
||||
" # TODO -- Compute the covariance matrix (you will need np.transpose(), np.matmul(), np.linalg.inv())\n",
|
||||
" # Replace this line\n",
|
||||
" phi_covar = np.identity(n_hidden+1)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" # TODO -- Compute the mean matrix\n",
|
||||
" # Replace this line\n",
|
||||
" phi_mean = np.zeros((n_hidden+1,1))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" return phi_mean, phi_covar"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "nF1AcgNDwm4t"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now we can draw samples from this distribution"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "GjPnlG4q0UFK"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define parameters\n",
|
||||
"n_hidden = 5\n",
|
||||
"sigma_sq = sigma_func * sigma_func\n",
|
||||
"# Arbitrary large value reflecting the fact we are uncertain about the\n",
|
||||
"# parameters before we see any data\n",
|
||||
"sigma_p_sq = 1000\n",
|
||||
"\n",
|
||||
"# Compute the mean and covariance matrix\n",
|
||||
"phi_mean, phi_covar = compute_param_mean_covar(x_data, y_data, n_hidden, sigma_sq, sigma_p_sq)\n",
|
||||
"\n",
|
||||
"# Let's draw the mean model\n",
|
||||
"x_model = x_func\n",
|
||||
"y_model_mean = network(x_model, phi_mean[-1], phi_mean[0:n_hidden])\n",
|
||||
"plot_function(x_func, y_func, x_data, y_data, x_model, y_model_mean)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "K4vYc82D0BMq"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# TODO Draw two samples from the normal distribution over the parameters\n",
|
||||
"# Replace these lines\n",
|
||||
"phi_sample1 = np.zeros((n_hidden+1,1))\n",
|
||||
"phi_sample2 = np.zeros((n_hidden+1,1))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Run the network for these two sample sets of parameters\n",
|
||||
"y_model_sample1 = network(x_model, phi_sample1[-1], phi_sample1[0:n_hidden])\n",
|
||||
"y_model_sample2 = network(x_model, phi_sample2[-1], phi_sample2[0:n_hidden])\n",
|
||||
"\n",
|
||||
"# Draw the two models\n",
|
||||
"plot_function(x_func, y_func, x_data, y_data, x_model, y_model_sample1)\n",
|
||||
"plot_function(x_func, y_func, x_data, y_data, x_model, y_model_sample2)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "TVIjhubkSw-R"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now we need to perform inference for a new data points $\\mathbf{x}^*$ with corresponding hidden values $\\mathbf{h}^*$. Instead of having a single estimate of the parameters, we have a distribution over the possible parameters. So we marginalize (integrate) over this distribution to account for all possible values:\n",
|
||||
"\n",
|
||||
"\\begin{eqnarray}\n",
|
||||
"Pr(y^*|\\mathbf{x}^*) &=& \\int Pr(y^{*}|\\mathbf{x}^*,\\boldsymbol\\phi)Pr(\\boldsymbol\\phi|\\{\\mathbf{x}_{i},\\mathbf{y}_{i}\\}) d\\boldsymbol\\phi\\\\\n",
|
||||
"&=& \\int \\mbox{Norm}_{y^*}\\bigl[\\begin{bmatrix}\\mathbf{h}^{*T}&1\\end{bmatrix}\\boldsymbol\\phi,\\sigma^2]\\cdot\\mbox{Norm}_{\\boldsymbol\\phi}\\biggl[\\frac{1}{\\sigma^2}\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\mathbf{H}\\mathbf{y},\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\biggr]d\\boldsymbol\\phi\\\\\n",
|
||||
"&=& \\mbox{Norm}_{y^*}\\biggl[\\frac{1}{\\sigma^2} \\begin{bmatrix}\\mathbf{h}^{*T}&1\\end{bmatrix}\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\mathbf{H}\\mathbf{y}, \\begin{bmatrix}\\mathbf{h}^{*T}&1\\end{bmatrix}\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\n",
|
||||
"\\begin{bmatrix}\\mathbf{h}^*\\\\1\\end{bmatrix}\\biggr]\n",
|
||||
"\\end{eqnarray}\n",
|
||||
"\n",
|
||||
"To compute this, we reformulated the integrand using the relations from appendices\n",
|
||||
"C.3.3 and C.3.4 as the product of a normal distribution in $\\boldsymbol\\phi$ and a constant with respect\n",
|
||||
"to $\\boldsymbol\\phi$. The integral of the normal distribution must be one, and so the finnal result is just the constant. This constant is itself a normal distribution in $y^*$. <br>\n",
|
||||
"\n",
|
||||
"If you feel so inclined you can work through the math of this yourself."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "GiNg5EroUiUb"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Predict mean and variance of y_star from x_star\n",
|
||||
"def inference(x_star, x_data, y_data, sigma_sq, sigma_p_sq, n_hidden):\n",
|
||||
"\n",
|
||||
" # Compute hidden variables\n",
|
||||
" h_star = compute_H(x_star, n_hidden);\n",
|
||||
" H = compute_H(x_data, n_hidden);\n",
|
||||
"\n",
|
||||
" # TODO: Compute mean and variance of y*\n",
|
||||
" # Replace these lines:\n",
|
||||
" y_star_mean = 0\n",
|
||||
" y_star_var = 1\n",
|
||||
"\n",
|
||||
" return y_star_mean, y_star_var"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "ILxT4EfW2lUm"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"x_model = x_func\n",
|
||||
"y_model = np.zeros_like(x_model)\n",
|
||||
"y_model_std = np.zeros_like(x_model)\n",
|
||||
"for c_model in range(len(x_model)):\n",
|
||||
" y_star_mean, y_star_var = inference(x_model[c_model]*np.ones((1,1)), x_data, y_data, sigma_sq, sigma_p_sq, n_hidden)\n",
|
||||
" y_model[c_model] = y_star_mean\n",
|
||||
" y_model_std[c_model] = np.sqrt(y_star_var)\n",
|
||||
"\n",
|
||||
"# Draw the model\n",
|
||||
"plot_function(x_func, y_func, x_data, y_data, x_model, y_model, sigma_model=y_model_std)\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "87cjUjMaixHZ"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"TODO:\n",
|
||||
"\n",
|
||||
"1. Experiment running this again with different numbers of hidden units. Make a prediction for what will happen when you increase / decrease them.\n",
|
||||
"2. Experiment with what happens if you make the prior variance $\\sigma^2_p$ to a smaller value like 1. How do you explain the results?"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "8Hcbe_16sK0F"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
346
Notebooks/Chap09/9_5_Augmentation.ipynb
Normal file
@@ -0,0 +1,346 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyM3wq9CHLjekkIXIgXRxueE",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap09/9_5_Augmentation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 9.5: Augmentation**\n",
|
||||
"\n",
|
||||
"This notebook investigates data augmentation for the MNIST-1D model.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "el8l05WQEO46"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Run this if you're in a Colab to make a local copy of the MNIST 1D repository\n",
|
||||
"!git clone https://github.com/greydanus/mnist1d"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "syvgxgRr3myY"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"import torch, torch.nn as nn\n",
|
||||
"from torch.utils.data import TensorDataset, DataLoader\n",
|
||||
"from torch.optim.lr_scheduler import StepLR\n",
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import mnist1d\n",
|
||||
"import random"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "ckrNsYd13pMe"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"args = mnist1d.data.get_dataset_args()\n",
|
||||
"data = mnist1d.data.get_dataset(args, path='./mnist1d_data.pkl', download=False, regenerate=False)\n",
|
||||
"\n",
|
||||
"# The training and test input and outputs are in\n",
|
||||
"# data['x'], data['y'], data['x_test'], and data['y_test']\n",
|
||||
"print(\"Examples in training set: {}\".format(len(data['y'])))\n",
|
||||
"print(\"Examples in test set: {}\".format(len(data['y_test'])))\n",
|
||||
"print(\"Length of each example: {}\".format(data['x'].shape[-1]))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "D_Woo9U730lZ"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"D_i = 40 # Input dimensions\n",
|
||||
"D_k = 200 # Hidden dimensions\n",
|
||||
"D_o = 10 # Output dimensions\n",
|
||||
"\n",
|
||||
"# Define a model with two hidden layers of size 100\n",
|
||||
"# And ReLU activations between them\n",
|
||||
"model = nn.Sequential(\n",
|
||||
"nn.Linear(D_i, D_k),\n",
|
||||
"nn.ReLU(),\n",
|
||||
"nn.Linear(D_k, D_k),\n",
|
||||
"nn.ReLU(),\n",
|
||||
"nn.Linear(D_k, D_o))\n",
|
||||
"\n",
|
||||
"def weights_init(layer_in):\n",
|
||||
" # Initialize the parameters with He initialization\n",
|
||||
" if isinstance(layer_in, nn.Linear):\n",
|
||||
" nn.init.kaiming_uniform_(layer_in.weight)\n",
|
||||
" layer_in.bias.data.fill_(0.0)\n",
|
||||
"\n",
|
||||
"# Call the function you just defined\n",
|
||||
"model.apply(weights_init)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "JfIFWFIL33eF"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# choose cross entropy loss function (equation 5.24)\n",
|
||||
"loss_function = torch.nn.CrossEntropyLoss()\n",
|
||||
"# construct SGD optimizer and initialize learning rate and momentum\n",
|
||||
"optimizer = torch.optim.SGD(model.parameters(), lr = 0.05, momentum=0.9)\n",
|
||||
"# object that decreases learning rate by half every 10 epochs\n",
|
||||
"scheduler = StepLR(optimizer, step_size=10, gamma=0.5)\n",
|
||||
"# create 100 dummy data points and store in data loader class\n",
|
||||
"x_train = torch.tensor(data['x'].astype('float32'))\n",
|
||||
"y_train = torch.tensor(data['y'].transpose().astype('long'))\n",
|
||||
"x_test= torch.tensor(data['x_test'].astype('float32'))\n",
|
||||
"y_test = torch.tensor(data['y_test'].astype('long'))\n",
|
||||
"\n",
|
||||
"# load the data into a class that creates the batches\n",
|
||||
"data_loader = DataLoader(TensorDataset(x_train,y_train), batch_size=100, shuffle=True, worker_init_fn=np.random.seed(1))\n",
|
||||
"\n",
|
||||
"# Initialize model weights\n",
|
||||
"model.apply(weights_init)\n",
|
||||
"\n",
|
||||
"# loop over the dataset n_epoch times\n",
|
||||
"n_epoch = 50\n",
|
||||
"# store the loss and the % correct at each epoch\n",
|
||||
"errors_train = np.zeros((n_epoch))\n",
|
||||
"errors_test = np.zeros((n_epoch))\n",
|
||||
"\n",
|
||||
"for epoch in range(n_epoch):\n",
|
||||
" # loop over batches\n",
|
||||
" for i, batch in enumerate(data_loader):\n",
|
||||
" # retrieve inputs and labels for this batch\n",
|
||||
" x_batch, y_batch = batch\n",
|
||||
" # zero the parameter gradients\n",
|
||||
" optimizer.zero_grad()\n",
|
||||
" # forward pass -- calculate model output\n",
|
||||
" pred = model(x_batch)\n",
|
||||
" # compute the loss\n",
|
||||
" loss = loss_function(pred, y_batch)\n",
|
||||
" # backward pass\n",
|
||||
" loss.backward()\n",
|
||||
" # SGD update\n",
|
||||
" optimizer.step()\n",
|
||||
"\n",
|
||||
" # Run whole dataset to get statistics -- normally wouldn't do this\n",
|
||||
" pred_train = model(x_train)\n",
|
||||
" pred_test = model(x_test)\n",
|
||||
" _, predicted_train_class = torch.max(pred_train.data, 1)\n",
|
||||
" _, predicted_test_class = torch.max(pred_test.data, 1)\n",
|
||||
" errors_train[epoch] = 100 - 100 * (predicted_train_class == y_train).float().sum() / len(y_train)\n",
|
||||
" errors_test[epoch]= 100 - 100 * (predicted_test_class == y_test).float().sum() / len(y_test)\n",
|
||||
" print(f'Epoch {epoch:5d}, train error {errors_train[epoch]:3.2f}, test error {errors_test[epoch]:3.2f}')"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YFfVbTPE4BkJ"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Plot the results\n",
|
||||
"fig, ax = plt.subplots()\n",
|
||||
"ax.plot(errors_train,'r-',label='train')\n",
|
||||
"ax.plot(errors_test,'b-',label='test')\n",
|
||||
"ax.set_ylim(0,100); ax.set_xlim(0,n_epoch)\n",
|
||||
"ax.set_xlabel('Epoch'); ax.set_ylabel('Error')\n",
|
||||
"ax.set_title('TrainError %3.2f, Test Error %3.2f'%(errors_train[-1],errors_test[-1]))\n",
|
||||
"ax.legend()\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "FmGDd4vB8LyM"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"The best test performance is about 33%. Let's see if we can improve on that by augmenting the data."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "55XvoPDO8Qp-"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def augment(data_in):\n",
|
||||
" # Create output vector\n",
|
||||
" data_out = np.zeros_like(data_in)\n",
|
||||
"\n",
|
||||
" # TODO: Shift the input data by a random offset\n",
|
||||
" # (rotating, so points that would go off the end, are added back to the beginning)\n",
|
||||
" # Replace this line:\n",
|
||||
" data_out = np.zeros_like(data_in) ;\n",
|
||||
"\n",
|
||||
" # TODO: # Randomly scale the data by a factor drawn from a uniform distribution over [0.8,1.2]\n",
|
||||
" # Replace this line:\n",
|
||||
" data_out = np.array(data_out)\n",
|
||||
"\n",
|
||||
" return data_out"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "IP6z2iox8MOF"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"n_data_orig = data['x'].shape[0]\n",
|
||||
"# We'll double the amount o fdata\n",
|
||||
"n_data_augment = n_data_orig+4000\n",
|
||||
"augmented_x = np.zeros((n_data_augment, D_i))\n",
|
||||
"augmented_y = np.zeros(n_data_augment)\n",
|
||||
"# First n_data_orig rows are original data\n",
|
||||
"augmented_x[0:n_data_orig,:] = data['x']\n",
|
||||
"augmented_y[0:n_data_orig] = data['y']\n",
|
||||
"\n",
|
||||
"# Fill in rest of with augmented data\n",
|
||||
"for c_augment in range(n_data_orig, n_data_augment):\n",
|
||||
" # Choose a data point randomly\n",
|
||||
" random_data_index = random.randint(0, n_data_orig-1)\n",
|
||||
" # Augment the point and store\n",
|
||||
" augmented_x[c_augment,:] = augment(data['x'][random_data_index,:])\n",
|
||||
" augmented_y[c_augment] = data['y'][random_data_index]\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "bzN0lu5J95AJ"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# choose cross entropy loss function (equation 5.24)\n",
|
||||
"loss_function = torch.nn.CrossEntropyLoss()\n",
|
||||
"# construct SGD optimizer and initialize learning rate and momentum\n",
|
||||
"optimizer = torch.optim.SGD(model.parameters(), lr = 0.05, momentum=0.9)\n",
|
||||
"# object that decreases learning rate by half every 50 epochs\n",
|
||||
"scheduler = StepLR(optimizer, step_size=10, gamma=0.5)\n",
|
||||
"# create 100 dummy data points and store in data loader class\n",
|
||||
"x_train = torch.tensor(augmented_x.astype('float32'))\n",
|
||||
"y_train = torch.tensor(augmented_y.transpose().astype('long'))\n",
|
||||
"x_test= torch.tensor(data['x_test'].astype('float32'))\n",
|
||||
"y_test = torch.tensor(data['y_test'].astype('long'))\n",
|
||||
"\n",
|
||||
"# load the data into a class that creates the batches\n",
|
||||
"data_loader = DataLoader(TensorDataset(x_train,y_train), batch_size=100, shuffle=True, worker_init_fn=np.random.seed(1))\n",
|
||||
"\n",
|
||||
"# Initialize model weights\n",
|
||||
"model.apply(weights_init)\n",
|
||||
"\n",
|
||||
"# loop over the dataset n_epoch times\n",
|
||||
"n_epoch = 50\n",
|
||||
"# store the loss and the % correct at each epoch\n",
|
||||
"errors_train_aug = np.zeros((n_epoch))\n",
|
||||
"errors_test_aug = np.zeros((n_epoch))\n",
|
||||
"\n",
|
||||
"for epoch in range(n_epoch):\n",
|
||||
" # loop over batches\n",
|
||||
" for i, batch in enumerate(data_loader):\n",
|
||||
" # retrieve inputs and labels for this batch\n",
|
||||
" x_batch, y_batch = batch\n",
|
||||
" # zero the parameter gradients\n",
|
||||
" optimizer.zero_grad()\n",
|
||||
" # forward pass -- calculate model output\n",
|
||||
" pred = model(x_batch)\n",
|
||||
" # compute the loss\n",
|
||||
" loss = loss_function(pred, y_batch)\n",
|
||||
" # backward pass\n",
|
||||
" loss.backward()\n",
|
||||
" # SGD update\n",
|
||||
" optimizer.step()\n",
|
||||
"\n",
|
||||
" # Run whole dataset to get statistics -- normally wouldn't do this\n",
|
||||
" pred_train = model(x_train)\n",
|
||||
" pred_test = model(x_test)\n",
|
||||
" _, predicted_train_class = torch.max(pred_train.data, 1)\n",
|
||||
" _, predicted_test_class = torch.max(pred_test.data, 1)\n",
|
||||
" errors_train_aug[epoch] = 100 - 100 * (predicted_train_class == y_train).float().sum() / len(y_train)\n",
|
||||
" errors_test_aug[epoch]= 100 - 100 * (predicted_test_class == y_test).float().sum() / len(y_test)\n",
|
||||
" print(f'Epoch {epoch:5d}, train error {errors_train_aug[epoch]:3.2f}, test error {errors_test_aug[epoch]:3.2f}')"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "hZUNrXpS_kRs"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Plot the results\n",
|
||||
"fig, ax = plt.subplots()\n",
|
||||
"ax.plot(errors_train,'r-',label='train')\n",
|
||||
"ax.plot(errors_test,'b-',label='test')\n",
|
||||
"ax.plot(errors_test_aug,'g-',label='test (augmented)')\n",
|
||||
"ax.set_ylim(0,100); ax.set_xlim(0,n_epoch)\n",
|
||||
"ax.set_xlabel('Epoch'); ax.set_ylabel('Error')\n",
|
||||
"ax.set_title('TrainError %3.2f, Test Error %3.2f'%(errors_train_aug[-1],errors_test_aug[-1]))\n",
|
||||
"ax.legend()\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "IcnAW4ixBnuc"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Hopefully, you should see an improvement in performance when we augment the data."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "jgsR7ScJHc9b"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
376
Notebooks/Chap10/10_1_1D_Convolution.ipynb
Normal file
@@ -0,0 +1,376 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyPHUNRkJMI5LujaxIXNV60m",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap10/10_1_1D_Convolution.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 10.1: 1D Convolution**\n",
|
||||
"\n",
|
||||
"This notebook investigates 1D convolutional layers.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "el8l05WQEO46"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "nw7k5yCtOzoK"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define a signal that we can apply convolution to\n",
|
||||
"x = [5.2, 5.3, 5.4, 5.1, 10.1, 10.3, 9.9, 10.3, 3.2, 3.4, 3.3, 3.1]"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "lSSHuoEqO3Ly"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Draw the signal\n",
|
||||
"fig,ax = plt.subplots()\n",
|
||||
"ax.plot(x, 'k-')\n",
|
||||
"ax.set_xlim(0,11)\n",
|
||||
"ax.set_ylim(0, 12)\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "zVssv_wiREc2"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Now let's define a zero-padded convolution operation\n",
|
||||
"# with a convolution kernel size of 3, a stride of 1, and a dilation of 0\n",
|
||||
"# as in figure 10.2a-c. Write it yourself, don't call a library routine!\n",
|
||||
"# Don't forget that Python arrays are indexed from zero, not from 1 as in the book figures\n",
|
||||
"def conv_3_1_0_zp(x_in, omega):\n",
|
||||
" x_out = np.zeros_like(x_in)\n",
|
||||
" # TODO -- write this function\n",
|
||||
" # replace this line\n",
|
||||
" x_out = x_out\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" return x_out"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "MmfXED12RvNq"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's see what kind of things convolution can do\n",
|
||||
"First, it can average nearby values, smoothing the function:"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Fof_Rs98Zovq"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"\n",
|
||||
"omega = [0.33,0.33,0.33]\n",
|
||||
"h = conv_3_1_0_zp(x, omega)\n",
|
||||
"\n",
|
||||
"# Check that you have computed this correctly\n",
|
||||
"print(f\"Sum of output is {np.sum(h):3.3}, should be 71.1\")\n",
|
||||
"\n",
|
||||
"# Draw the signal\n",
|
||||
"fig,ax = plt.subplots()\n",
|
||||
"ax.plot(x, 'k-',label='before')\n",
|
||||
"ax.plot(h, 'r-',label='after')\n",
|
||||
"ax.set_xlim(0,11)\n",
|
||||
"ax.set_ylim(0, 12)\n",
|
||||
"ax.legend()\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "HOcPZR6iWXsa"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Notice how the red function is a smoothed version of the black one as it has averaged adjacent values. The first and last outputs are considerably lower than the original curve though. Make sure that you understand why!<br><br>\n",
|
||||
"\n",
|
||||
"With different weights, the convolution can be used to find sharp changes in the function:"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "PBkNKUylZr-k"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"\n",
|
||||
"omega = [-0.5,0,0.5]\n",
|
||||
"h2 = conv_3_1_0_zp(x, omega)\n",
|
||||
"\n",
|
||||
"# Draw the signal\n",
|
||||
"fig,ax = plt.subplots()\n",
|
||||
"ax.plot(x, 'k-',label='before')\n",
|
||||
"ax.plot(h2, 'r-',label='after')\n",
|
||||
"ax.set_xlim(0,11)\n",
|
||||
"# ax.set_ylim(0, 12)\n",
|
||||
"ax.legend()\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "o8T5WKeuZrgS"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Notice that the convolution has a peak where the original function went up and trough where it went down. It is roughly zero where the function is locally flat. This convolution approximates a derivative. <br> <br>\n",
|
||||
"\n",
|
||||
"Now let's define the convolutions from figure 10.3. "
|
||||
],
|
||||
"metadata": {
|
||||
"id": "ogfCVThJgtPx"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Now let's define a zero-padded convolution operation\n",
|
||||
"# with a convolution kernel size of 3, a stride of 2, and a dilation of 0\n",
|
||||
"# as in figure 10.2a-c. Write it yourself, don't call a library routine!\n",
|
||||
"def conv_3_2_0_zp(x_in, omega):\n",
|
||||
" x_out = np.zeros(int(np.ceil(len(x_in)/2)))\n",
|
||||
" # TODO -- write this function\n",
|
||||
" # replace this line\n",
|
||||
" x_out = x_out\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" return x_out"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "5QYrQmFMiDBj"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
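As another hedged sketch (again under a hypothetical `_ref` name), the stride-2 variant only changes which window each output reads: output position i is centred on input position 2i, so the output has roughly half the length:

```python
# Sketch of the stride-2 variant: output i reads the zero-padded window
# centred on input 2*i, giving ceil(len(x_in)/2) outputs.
def conv_3_2_0_zp_ref(x_in, omega):
    x_pad = np.concatenate([[0.0], np.asarray(x_in, dtype=float), [0.0]])
    n_out = int(np.ceil(len(x_in) / 2))
    x_out = np.zeros(n_out)
    for i in range(n_out):
        x_out[i] = np.sum(x_pad[2 * i:2 * i + 3] * omega)
    return x_out
```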
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"omega = [0.33,0.33,0.33]\n",
|
||||
"h3 = conv_3_2_0_zp(x, omega)\n",
|
||||
"\n",
|
||||
"# If you have done this right, the output length should be six and it should\n",
|
||||
"# contain every other value from the original convolution with stride 1\n",
|
||||
"print(h)\n",
|
||||
"print(h3)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "CD96lnDHX72A"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Now let's define a zero-padded convolution operation\n",
|
||||
"# with a convolution kernel size of 5, a stride of 1, and a dilation of 0\n",
|
||||
"# as in figure 10.2a-c. Write it yourself, don't call a library routine!\n",
|
||||
"def conv_5_1_0_zp(x_in, omega):\n",
|
||||
" x_out = np.zeros_like(x_in)\n",
|
||||
" # TODO -- write this function\n",
|
||||
" # replace this line\n",
|
||||
" x_out = x_out\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" return x_out"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "lw46-gNUjDw7"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"\n",
|
||||
"omega2 = [0.2, 0.2, 0.2, 0.2, 0.2]\n",
|
||||
"h4 = conv_5_1_0_zp(x, omega2)\n",
|
||||
"\n",
|
||||
"# Check that you have computed this correctly\n",
|
||||
"print(f\"Sum of output is {np.sum(h4):3.3}, should be 69.6\")\n",
|
||||
"\n",
|
||||
"# Draw the signal\n",
|
||||
"fig,ax = plt.subplots()\n",
|
||||
"ax.plot(x, 'k-',label='before')\n",
|
||||
"ax.plot(h4, 'r-',label='after')\n",
|
||||
"ax.set_xlim(0,11)\n",
|
||||
"ax.set_ylim(0, 12)\n",
|
||||
"ax.legend()\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "JkKBL-nFk4bf"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Finally let's define a zero-padded convolution operation\n",
|
||||
"# with a convolution kernel size of 3, a stride of 1, and a dilation of 1\n",
|
||||
"# as in figure 10.2a-c. Write it yourself, don't call a library routine!\n",
|
||||
"# Don't forget that Python arrays are indexed from zero, not from 1 as in the book figures\n",
|
||||
"def conv_3_1_1_zp(x_in, omega):\n",
|
||||
" x_out = np.zeros_like(x_in)\n",
|
||||
" # TODO -- write this function\n",
|
||||
" # replace this line\n",
|
||||
" x_out = x_out\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" return x_out"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "_aBcW46AljI0"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
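For the dilated case, here is a sketch that assumes the book's convention that a dilation of 1 leaves a one-sample gap between kernel taps, so a size-3 kernel touches inputs i-2, i, and i+2; the `_ref` name is again ours:

```python
# Sketch of kernel size 3, stride 1, dilation 1 (book convention assumed):
# taps at i-2, i and i+2, with zero padding of width two at each end.
def conv_3_1_1_zp_ref(x_in, omega):
    x_pad = np.concatenate([[0.0, 0.0], np.asarray(x_in, dtype=float), [0.0, 0.0]])
    x_out = np.zeros(len(x_in))
    for i in range(len(x_in)):
        x_out[i] = np.sum(x_pad[i:i + 5:2] * omega)  # picks x_in[i-2], x_in[i], x_in[i+2]
    return x_out
```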
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"omega = [0.33,0.33,0.33]\n",
|
||||
"h5 = conv_3_1_1_zp(x, omega)\n",
|
||||
"\n",
|
||||
"# Check that you have computed this correctly\n",
|
||||
"print(f\"Sum of output is {np.sum(h5):3.3}, should be 68.3\")\n",
|
||||
"\n",
|
||||
"# Draw the signal\n",
|
||||
"fig,ax = plt.subplots()\n",
|
||||
"ax.plot(x, 'k-',label='before')\n",
|
||||
"ax.plot(h5, 'r-',label='after')\n",
|
||||
"ax.set_xlim(0,11)\n",
|
||||
"ax.set_ylim(0, 12)\n",
|
||||
"ax.legend()\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "En-ByCqWlvMI"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Finally, let's investigate representing convolutions as full matrices, and show we get the same answer."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "loBwu125lXx1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Compute matrix in figure 10.4 d\n",
|
||||
"def get_conv_mat_3_1_0_zp(n_out, omega):\n",
|
||||
" omega_mat = np.zeros((n_out,n_out))\n",
|
||||
" # TODO Fill in this matix\n",
|
||||
" # Replace this line:\n",
|
||||
" omega_mat = omega_mat\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" return omega_mat"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "U2RFWfGgs72j"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
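One way to fill in the matrix (a sketch, not the official answer): each row carries the kernel along the diagonal, and taps that fall off the ends are simply dropped, which is exactly what the zero padding does:

```python
import numpy as np

# Sketch of the banded matrix of figure 10.4d: row i holds omega centred
# on column i, so omega_mat @ x reproduces the zero-padded convolution.
def get_conv_mat_3_1_0_zp_ref(n_out, omega):
    omega_mat = np.zeros((n_out, n_out))
    for i in range(n_out):
        for k in range(3):
            j = i + k - 1            # column of the k-th kernel tap
            if 0 <= j < n_out:       # out-of-range taps correspond to the zero padding
                omega_mat[i, j] = omega[k]
    return omega_mat
```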
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Run original convolution\n",
|
||||
"omega = np.array([-1.0,0.5,-0.2])\n",
|
||||
"h6 = conv_3_1_0_zp(x, omega)\n",
|
||||
"print(h6)\n",
|
||||
"\n",
|
||||
"# If you have done this right, you should get the same answer\n",
|
||||
"omega_mat = get_conv_mat_3_1_0_zp(len(x), omega)\n",
|
||||
"h7 = np.matmul(omega_mat, x)\n",
|
||||
"print(h7)\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "20IYxku8lMty"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"TODO: What do you expect to happen if we apply the last convolution twice? Can this be represented as a single convolution? If so, then what is it?"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "rYoQVhBfu8R4"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
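As a hedged hint for the closing TODO above: away from the zero-padded edges, applying one convolution and then another is itself a single convolution, and for the formulation used in this notebook the combined kernel is `np.convolve` of the two kernels, so a size-3 kernel applied twice acts like one size-5 kernel:

```python
import numpy as np

# Composing two convolutions: the combined kernel is the convolution of the
# two kernels (edge effects from the zero padding aside).
omega = np.array([-1.0, 0.5, -0.2])
omega_twice = np.convolve(omega, omega)
print(omega_twice)  # length-5 kernel equivalent to applying omega twice
```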
|
||||
253  Notebooks/Chap10/10_2_Convolution_for_MNIST_1D.ipynb  Normal file
@@ -0,0 +1,253 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyOgDisWDe/zHpfTGCH8AZ3i",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap10/10_2_Convolution_for_MNIST_1D.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 10.2: Convolution for MNIST-1D**\n",
|
||||
"\n",
|
||||
"This notebook investigates a 1D convolutional network for MNIST-1D as in figure 10.7 and 10.8a.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "t9vk9Elugvmi"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Run this if you're in a Colab to make a local copy of the MNIST 1D repository\n",
|
||||
"!git clone https://github.com/greydanus/mnist1d"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "D5yLObtZCi9J"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import os\n",
|
||||
"import torch, torch.nn as nn\n",
|
||||
"from torch.utils.data import TensorDataset, DataLoader\n",
|
||||
"from torch.optim.lr_scheduler import StepLR\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import mnist1d\n",
|
||||
"import random"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YrXWAH7sUWvU"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"args = mnist1d.data.get_dataset_args()\n",
|
||||
"data = mnist1d.data.get_dataset(args, path='./mnist1d_data.pkl', download=False, regenerate=False)\n",
|
||||
"\n",
|
||||
"# The training and test input and outputs are in\n",
|
||||
"# data['x'], data['y'], data['x_test'], and data['y_test']\n",
|
||||
"print(\"Examples in training set: {}\".format(len(data['y'])))\n",
|
||||
"print(\"Examples in test set: {}\".format(len(data['y_test'])))\n",
|
||||
"print(\"Length of each example: {}\".format(data['x'].shape[-1]))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "twI72ZCrCt5z"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Load in the data\n",
|
||||
"train_data_x = data['x'].transpose()\n",
|
||||
"train_data_y = data['y']\n",
|
||||
"val_data_x = data['x_test'].transpose()\n",
|
||||
"val_data_y = data['y_test']\n",
|
||||
"# Print out sizes\n",
|
||||
"print(\"Train data: %d examples (columns), each of which has %d dimensions (rows)\"%((train_data_x.shape[1],train_data_x.shape[0])))\n",
|
||||
"print(\"Validation data: %d examples (columns), each of which has %d dimensions (rows)\"%((val_data_x.shape[1],val_data_x.shape[0])))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "8bKADvLHbiV5"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Define the network"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "_sFvRDGrl4qe"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# There are 40 input dimensions and 10 output dimensions for this data\n",
|
||||
"# The inputs correspond to the 40 offsets in the MNIST1D template.\n",
|
||||
"D_i = 40\n",
|
||||
"# The outputs correspond to the 10 digits\n",
|
||||
"D_o = 10\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# TODO Create a model with the following layers\n",
|
||||
"# 1. Convolutional layer, (input=length 40 and 1 channel, kernel size 3x3, stride 2, padding=\"valid\", 15 output channels )\n",
|
||||
"# 2. ReLU\n",
|
||||
"# 3. Convolutional layer, (input=length 19 and 15 channels, kernel size 3x3, stride 2, padding=\"valid\", 15 output channels )\n",
|
||||
"# 4. ReLU\n",
|
||||
"# 5. Convolutional layer, (input=length 9 and 15 channels, kernel size 3x3, stride 2, padding=\"valid\", 15 output channels)\n",
|
||||
"# 6. ReLU\n",
|
||||
"# 7. Flatten (converts 4x15) to length 60\n",
|
||||
"# 8. Linear layer (input size = 60, output size = 10)\n",
|
||||
"# References:\n",
|
||||
"# https://pytorch.org/docs/1.13/generated/torch.nn.Conv1d.html?highlight=conv1d#torch.nn.Conv1d\n",
|
||||
"# https://pytorch.org/docs/stable/generated/torch.nn.Flatten.html\n",
|
||||
"# https://pytorch.org/docs/1.13/generated/torch.nn.Linear.html?highlight=linear#torch.nn.Linear\n",
|
||||
"\n",
|
||||
"# Replace the following function:\n",
|
||||
"model = nn.Sequential(\n",
|
||||
"nn.Flatten(),\n",
|
||||
"nn.Linear(40, 100),\n",
|
||||
"nn.ReLU(),\n",
|
||||
"nn.Linear(100, 100),\n",
|
||||
"nn.ReLU(),\n",
|
||||
"nn.Linear(100, 10))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "FslroPJJffrh"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
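For reference, one possible way to assemble the layers listed in the TODO comments above (a sketch, not the notebook's official solution; the layer lengths follow from the valid, stride-2 convolutions):

```python
import torch.nn as nn

# Sketch of the convolutional model described above: three valid, stride-2
# Conv1d layers with 15 channels, then flatten and a linear readout.
model_sketch = nn.Sequential(
    nn.Conv1d(1, 15, kernel_size=3, stride=2, padding=0),   # length 40 -> 19
    nn.ReLU(),
    nn.Conv1d(15, 15, kernel_size=3, stride=2, padding=0),  # length 19 -> 9
    nn.ReLU(),
    nn.Conv1d(15, 15, kernel_size=3, stride=2, padding=0),  # length 9 -> 4
    nn.ReLU(),
    nn.Flatten(),                                           # 15 channels x 4 -> 60
    nn.Linear(60, 10))
```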
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# He initialization of weights\n",
|
||||
"def weights_init(layer_in):\n",
|
||||
" if isinstance(layer_in, nn.Linear):\n",
|
||||
" nn.init.kaiming_uniform_(layer_in.weight)\n",
|
||||
" layer_in.bias.data.fill_(0.0)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YgLaex1pfhqz"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# choose cross entropy loss function (equation 5.24 in the loss notes)\n",
|
||||
"loss_function = nn.CrossEntropyLoss()\n",
|
||||
"# construct SGD optimizer and initialize learning rate and momentum\n",
|
||||
"optimizer = torch.optim.SGD(model.parameters(), lr = 0.05, momentum=0.9)\n",
|
||||
"# object that decreases learning rate by half every 20 epochs\n",
|
||||
"scheduler = StepLR(optimizer, step_size=20, gamma=0.5)\n",
|
||||
"# create 100 dummy data points and store in data loader class\n",
|
||||
"x_train = torch.tensor(train_data_x.transpose().astype('float32'))\n",
|
||||
"y_train = torch.tensor(train_data_y.astype('long'))\n",
|
||||
"x_val= torch.tensor(val_data_x.transpose().astype('float32'))\n",
|
||||
"y_val = torch.tensor(val_data_y.astype('long'))\n",
|
||||
"\n",
|
||||
"# load the data into a class that creates the batches\n",
|
||||
"data_loader = DataLoader(TensorDataset(x_train,y_train), batch_size=100, shuffle=True, worker_init_fn=np.random.seed(1))\n",
|
||||
"\n",
|
||||
"# Initialize model weights\n",
|
||||
"model.apply(weights_init)\n",
|
||||
"\n",
|
||||
"# loop over the dataset n_epoch times\n",
|
||||
"n_epoch = 100\n",
|
||||
"# store the loss and the % correct at each epoch\n",
|
||||
"losses_train = np.zeros((n_epoch))\n",
|
||||
"errors_train = np.zeros((n_epoch))\n",
|
||||
"losses_val = np.zeros((n_epoch))\n",
|
||||
"errors_val = np.zeros((n_epoch))\n",
|
||||
"\n",
|
||||
"for epoch in range(n_epoch):\n",
|
||||
" # loop over batches\n",
|
||||
" for i, data in enumerate(data_loader):\n",
|
||||
" # retrieve inputs and labels for this batch\n",
|
||||
" x_batch, y_batch = data\n",
|
||||
" # zero the parameter gradients\n",
|
||||
" optimizer.zero_grad()\n",
|
||||
" # forward pass -- calculate model output\n",
|
||||
" pred = model(x_batch[:,None,:])\n",
|
||||
" # compute the loss\n",
|
||||
" loss = loss_function(pred, y_batch)\n",
|
||||
" # backward pass\n",
|
||||
" loss.backward()\n",
|
||||
" # SGD update\n",
|
||||
" optimizer.step()\n",
|
||||
"\n",
|
||||
" # Run whole dataset to get statistics -- normally wouldn't do this\n",
|
||||
" pred_train = model(x_train[:,None,:])\n",
|
||||
" pred_val = model(x_val[:,None,:])\n",
|
||||
" _, predicted_train_class = torch.max(pred_train.data, 1)\n",
|
||||
" _, predicted_val_class = torch.max(pred_val.data, 1)\n",
|
||||
" errors_train[epoch] = 100 - 100 * (predicted_train_class == y_train).float().sum() / len(y_train)\n",
|
||||
" errors_val[epoch]= 100 - 100 * (predicted_val_class == y_val).float().sum() / len(y_val)\n",
|
||||
" losses_train[epoch] = loss_function(pred_train, y_train).item()\n",
|
||||
" losses_val[epoch]= loss_function(pred_val, y_val).item()\n",
|
||||
" print(f'Epoch {epoch:5d}, train loss {losses_train[epoch]:.6f}, train error {errors_train[epoch]:3.2f}, val loss {losses_val[epoch]:.6f}, percent error {errors_val[epoch]:3.2f}')\n",
|
||||
"\n",
|
||||
" # tell scheduler to consider updating learning rate\n",
|
||||
" scheduler.step()\n",
|
||||
"\n",
|
||||
"# Plot the results\n",
|
||||
"fig, ax = plt.subplots()\n",
|
||||
"ax.plot(errors_train,'r-',label='train')\n",
|
||||
"ax.plot(errors_val,'b-',label='validation')\n",
|
||||
"ax.set_ylim(0,100); ax.set_xlim(0,n_epoch)\n",
|
||||
"ax.set_xlabel('Epoch'); ax.set_ylabel('Error')\n",
|
||||
"ax.set_title('Part I: Validation Result %3.2f'%(errors_val[-1]))\n",
|
||||
"ax.legend()\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "NYw8I_3mmX5c"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
|
||||
]
|
||||
}
|
||||
431  Notebooks/Chap10/10_3_2D_Convolution.ipynb  Normal file
@@ -0,0 +1,431 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyMmbD0cKYvIHXbKX4AupA1x",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap10/10_3_2D_Convolution.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 10.3: 2D Convolution**\n",
|
||||
"\n",
|
||||
"This notebook investigates the 2D convolution operation. It asks you to hand code the convolution so we can be sure that we are computing the same thing as in PyTorch. The next notebook uses the convolutional layers in PyTorch directly.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "VB_crnDGASX-"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import torch\n",
|
||||
"# Set to print in reasonable form\n",
|
||||
"np.set_printoptions(precision=3, floatmode=\"fixed\")\n",
|
||||
"torch.set_printoptions(precision=3)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YAoWDUb_DezG"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"This routine performs convolution in PyTorch"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "eAwYWXzAElHG"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Perform convolution in PyTorch\n",
|
||||
"def conv_pytorch(image, conv_weights, stride=1, pad =1):\n",
|
||||
" # Convert image and kernel to tensors\n",
|
||||
" image_tensor = torch.from_numpy(image) # (batchSize, channelsIn, imageHeightIn, =imageWidthIn)\n",
|
||||
" conv_weights_tensor = torch.from_numpy(conv_weights) # (channelsOut, channelsIn, kernelHeight, kernelWidth)\n",
|
||||
" # Do the convolution\n",
|
||||
" output_tensor = torch.nn.functional.conv2d(image_tensor, conv_weights_tensor, stride=stride, padding=pad)\n",
|
||||
" # Convert back from PyTorch and return\n",
|
||||
" return(output_tensor.numpy()) # (batchSize channelsOut imageHeightOut imageHeightIn)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "xsmUIN-3BlWr"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"First we'll start with the simplest 2D convolution. Just one channel in and one channel out. A single image in the batch."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "A3Sm8bUWtDNO"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Perform convolution in numpy\n",
|
||||
"def conv_numpy_1(image, weights, pad=1):\n",
|
||||
"\n",
|
||||
" # Perform zero padding\n",
|
||||
" if pad != 0:\n",
|
||||
" image = np.pad(image, ((0, 0), (0 ,0), (pad, pad), (pad, pad)),'constant')\n",
|
||||
"\n",
|
||||
" # Get sizes of image array and kernel weights\n",
|
||||
" batchSize, channelsIn, imageHeightIn, imageWidthIn = image.shape\n",
|
||||
" channelsOut, channelsIn, kernelHeight, kernelWidth = weights.shape\n",
|
||||
"\n",
|
||||
" # Get size of output arrays\n",
|
||||
" imageHeightOut = np.floor(1 + imageHeightIn - kernelHeight).astype(int)\n",
|
||||
" imageWidthOut = np.floor(1 + imageWidthIn - kernelWidth).astype(int)\n",
|
||||
"\n",
|
||||
" # Create output\n",
|
||||
" out = np.zeros((batchSize, channelsOut, imageHeightOut, imageWidthOut), dtype=np.float32)\n",
|
||||
"\n",
|
||||
" for c_y in range(imageHeightOut):\n",
|
||||
" for c_x in range(imageWidthOut):\n",
|
||||
" for c_kernel_y in range(kernelHeight):\n",
|
||||
" for c_kernel_x in range(kernelWidth):\n",
|
||||
" # TODO -- Retrieve the image pixel and the weight from the convolution\n",
|
||||
" # Only one image in batch, one input channel and one output channel, so these indices should all be zero\n",
|
||||
" # Replace the two lines below\n",
|
||||
" this_pixel_value = 1.0\n",
|
||||
" this_weight = 1.0\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" # Multiply these together and add to the output at this position\n",
|
||||
" out[0, 0, c_y, c_x] += np.sum(this_pixel_value * this_weight)\n",
|
||||
"\n",
|
||||
" return out"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "EF8FWONVLo1Q"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
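A hedged reference version of the single-channel routine above (one possible solution, not the official one): after padding, output pixel (c_y, c_x) reads the window whose top-left corner is (c_y, c_x), so the two TODO lines reduce to index arithmetic:

```python
import numpy as np

# Complete sketch of the single-channel, stride-1, zero-padded case.
def conv_numpy_1_ref(image, weights, pad=1):
    if pad != 0:
        image = np.pad(image, ((0, 0), (0, 0), (pad, pad), (pad, pad)), 'constant')
    batchSize, channelsIn, imageHeightIn, imageWidthIn = image.shape
    channelsOut, channelsIn, kernelHeight, kernelWidth = weights.shape
    imageHeightOut = 1 + imageHeightIn - kernelHeight
    imageWidthOut = 1 + imageWidthIn - kernelWidth
    out = np.zeros((batchSize, channelsOut, imageHeightOut, imageWidthOut), dtype=np.float32)
    for c_y in range(imageHeightOut):
        for c_x in range(imageWidthOut):
            for c_kernel_y in range(kernelHeight):
                for c_kernel_x in range(kernelWidth):
                    # window top-left corner is (c_y, c_x) in the padded image
                    this_pixel_value = image[0, 0, c_y + c_kernel_y, c_x + c_kernel_x]
                    this_weight = weights[0, 0, c_kernel_y, c_kernel_x]
                    out[0, 0, c_y, c_x] += this_pixel_value * this_weight
    return out
```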
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Set random seed so we always get same answer\n",
|
||||
"np.random.seed(1)\n",
|
||||
"n_batch = 1\n",
|
||||
"image_height = 4\n",
|
||||
"image_width = 6\n",
|
||||
"channels_in = 1\n",
|
||||
"kernel_size = 3\n",
|
||||
"channels_out = 1\n",
|
||||
"\n",
|
||||
"# Create random input image\n",
|
||||
"input_image= np.random.normal(size=(n_batch, channels_in, image_height, image_width))\n",
|
||||
"# Create random convolution kernel weights\n",
|
||||
"conv_weights = np.random.normal(size=(channels_out, channels_in, kernel_size, kernel_size))\n",
|
||||
"\n",
|
||||
"# Perform convolution using PyTorch\n",
|
||||
"conv_results_pytorch = conv_pytorch(input_image, conv_weights, stride=1, pad=1)\n",
|
||||
"print(\"PyTorch Results\")\n",
|
||||
"print(conv_results_pytorch)\n",
|
||||
"\n",
|
||||
"# Perform convolution in numpy\n",
|
||||
"print(\"Your results\")\n",
|
||||
"conv_results_numpy = conv_numpy_1(input_image, conv_weights)\n",
|
||||
"print(conv_results_numpy)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "iw9KqXZTHN8v"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Let's now add in the possibility of using different strides"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "IYj_lxeGzaHX"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Perform convolution in numpy\n",
|
||||
"def conv_numpy_2(image, weights, stride=1, pad=1):\n",
|
||||
"\n",
|
||||
" # Perform zero padding\n",
|
||||
" if pad != 0:\n",
|
||||
" image = np.pad(image, ((0, 0), (0 ,0), (pad, pad), (pad, pad)),'constant')\n",
|
||||
"\n",
|
||||
" # Get sizes of image array and kernel weights\n",
|
||||
" batchSize, channelsIn, imageHeightIn, imageWidthIn = image.shape\n",
|
||||
" channelsOut, channelsIn, kernelHeight, kernelWidth = weights.shape\n",
|
||||
"\n",
|
||||
" # Get size of output arrays\n",
|
||||
" imageHeightOut = np.floor(1 + (imageHeightIn - kernelHeight) / stride).astype(int)\n",
|
||||
" imageWidthOut = np.floor(1 + (imageWidthIn - kernelWidth) / stride).astype(int)\n",
|
||||
"\n",
|
||||
" # Create output\n",
|
||||
" out = np.zeros((batchSize, channelsOut, imageHeightOut, imageWidthOut), dtype=np.float32)\n",
|
||||
"\n",
|
||||
" for c_y in range(imageHeightOut):\n",
|
||||
" for c_x in range(imageWidthOut):\n",
|
||||
" for c_kernel_y in range(kernelHeight):\n",
|
||||
" for c_kernel_x in range(kernelWidth):\n",
|
||||
" # TODO -- Retrieve the image pixel and the weight from the convolution\n",
|
||||
" # Only one image in batch, one input channel and one output channel, so these indices should all be zero\n",
|
||||
" # Replace the two lines below\n",
|
||||
" this_pixel_value = 1.0\n",
|
||||
" this_weight = 1.0\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" # Multiply these together and add to the output at this position\n",
|
||||
" out[0, 0, c_y, c_x] += np.sum(this_pixel_value * this_weight)\n",
|
||||
"\n",
|
||||
" return out"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "GiujmLhqHN1F"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Set random seed so we always get same answer\n",
|
||||
"np.random.seed(1)\n",
|
||||
"n_batch = 1\n",
|
||||
"image_height = 12\n",
|
||||
"image_width = 10\n",
|
||||
"channels_in = 1\n",
|
||||
"kernel_size = 3\n",
|
||||
"channels_out = 1\n",
|
||||
"stride = 2\n",
|
||||
"\n",
|
||||
"# Create random input image\n",
|
||||
"input_image= np.random.normal(size=(n_batch, channels_in, image_height, image_width))\n",
|
||||
"# Create random convolution kernel weights\n",
|
||||
"conv_weights = np.random.normal(size=(channels_out, channels_in, kernel_size, kernel_size))\n",
|
||||
"\n",
|
||||
"# Perform convolution using PyTorch\n",
|
||||
"conv_results_pytorch = conv_pytorch(input_image, conv_weights, stride, pad=1)\n",
|
||||
"print(\"PyTorch Results\")\n",
|
||||
"print(conv_results_pytorch)\n",
|
||||
"\n",
|
||||
"# Perform convolution in numpy\n",
|
||||
"print(\"Your results\")\n",
|
||||
"conv_results_numpy = conv_numpy_2(input_image, conv_weights, stride, pad=1)\n",
|
||||
"print(conv_results_numpy)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "FeJy6Bvozgxq"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now we'll introduce multiple input and output channels"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "3flq1Wan2gX-"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Perform convolution in numpy\n",
|
||||
"def conv_numpy_3(image, weights, stride=1, pad=1):\n",
|
||||
"\n",
|
||||
" # Perform zero padding\n",
|
||||
" if pad != 0:\n",
|
||||
" image = np.pad(image, ((0, 0), (0 ,0), (pad, pad), (pad, pad)),'constant')\n",
|
||||
"\n",
|
||||
" # Get sizes of image array and kernel weights\n",
|
||||
" batchSize, channelsIn, imageHeightIn, imageWidthIn = image.shape\n",
|
||||
" channelsOut, channelsIn, kernelHeight, kernelWidth = weights.shape\n",
|
||||
"\n",
|
||||
" # Get size of output arrays\n",
|
||||
" imageHeightOut = np.floor(1 + (imageHeightIn - kernelHeight) / stride).astype(int)\n",
|
||||
" imageWidthOut = np.floor(1 + (imageWidthIn - kernelWidth) / stride).astype(int)\n",
|
||||
"\n",
|
||||
" # Create output\n",
|
||||
" out = np.zeros((batchSize, channelsOut, imageHeightOut, imageWidthOut), dtype=np.float32)\n",
|
||||
"\n",
|
||||
" for c_y in range(imageHeightOut):\n",
|
||||
" for c_x in range(imageWidthOut):\n",
|
||||
" for c_channel_out in range(channelsOut):\n",
|
||||
" for c_channel_in in range(channelsIn):\n",
|
||||
" for c_kernel_y in range(kernelHeight):\n",
|
||||
" for c_kernel_x in range(kernelWidth):\n",
|
||||
" # TODO -- Retrieve the image pixel and the weight from the convolution\n",
|
||||
" # Only one image in batch so this index should be zero\n",
|
||||
" # Replace the two lines below\n",
|
||||
" this_pixel_value = 1.0\n",
|
||||
" this_weight = 1.0\n",
|
||||
"\n",
|
||||
" # Multiply these together and add to the output at this position\n",
|
||||
" out[0, c_channel_out, c_y, c_x] += np.sum(this_pixel_value * this_weight)\n",
|
||||
" return out"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "AvdRWGiU2ppX"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Set random seed so we always get same answer\n",
|
||||
"np.random.seed(1)\n",
|
||||
"n_batch = 1\n",
|
||||
"image_height = 4\n",
|
||||
"image_width = 6\n",
|
||||
"channels_in = 5\n",
|
||||
"kernel_size = 3\n",
|
||||
"channels_out = 2\n",
|
||||
"\n",
|
||||
"# Create random input image\n",
|
||||
"input_image= np.random.normal(size=(n_batch, channels_in, image_height, image_width))\n",
|
||||
"# Create random convolution kernel weights\n",
|
||||
"conv_weights = np.random.normal(size=(channels_out, channels_in, kernel_size, kernel_size))\n",
|
||||
"\n",
|
||||
"# Perform convolution using PyTorch\n",
|
||||
"conv_results_pytorch = conv_pytorch(input_image, conv_weights, stride=1, pad=1)\n",
|
||||
"print(\"PyTorch Results\")\n",
|
||||
"print(conv_results_pytorch)\n",
|
||||
"\n",
|
||||
"# Perform convolution in numpy\n",
|
||||
"print(\"Your results\")\n",
|
||||
"conv_results_numpy = conv_numpy_3(input_image, conv_weights, stride=1, pad=1)\n",
|
||||
"print(conv_results_numpy)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "mdSmjfvY4li2"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now we'll do the full convolution with multiple images (batch size > 1), and multiple input channels, multiple output channels."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Q2MUFebdsJbH"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Perform convolution in numpy\n",
|
||||
"def conv_numpy_4(image, weights, stride=1, pad=1):\n",
|
||||
"\n",
|
||||
" # Perform zero padding\n",
|
||||
" if pad != 0:\n",
|
||||
" image = np.pad(image, ((0, 0), (0 ,0), (pad, pad), (pad, pad)),'constant')\n",
|
||||
"\n",
|
||||
" # Get sizes of image array and kernel weights\n",
|
||||
" batchSize, channelsIn, imageHeightIn, imageWidthIn = image.shape\n",
|
||||
" channelsOut, channelsIn, kernelHeight, kernelWidth = weights.shape\n",
|
||||
"\n",
|
||||
" # Get size of output arrays\n",
|
||||
" imageHeightOut = np.floor(1 + (imageHeightIn - kernelHeight) / stride).astype(int)\n",
|
||||
" imageWidthOut = np.floor(1 + (imageWidthIn - kernelWidth) / stride).astype(int)\n",
|
||||
"\n",
|
||||
" # Create output\n",
|
||||
" out = np.zeros((batchSize, channelsOut, imageHeightOut, imageWidthOut), dtype=np.float32)\n",
|
||||
"\n",
|
||||
" for c_batch in range(batchSize):\n",
|
||||
" for c_y in range(imageHeightOut):\n",
|
||||
" for c_x in range(imageWidthOut):\n",
|
||||
" for c_channel_out in range(channelsOut):\n",
|
||||
" for c_channel_in in range(channelsIn):\n",
|
||||
" for c_kernel_y in range(kernelHeight):\n",
|
||||
" for c_kernel_x in range(kernelWidth):\n",
|
||||
" # TODO -- Retrieve the image pixel and the weight from the convolution\n",
|
||||
" # Replace the two lines below\n",
|
||||
" this_pixel_value = 1.0\n",
|
||||
" this_weight = 1.0\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" # Multiply these together and add to the output at this position\n",
|
||||
" out[c_batch, c_channel_out, c_y, c_x] += np.sum(this_pixel_value * this_weight)\n",
|
||||
" return out"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "5WePF-Y-sC1y"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "1w2GEBtqAM2P"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Set random seed so we always get same answer\n",
|
||||
"np.random.seed(1)\n",
|
||||
"n_batch = 2\n",
|
||||
"image_height = 4\n",
|
||||
"image_width = 6\n",
|
||||
"channels_in = 5\n",
|
||||
"kernel_size = 3\n",
|
||||
"channels_out = 2\n",
|
||||
"\n",
|
||||
"# Create random input image\n",
|
||||
"input_image= np.random.normal(size=(n_batch, channels_in, image_height, image_width))\n",
|
||||
"# Create random convolution kernel weights\n",
|
||||
"conv_weights = np.random.normal(size=(channels_out, channels_in, kernel_size, kernel_size))\n",
|
||||
"\n",
|
||||
"# Perform convolution using PyTorch\n",
|
||||
"conv_results_pytorch = conv_pytorch(input_image, conv_weights, stride=1, pad=1)\n",
|
||||
"print(\"PyTorch Results\")\n",
|
||||
"print(conv_results_pytorch)\n",
|
||||
"\n",
|
||||
"# Perform convolution in numpy\n",
|
||||
"print(\"Your results\")\n",
|
||||
"conv_results_numpy = conv_numpy_4(input_image, conv_weights, stride=1, pad=1)\n",
|
||||
"print(conv_results_numpy)"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
520  Notebooks/Chap10/10_4_Downsampling_and_Upsampling.ipynb  Normal file
@@ -0,0 +1,520 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyMbSR8fzpXvO6TIQdO7bI0H",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap10/10_4_Downsampling_and_Upsampling.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 10.4: Downsampling and Upsampling**\n",
|
||||
"\n",
|
||||
"This notebook investigates the down sampling and downsampling methods discussed in section 10.4 of the book.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "t9vk9Elugvmi"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"from PIL import Image\n",
|
||||
"from numpy import asarray"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YrXWAH7sUWvU"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define 4 by 4 original patch\n",
|
||||
"orig_4_4 = np.array([[1, 3, 5,3 ], [6,2,0,8], [4,6,1,4], [2,8,0,3]])\n",
|
||||
"print(orig_4_4)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "WPRoJcC_JXE2"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def subsample(x_in):\n",
|
||||
" x_out = np.zeros(( int(np.ceil(x_in.shape[0]/2)), int(np.ceil(x_in.shape[1]/2)) ))\n",
|
||||
" # TO DO -- write the subsampling routine\n",
|
||||
" # Replace this line\n",
|
||||
" x_out = x_out\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" return x_out"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "qneyOiZRJubi"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
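A minimal sketch of the subsampling routine (one obvious solution, offered here as an aside): keep every second row and column starting at position (0, 0), which matches the ceil-sized output allocated above:

```python
# Sketch of 2x subsampling: keep every second row and column.
def subsample_ref(x_in):
    return x_in[::2, ::2]
```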
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"print(\"Original:\")\n",
|
||||
"print(orig_4_4)\n",
|
||||
"print(\"Subsampled:\")\n",
|
||||
"print(subsample(orig_4_4))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "O_i0y72_JwGZ"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Let's try that on an image to get a feel for how it works:"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "AobyC8IILbCO"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"!wget https://raw.githubusercontent.com/udlbook/udlbook/main/Notebooks/Chap10/test_image.png"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "3dJEo-6DM-Py"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# load the image\n",
|
||||
"image = Image.open('test_image.png')\n",
|
||||
"# convert image to numpy array\n",
|
||||
"data = asarray(image)\n",
|
||||
"data_subsample = subsample(data);\n",
|
||||
"\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data, cmap='gray')\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_subsample, cmap='gray')\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"data_subsample2 = subsample(data_subsample)\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_subsample2, cmap='gray')\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"data_subsample3 = subsample(data_subsample2)\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_subsample3, cmap='gray')\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "HCZVutk6NB6B"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Now let's try max-pooling\n",
|
||||
"def maxpool(x_in):\n",
|
||||
" x_out = np.zeros(( int(np.floor(x_in.shape[0]/2)), int(np.floor(x_in.shape[1]/2)) ))\n",
|
||||
" # TO DO -- write the maxpool routine\n",
|
||||
" # Replace this line\n",
|
||||
" x_out = x_out\n",
|
||||
"\n",
|
||||
" return x_out"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Z99uYehaPtJa"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
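A sketch of the 2x2 max pool (one possible solution; the floor-sized output simply ignores any trailing odd row or column):

```python
import numpy as np

# Sketch of 2x2 max pooling: take the maximum over each 2x2 block.
def maxpool_ref(x_in):
    h, w = x_in.shape[0] // 2, x_in.shape[1] // 2
    x_out = np.zeros((h, w))
    for i in range(h):
        for j in range(w):
            x_out[i, j] = np.max(x_in[2 * i:2 * i + 2, 2 * j:2 * j + 2])
    return x_out
```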
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"print(\"Original:\")\n",
|
||||
"print(orig_4_4)\n",
|
||||
"print(\"Maxpooled:\")\n",
|
||||
"print(maxpool(orig_4_4))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "J4KMTMmG9P44"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's see what Rick looks like:\n",
|
||||
"data_maxpool = maxpool(data);\n",
|
||||
"\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data, cmap='gray')\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_maxpool, cmap='gray')\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"data_maxpool2 = maxpool(data_maxpool)\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_maxpool2, cmap='gray')\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"data_maxpool3 = maxpool(data_maxpool2)\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_maxpool3, cmap='gray')\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "0ES0sB8t9Wyv"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"You can see that the stripes on his shirt gradually turn to white because we keep retaining the brightest local pixels."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "nMtSdBGlAktq"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Finally, let's try mean pooling\n",
|
||||
"def meanpool(x_in):\n",
|
||||
" x_out = np.zeros(( int(np.floor(x_in.shape[0]/2)), int(np.floor(x_in.shape[1]/2)) ))\n",
|
||||
" # TO DO -- write the meanpool routine\n",
|
||||
" # Replace this line\n",
|
||||
" x_out = x_out\n",
|
||||
"\n",
|
||||
" return x_out"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "ZQBjBtmB_aGQ"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
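And the mean-pooling analogue of the sketch above, averaging each 2x2 block instead of taking its maximum:

```python
import numpy as np

# Sketch of 2x2 mean pooling: average each 2x2 block.
def meanpool_ref(x_in):
    h, w = x_in.shape[0] // 2, x_in.shape[1] // 2
    x_out = np.zeros((h, w))
    for i in range(h):
        for j in range(w):
            x_out[i, j] = np.mean(x_in[2 * i:2 * i + 2, 2 * j:2 * j + 2])
    return x_out
```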
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"print(\"Original:\")\n",
|
||||
"print(orig_4_4)\n",
|
||||
"print(\"Meanpooled:\")\n",
|
||||
"print(meanpool(orig_4_4))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "N4VDlWNt_8dp"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's see what Rick looks like:\n",
|
||||
"data_meanpool = meanpool(data);\n",
|
||||
"\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data, cmap='gray')\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_meanpool, cmap='gray')\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"data_meanpool2 = meanpool(data_maxpool)\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_meanpool2, cmap='gray')\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"data_meanpool3 = meanpool(data_meanpool2)\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_meanpool3, cmap='gray')\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Lkg5zUYo_-IV"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Notice that the three low resolution images look quite different. <br>\n",
|
||||
"\n",
|
||||
"Now let's upscale them again"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "J7VssF4pBf2y"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define 2 by 2 original patch\n",
|
||||
"orig_2_2 = np.array([[2, 4], [4,8]])\n",
|
||||
"print(orig_2_2)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Q4N7i76FA_YH"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's first use the duplication method\n",
|
||||
"def duplicate(x_in):\n",
|
||||
" x_out = np.zeros(( x_in.shape[0]*2, x_in.shape[1]*2 ))\n",
|
||||
" # TO DO -- write the duplication routine\n",
|
||||
" # Replace this line\n",
|
||||
" x_out = x_out\n",
|
||||
"\n",
|
||||
" return x_out"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "6eSjnl3cB5g4"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
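Duplication upsampling has a one-line sketch (one possible solution): repeat each pixel twice along both axes.

```python
import numpy as np

# Sketch of duplication upsampling: each pixel becomes a 2x2 block.
def duplicate_ref(x_in):
    return np.repeat(np.repeat(x_in, 2, axis=0), 2, axis=1)
```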
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"print(\"Original:\")\n",
|
||||
"print(orig_2_2)\n",
|
||||
"print(\"Duplicated:\")\n",
|
||||
"print(duplicate(orig_2_2))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "4FtRcvXrFLg7"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's re-upsample, sub-sampled rick\n",
|
||||
"data_duplicate = duplicate(data_subsample3);\n",
|
||||
"\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_subsample3, cmap='gray')\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_duplicate, cmap='gray')\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"data_duplicate2 = duplicate(data_duplicate)\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_duplicate2, cmap='gray')\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"data_duplicate3 = duplicate(data_duplicate2)\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_duplicate3, cmap='gray')\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "agq0YN34FQfA"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"They look the same, but if you look at the axes, you'll see that the pixels are just duplicated."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "bCQrJ_M8GUFs"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Now let's try max pooling back up\n",
|
||||
"# The input x_high_res is the original high res image, from which you can deduce the position of the maximum index\n",
|
||||
"def max_unpool(x_in, x_high_res):\n",
|
||||
" x_out = np.zeros(( x_in.shape[0]*2, x_in.shape[1]*2 ))\n",
|
||||
" # TO DO -- write the subsampling routine\n",
|
||||
" # Replace this line\n",
|
||||
" x_out = x_out\n",
|
||||
"\n",
|
||||
" return x_out"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "uDUDChmBF71_"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
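A sketch of max unpooling under the scheme described in the comment above: each low-resolution value is written back to the position that held the maximum of the corresponding 2x2 block of the high-resolution reference, and the other three positions stay zero:

```python
import numpy as np

# Sketch of max unpooling: route each value to the argmax position of the
# matching 2x2 block of x_high_res; everything else stays zero.
def max_unpool_ref(x_in, x_high_res):
    x_out = np.zeros((x_in.shape[0] * 2, x_in.shape[1] * 2))
    for i in range(x_in.shape[0]):
        for j in range(x_in.shape[1]):
            block = x_high_res[2 * i:2 * i + 2, 2 * j:2 * j + 2]
            dy, dx = np.unravel_index(np.argmax(block), block.shape)
            x_out[2 * i + dy, 2 * j + dx] = x_in[i, j]
    return x_out
```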
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"print(\"Original:\")\n",
|
||||
"print(orig_2_2)\n",
|
||||
"print(\"Max unpooled:\")\n",
|
||||
"print(max_unpool(orig_2_2,orig_4_4))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "EmjptCVNHq74"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's re-upsample, sub-sampled rick\n",
|
||||
"data_max_unpool= max_unpool(data_maxpool3,data_maxpool2);\n",
|
||||
"\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_maxpool3, cmap='gray')\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_max_unpool, cmap='gray')\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"data_max_unpool2 = max_unpool(data_max_unpool, data_maxpool)\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_max_unpool2, cmap='gray')\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"data_max_unpool3 = max_unpool(data_max_unpool2, data)\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_max_unpool3, cmap='gray')\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "SSPhTuV6H4ZH"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Finally, we'll try upsampling using bilinear interpolation. We'll treat the positions off the image as zeros by padding the original image and round fractional values upwards using np.ceil()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "sBx36bvbJHrK"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def bilinear(x_in):\n",
|
||||
" x_out = np.zeros(( x_in.shape[0]*2, x_in.shape[1]*2 ))\n",
|
||||
" x_in_pad = np.zeros((x_in.shape[0]+1, x_in.shape[1]+1))\n",
|
||||
" x_in_pad[0:x_in.shape[0],0:x_in.shape[1]] = x_in\n",
|
||||
" # TO DO -- write the duplication routine\n",
|
||||
" # Replace this line\n",
|
||||
" x_out = x_out\n",
|
||||
"\n",
|
||||
" return x_out"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "00XpfQo3Ivdf"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
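One plausible reading of the bilinear scheme above, sketched with the same bottom/right zero padding (an assumption on our part, not the official solution): even output positions copy the input, and odd ones interpolate between the neighbouring padded values:

```python
import numpy as np

# Sketch of 2x bilinear upsampling with zero padding at the bottom/right.
def bilinear_ref(x_in):
    h, w = x_in.shape
    x_pad = np.zeros((h + 1, w + 1))
    x_pad[:h, :w] = x_in
    x_out = np.zeros((h * 2, w * 2))
    for i in range(h * 2):
        for j in range(w * 2):
            y0, x0 = i // 2, j // 2                 # top-left source pixel
            fy, fx = (i % 2) * 0.5, (j % 2) * 0.5   # fractional offsets (0 or 0.5)
            x_out[i, j] = ((1 - fy) * (1 - fx) * x_pad[y0, x0]
                           + (1 - fy) * fx * x_pad[y0, x0 + 1]
                           + fy * (1 - fx) * x_pad[y0 + 1, x0]
                           + fy * fx * x_pad[y0 + 1, x0 + 1])
    return x_out
```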
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"print(\"Original:\")\n",
|
||||
"print(orig_2_2)\n",
|
||||
"print(\"Bilinear:\")\n",
|
||||
"print(bilinear(orig_2_2))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "qI5oRVCCNRob"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's re-upsample, sub-sampled rick\n",
|
||||
"data_bilinear = bilinear(data_meanpool3);\n",
|
||||
"\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_meanpool3, cmap='gray')\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_bilinear, cmap='gray')\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"data_bilinear2 = bilinear(data_bilinear)\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_bilinear2, cmap='gray')\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"data_bilinear3 = duplicate(data_bilinear2)\n",
|
||||
"plt.figure(figsize=(5,5))\n",
|
||||
"plt.imshow(data_bilinear3, cmap='gray')\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "4m0bkhdmNRec"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
|
||||
]
|
||||
}
|
||||
296  Notebooks/Chap10/10_5_Convolution_For_MNIST.ipynb  Normal file
@@ -0,0 +1,296 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyNAcc98STMeyQgh9SbVHWG+",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap10/10_5_Convolution_For_MNIST.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 10.5: Convolution for MNIST**\n",
|
||||
"\n",
|
||||
"This notebook builds a proper network for 2D convolution. It works with the MNIST dataset (figure 15.15a), which was the original classic dataset for classifying images. The network will take a 28x28 grayscale image and classify it into one of 10 classes representing a digit.\n",
|
||||
"\n",
|
||||
"The code is adapted from https://nextjournal.com/gkoehler/pytorch-mnist\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "t9vk9Elugvmi"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"import torchvision\n",
|
||||
"import torch.nn as nn\n",
|
||||
"import torch.nn.functional as F\n",
|
||||
"import torch.optim as optim\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import random"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YrXWAH7sUWvU"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Run this once to load the train and test data straight into a dataloader class\n",
|
||||
"# that will provide the batches\n",
|
||||
"batch_size_train = 64\n",
|
||||
"batch_size_test = 1000\n",
|
||||
"train_loader = torch.utils.data.DataLoader(\n",
|
||||
" torchvision.datasets.MNIST('/files/', train=True, download=True,\n",
|
||||
" transform=torchvision.transforms.Compose([\n",
|
||||
" torchvision.transforms.ToTensor(),\n",
|
||||
" torchvision.transforms.Normalize(\n",
|
||||
" (0.1307,), (0.3081,))\n",
|
||||
" ])),\n",
|
||||
" batch_size=batch_size_train, shuffle=True)\n",
|
||||
"\n",
|
||||
"test_loader = torch.utils.data.DataLoader(\n",
|
||||
" torchvision.datasets.MNIST('/files/', train=False, download=True,\n",
|
||||
" transform=torchvision.transforms.Compose([\n",
|
||||
" torchvision.transforms.ToTensor(),\n",
|
||||
" torchvision.transforms.Normalize(\n",
|
||||
" (0.1307,), (0.3081,))\n",
|
||||
" ])),\n",
|
||||
" batch_size=batch_size_test, shuffle=True)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "wScBGXXFVadm"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's draw some of the training data\n",
|
||||
"examples = enumerate(test_loader)\n",
|
||||
"batch_idx, (example_data, example_targets) = next(examples)\n",
|
||||
"\n",
|
||||
"fig = plt.figure()\n",
|
||||
"for i in range(6):\n",
|
||||
" plt.subplot(2,3,i+1)\n",
|
||||
" plt.tight_layout()\n",
|
||||
" plt.imshow(example_data[i][0], cmap='gray', interpolation='none')\n",
|
||||
" plt.title(\"Ground Truth: {}\".format(example_targets[i]))\n",
|
||||
" plt.xticks([])\n",
|
||||
" plt.yticks([])\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "8bKADvLHbiV5"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Define the network. This is a more typical way to define a network than the sequential structure. We define a class for the network, and define the parameters in the constructor. Then we use a function called forward to actually run the network. It's easy to see how you might use residual connections in this format."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "_sFvRDGrl4qe"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"from os import X_OK\n",
|
||||
"# TODO Change this class to implement\n",
|
||||
"# 1. A valid convolution with kernel size 5, 1 input channel and 10 output channels\n",
|
||||
"# 2. A max pooling operation over a 2x2 area\n",
|
||||
"# 3. A Relu\n",
|
||||
"# 4. A valid convolution with kernel size 5, 10 input channels and 20 output channels\n",
|
||||
"# 5. A 2D Dropout layer\n",
|
||||
"# 6. A max pooling operation over a 2x2 area\n",
|
||||
"# 7. A relu\n",
|
||||
"# 8. A flattening operation\n",
|
||||
"# 9. A fully connected layer mapping from (whatever dimensions we are at-- find out using .shape) to 50\n",
|
||||
"# 10. A ReLU\n",
|
||||
"# 11. A fully connected layer mapping from 50 to 10 dimensions\n",
|
||||
"# 12. A softmax function.\n",
|
||||
"\n",
|
||||
"# Replace this class which implements a minimal network (which still does okay)\n",
|
||||
"class Net(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(Net, self).__init__()\n",
|
||||
" # Valid convolution, 1 channel in, 2 channels out, stride 1, kernel size = 3\n",
|
||||
" self.conv1 = nn.Conv2d(1, 2, kernel_size=3)\n",
|
||||
" # Dropout for convolutions\n",
|
||||
" self.drop = nn.Dropout2d()\n",
|
||||
" # Fully connected layer\n",
|
||||
" self.fc1 = nn.Linear(338, 10)\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.conv1(x)\n",
|
||||
" x = self.drop(x)\n",
|
||||
" x = F.max_pool2d(x,2)\n",
|
||||
" x = F.relu(x)\n",
|
||||
" x = x.flatten(1)\n",
|
||||
" x = self.fc1(x)\n",
|
||||
" x = F.log_softmax(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "EQkvw2KOPVl7"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
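For comparison with the twelve steps listed above, here is one possible sketch of the full network (not the official solution; a log-softmax stands in for the softmax of step 12, since the training loop below uses nll_loss, which expects log-probabilities):

```python
import torch.nn as nn
import torch.nn.functional as F

# Sketch of the network in the TODO: conv 5x5 -> pool -> relu,
# conv 5x5 -> dropout -> pool -> relu, flatten, fc 320->50, relu, fc 50->10.
class NetSketch(nn.Module):
    def __init__(self):
        super(NetSketch, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)   # 28x28 -> 24x24
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)  # 12x12 -> 8x8
        self.drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)                  # 20 channels x 4x4 = 320
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))             # steps 1-3
        x = F.relu(F.max_pool2d(self.drop(self.conv2(x)), 2))  # steps 4-7
        x = x.flatten(1)                                       # step 8
        x = F.relu(self.fc1(x))                                # steps 9-10
        x = self.fc2(x)                                        # step 11
        return F.log_softmax(x, dim=1)                         # step 12 (log form for nll_loss)
```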
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# He initialization of weights\n",
|
||||
"def weights_init(layer_in):\n",
|
||||
" if isinstance(layer_in, nn.Linear):\n",
|
||||
" nn.init.kaiming_uniform_(layer_in.weight)\n",
|
||||
" layer_in.bias.data.fill_(0.0)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "qWZtkCZcU_dg"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Create network\n",
|
||||
"model = Net()\n",
|
||||
"# Initialize model weights\n",
|
||||
"model.apply(weights_init)\n",
|
||||
"# Define optimizer\n",
|
||||
"optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "FslroPJJffrh"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Main training routine\n",
|
||||
"def train(epoch):\n",
|
||||
" model.train()\n",
|
||||
" # Get each\n",
|
||||
" for batch_idx, (data, target) in enumerate(train_loader):\n",
|
||||
" optimizer.zero_grad()\n",
|
||||
" output = model(data)\n",
|
||||
" loss = F.nll_loss(output, target)\n",
|
||||
" loss.backward()\n",
|
||||
" optimizer.step()\n",
|
||||
" # Store results\n",
|
||||
" if batch_idx % 10 == 0:\n",
|
||||
" print('Train Epoch: {} [{}/{}]\\tLoss: {:.6f}'.format(\n",
|
||||
" epoch, batch_idx * len(data), len(train_loader.dataset), loss.item()))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "xKQd9PzkQ766"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Run on test data\n",
|
||||
"def test():\n",
|
||||
" model.eval()\n",
|
||||
" test_loss = 0\n",
|
||||
" correct = 0\n",
|
||||
" with torch.no_grad():\n",
|
||||
" for data, target in test_loader:\n",
|
||||
" output = model(data)\n",
|
||||
" test_loss += F.nll_loss(output, target, size_average=False).item()\n",
|
||||
" pred = output.data.max(1, keepdim=True)[1]\n",
|
||||
" correct += pred.eq(target.data.view_as(pred)).sum()\n",
|
||||
" test_loss /= len(test_loader.dataset)\n",
|
||||
" print('\\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n",
|
||||
" test_loss, correct, len(test_loader.dataset),\n",
|
||||
" 100. * correct / len(test_loader.dataset)))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Byn-f7qWRLxX"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Get initial performance\n",
|
||||
"test()\n",
|
||||
"# Train for three epochs\n",
|
||||
"n_epochs = 3\n",
|
||||
"for epoch in range(1, n_epochs + 1):\n",
|
||||
" train(epoch)\n",
|
||||
" test()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YgLaex1pfhqz"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Run network on data we got before and show predictions\n",
|
||||
"output = model(example_data)\n",
|
||||
"\n",
|
||||
"fig = plt.figure()\n",
|
||||
"for i in range(10):\n",
|
||||
" plt.subplot(5,5,i+1)\n",
|
||||
" plt.tight_layout()\n",
|
||||
" plt.imshow(example_data[i][0], cmap='gray', interpolation='none')\n",
|
||||
" plt.title(\"Prediction: {}\".format(\n",
|
||||
" output.data.max(1, keepdim=True)[1][i].item()))\n",
|
||||
" plt.xticks([])\n",
|
||||
" plt.yticks([])\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "o7fRUAy9Se1B"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
|
||||
]
|
||||
}
|
||||
BIN  Notebooks/Chap10/test_image.png  (new binary file, not shown; 59 KiB)
392  Notebooks/Chap11/11_1_Shattered_Gradients.ipynb  (new file)
@@ -0,0 +1,392 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyMrF4rB2hTKq7XzLuYsURdL",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap11/11_1_Shattered_Gradients.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 11.1: Shattered gradients**\n",
|
||||
"\n",
|
||||
"This notebook investigates the phenomenon of shattered gradients as discussed in section 11.1.1. It replicates some of the experiments in [Balduzzi et al. (2017)](https://arxiv.org/abs/1702.08591).\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "pOZ6Djz0dhoy"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "iaFyNGhU21VJ"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"First let's define a neural network. We'll initialize both the weights and biases randomly with Glorot initialization (He initialization without the factor of two)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YcNlAxnE3XXn"
|
||||
}
|
||||
},
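(An aside on the claim above, as a minimal NumPy sketch — the factor of two is the only difference between the two schemes, with He scaling compensating for ReLU zeroing roughly half the preactivations; the numbers are illustrative:)

import numpy as np

D = 200
w_glorot = np.random.normal(size=(D, D)) * np.sqrt(1.0 / D)  # variance ~ 1/D
w_he = np.random.normal(size=(D, D)) * np.sqrt(2.0 / D)      # variance ~ 2/D
print(w_glorot.var(), w_he.var())  # roughly 0.005 and 0.01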
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# K is width, D is number of hidden units in each layer\n",
|
||||
"def init_params(K, D):\n",
|
||||
" # Set seed so we always get the same random numbers\n",
|
||||
" np.random.seed(1)\n",
|
||||
"\n",
|
||||
" # Input layer\n",
|
||||
" D_i = 1\n",
|
||||
" # Output layer\n",
|
||||
" D_o = 1\n",
|
||||
"\n",
|
||||
" # Glorot initialization\n",
|
||||
" sigma_sq_omega = 1.0/D\n",
|
||||
"\n",
|
||||
" # Make empty lists\n",
|
||||
" all_weights = [None] * (K+1)\n",
|
||||
" all_biases = [None] * (K+1)\n",
|
||||
"\n",
|
||||
" # Create parameters for input and output layers\n",
|
||||
" all_weights[0] = np.random.normal(size=(D, D_i))*np.sqrt(sigma_sq_omega)\n",
|
||||
" all_weights[-1] = np.random.normal(size=(D_o, D)) * np.sqrt(sigma_sq_omega)\n",
|
||||
" all_biases[0] = np.random.normal(size=(D,1))* np.sqrt(sigma_sq_omega)\n",
|
||||
" all_biases[-1]= np.random.normal(size=(D_o,1))* np.sqrt(sigma_sq_omega)\n",
|
||||
"\n",
|
||||
" # Create intermediate layers\n",
|
||||
" for layer in range(1,K):\n",
|
||||
" all_weights[layer] = np.random.normal(size=(D,D))*np.sqrt(sigma_sq_omega)\n",
|
||||
" all_biases[layer] = np.random.normal(size=(D,1))* np.sqrt(sigma_sq_omega)\n",
|
||||
"\n",
|
||||
" return all_weights, all_biases"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "kr-q7hc23Bn9"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"The next two functions define the forward pass of the algorithm"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "kwcn5z7-dq_1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define the Rectified Linear Unit (ReLU) function\n",
|
||||
"def ReLU(preactivation):\n",
|
||||
" activation = preactivation.clip(0.0)\n",
|
||||
" return activation\n",
|
||||
"\n",
|
||||
"def forward_pass(net_input, all_weights, all_biases):\n",
|
||||
"\n",
|
||||
" # Retrieve number of layers\n",
|
||||
" K = len(all_weights) -1\n",
|
||||
"\n",
|
||||
" # We'll store the pre-activations at each layer in a list \"all_f\"\n",
|
||||
" # and the activations in a second list[all_h].\n",
|
||||
" all_f = [None] * (K+1)\n",
|
||||
" all_h = [None] * (K+1)\n",
|
||||
"\n",
|
||||
" #For convenience, we'll set\n",
|
||||
" # all_h[0] to be the input, and all_f[K] will be the output\n",
|
||||
" all_h[0] = net_input\n",
|
||||
"\n",
|
||||
" # Run through the layers, calculating all_f[0...K-1] and all_h[1...K]\n",
|
||||
" for layer in range(K):\n",
|
||||
" # Update preactivations and activations at this layer according to eqn 7.5\n",
|
||||
" all_f[layer] = all_biases[layer] + np.matmul(all_weights[layer], all_h[layer])\n",
|
||||
" all_h[layer+1] = ReLU(all_f[layer])\n",
|
||||
"\n",
|
||||
" # Compute the output from the last hidden layer\n",
|
||||
" all_f[K] = all_biases[K] + np.matmul(all_weights[K], all_h[K])\n",
|
||||
"\n",
|
||||
" # Retrieve the output\n",
|
||||
" net_output = all_f[K]\n",
|
||||
"\n",
|
||||
" return net_output, all_f, all_h"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "_2w-Tr7G3sYq"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"The next two functions compute the gradient of the output with respect to the input using the back propagation algorithm."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "aM2l7QafeC8T"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# We'll need the indicator function\n",
|
||||
"def indicator_function(x):\n",
|
||||
" x_in = np.array(x)\n",
|
||||
" x_in[x_in>=0] = 1\n",
|
||||
" x_in[x_in<0] = 0\n",
|
||||
" return x_in\n",
|
||||
"\n",
|
||||
"# Main backward pass routine\n",
|
||||
"def calc_input_output_gradient(x_in, all_weights, all_biases):\n",
|
||||
"\n",
|
||||
" # Run the forward pass\n",
|
||||
" y, all_f, all_h = forward_pass(x_in, all_weights, all_biases)\n",
|
||||
"\n",
|
||||
" # We'll store the derivatives dl_dweights and dl_dbiases in lists as well\n",
|
||||
" all_dl_dweights = [None] * (K+1)\n",
|
||||
" all_dl_dbiases = [None] * (K+1)\n",
|
||||
" # And we'll store the derivatives of the loss with respect to the activation and preactivations in lists\n",
|
||||
" all_dl_df = [None] * (K+1)\n",
|
||||
" all_dl_dh = [None] * (K+1)\n",
|
||||
" # Again for convenience we'll stick with the convention that all_h[0] is the net input and all_f[k] in the net output\n",
|
||||
"\n",
|
||||
" # Compute derivatives of net output with respect to loss\n",
|
||||
" all_dl_df[K] = np.ones_like(all_f[K])\n",
|
||||
"\n",
|
||||
" # Now work backwards through the network\n",
|
||||
" for layer in range(K,-1,-1):\n",
|
||||
" all_dl_dbiases[layer] = np.array(all_dl_df[layer])\n",
|
||||
" all_dl_dweights[layer] = np.matmul(all_dl_df[layer], all_h[layer].transpose())\n",
|
||||
"\n",
|
||||
" all_dl_dh[layer] = np.matmul(all_weights[layer].transpose(), all_dl_df[layer])\n",
|
||||
"\n",
|
||||
" if layer > 0:\n",
|
||||
" all_dl_df[layer-1] = indicator_function(all_f[layer-1]) * all_dl_dh[layer]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" return all_dl_dh[0],y"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "DwR3eGMgV8bl"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Double check we have the gradient correct using finite differences"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Ar_VmraReSWe"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"D = 200; K = 3\n",
|
||||
"# Initialize parameters\n",
|
||||
"all_weights, all_biases = init_params(K,D)\n",
|
||||
"\n",
|
||||
"x = np.ones((1,1))\n",
|
||||
"dydx,y = calc_input_output_gradient(x, all_weights, all_biases)\n",
|
||||
"\n",
|
||||
"# Offset for finite gradients\n",
|
||||
"delta = 0.00000001\n",
|
||||
"x1 = x\n",
|
||||
"y1,*_ = forward_pass(x1, all_weights, all_biases)\n",
|
||||
"x2 = x+delta\n",
|
||||
"y2,*_ = forward_pass(x2, all_weights, all_biases)\n",
|
||||
"# Finite difference calculation\n",
|
||||
"dydx_fd = (y2-y1)/delta\n",
|
||||
"\n",
|
||||
"print(\"Gradient calculation=%f, Finite difference gradient=%f\"%(dydx,dydx_fd))\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "KJpQPVd36Haq"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Helper function that computes the derivatives for a 1D array of input values and plots them."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YC-LAYRKtbxp"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def plot_derivatives(K, D):\n",
|
||||
"\n",
|
||||
" # Initialize parameters\n",
|
||||
" all_weights, all_biases = init_params(K,D)\n",
|
||||
"\n",
|
||||
" x_in = np.arange(-2,2, 4.0/256.0)\n",
|
||||
" x_in = np.resize(x_in, (1,len(x_in)))\n",
|
||||
" dydx,y = calc_input_output_gradient(x_in, all_weights, all_biases)\n",
|
||||
"\n",
|
||||
" fig,ax = plt.subplots()\n",
|
||||
" ax.plot(np.squeeze(x_in), np.squeeze(dydx), 'b-')\n",
|
||||
" ax.set_xlim(-2,2)\n",
|
||||
" ax.set_xlabel('Input, $x$')\n",
|
||||
" ax.set_ylabel('Gradient, $dy/dx$')\n",
|
||||
" ax.set_title('No layers = %d'%(K))\n",
|
||||
" plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "uJr5eDe648jF"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Build a model with one hidden layer and 200 neurons and plot derivatives\n",
|
||||
"D = 200; K = 1\n",
|
||||
"plot_derivatives(K,D)\n",
|
||||
"\n",
|
||||
"# TODO -- Interpret this result\n",
|
||||
"# Why does the plot have some flat regions?\n",
|
||||
"\n",
|
||||
"# TODO -- Add code to plot the derivatives for models with 24 and 50 hidden layers\n",
|
||||
"# with 200 neurons per layer\n",
|
||||
"\n",
|
||||
"# TODO -- Why does this graph not have visible flat regions?\n",
|
||||
"\n",
|
||||
"# TODO -- Why does the magnitude of the gradients decrease as we increase the number\n",
|
||||
"# of hidden layers\n",
|
||||
"\n",
|
||||
"# TODO -- Do you find this a convincing replication of the experiment in the original paper? (I don't)\n",
|
||||
"# Can you help me find why I have failed to replicate this result? udlbookmail@gmail.com"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "56gTMTCb49KO"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Let's look at the autocorrelation function now"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "f_0zjQbxuROQ"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def autocorr(dydx):\n",
|
||||
" # TODO -- compute the autocorrelation function\n",
|
||||
" # Use the numpy function \"correlate\" with the mode set to \"same\"\n",
|
||||
" # Replace this line:\n",
|
||||
" ac = np.ones((256,1))\n",
|
||||
"\n",
|
||||
" return ac"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "ggnO8hfoRN1e"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
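(One possible completion of the TODO above, as a hedged sketch and not necessarily the author's intended solution — np.correlate with mode "same" slides the gradient signal against itself and returns a vector of the same length as the input:)

def autocorr(dydx):
    # Correlate the 1D gradient signal with itself
    ac = np.correlate(dydx, dydx, mode='same')
    return ac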
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Helper function to plot the autocorrelation function and normalize so correlation is one with offset of zero"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "EctWSV1RuddK"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def plot_autocorr(K, D):\n",
|
||||
"\n",
|
||||
" # Initialize parameters\n",
|
||||
" all_weights, all_biases = init_params(K,D)\n",
|
||||
"\n",
|
||||
" x_in = np.arange(-2.0,2.0, 4.0/256)\n",
|
||||
" x_in = np.resize(x_in, (1,len(x_in)))\n",
|
||||
" dydx,y = calc_input_output_gradient(x_in, all_weights, all_biases)\n",
|
||||
" ac = autocorr(np.squeeze(dydx))\n",
|
||||
" ac = ac / ac[128]\n",
|
||||
"\n",
|
||||
" y = ac[128:]\n",
|
||||
" x = np.squeeze(x_in)[128:]\n",
|
||||
" fig,ax = plt.subplots()\n",
|
||||
" ax.plot(x,y, 'b-')\n",
|
||||
" ax.set_xlim([0,2])\n",
|
||||
" ax.set_xlabel('Distance')\n",
|
||||
" ax.set_ylabel('Autocorrelation')\n",
|
||||
" ax.set_title('No layers = %d'%(K))\n",
|
||||
" plt.show()\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "2LKlZ9u_WQXN"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Plot the autocorrelation functions\n",
|
||||
"D = 200; K =1\n",
|
||||
"plot_autocorr(K,D)\n",
|
||||
"D = 200; K =50\n",
|
||||
"plot_autocorr(K,D)\n",
|
||||
"\n",
|
||||
"# TODO -- Do you find this a convincing replication of the experiment in the original paper? (I don't)\n",
|
||||
"# Can you help me find why I have failed to replicate this result?"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "RD9JTdjNWw6p"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
|
||||
]
|
||||
}
|
||||
277  Notebooks/Chap11/11_2_Residual_Networks.ipynb  (new file)
@@ -0,0 +1,277 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyObut1y9atNUuowPT6dMY+I",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap11/11_2_Residual_Networks.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 11.2: Residual Networks**\n",
|
||||
"\n",
|
||||
"This notebook adapts the networks for MNIST1D to use residual connections.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "t9vk9Elugvmi"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Run this if you're in a Colab to make a local copy of the MNIST 1D repository\n",
|
||||
"!git clone https://github.com/greydanus/mnist1d"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "D5yLObtZCi9J"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import os\n",
|
||||
"import torch, torch.nn as nn\n",
|
||||
"from torch.utils.data import TensorDataset, DataLoader\n",
|
||||
"from torch.optim.lr_scheduler import StepLR\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import mnist1d\n",
|
||||
"import random"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YrXWAH7sUWvU"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"args = mnist1d.data.get_dataset_args()\n",
|
||||
"data = mnist1d.data.get_dataset(args, path='./mnist1d_data.pkl', download=False, regenerate=False)\n",
|
||||
"\n",
|
||||
"# The training and test input and outputs are in\n",
|
||||
"# data['x'], data['y'], data['x_test'], and data['y_test']\n",
|
||||
"print(\"Examples in training set: {}\".format(len(data['y'])))\n",
|
||||
"print(\"Examples in test set: {}\".format(len(data['y_test'])))\n",
|
||||
"print(\"Length of each example: {}\".format(data['x'].shape[-1]))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "twI72ZCrCt5z"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Load in the data\n",
|
||||
"train_data_x = data['x'].transpose()\n",
|
||||
"train_data_y = data['y']\n",
|
||||
"val_data_x = data['x_test'].transpose()\n",
|
||||
"val_data_y = data['y_test']\n",
|
||||
"# Print out sizes\n",
|
||||
"print(\"Train data: %d examples (columns), each of which has %d dimensions (rows)\"%((train_data_x.shape[1],train_data_x.shape[0])))\n",
|
||||
"print(\"Validation data: %d examples (columns), each of which has %d dimensions (rows)\"%((val_data_x.shape[1],val_data_x.shape[0])))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "8bKADvLHbiV5"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Define the network"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "_sFvRDGrl4qe"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# There are 40 input dimensions and 10 output dimensions for this data\n",
|
||||
"# The inputs correspond to the 40 offsets in the MNIST1D template.\n",
|
||||
"D_i = 40\n",
|
||||
"# The outputs correspond to the 10 digits\n",
|
||||
"D_o = 10\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# We will adapt this model to have residual connections around the linear layers\n",
|
||||
"# This is the same model we used in practical 8.1, but we can't use the sequential\n",
|
||||
"# class for residual networks (which aren't strictly sequential). Hence, I've rewritten\n",
|
||||
"# it as a model that inherits from a base class\n",
|
||||
"\n",
|
||||
"class ResidualNetwork(torch.nn.Module):\n",
|
||||
" def __init__(self, input_size, output_size, hidden_size=100):\n",
|
||||
" super(ResidualNetwork, self).__init__()\n",
|
||||
" self.linear1 = nn.Linear(input_size, hidden_size)\n",
|
||||
" self.linear2 = nn.Linear(hidden_size, hidden_size)\n",
|
||||
" self.linear3 = nn.Linear(hidden_size, hidden_size)\n",
|
||||
" self.linear4 = nn.Linear(hidden_size, output_size)\n",
|
||||
" print(\"Initialized MLPBase model with {} parameters\".format(self.count_params()))\n",
|
||||
"\n",
|
||||
" def count_params(self):\n",
|
||||
" return sum([p.view(-1).shape[0] for p in self.parameters()])\n",
|
||||
"\n",
|
||||
"# # TODO -- Add residual connections to this model\n",
|
||||
"# # The order of operations should similar to figure 11.5b\n",
|
||||
"# # linear1 first, ReLU+linear2 in first residual block, ReLU+linear3 in second residual block), linear4 at end\n",
|
||||
"# # Replace this function\n",
|
||||
" def forward(self, x):\n",
|
||||
" h1 = self.linear1(x).relu()\n",
|
||||
" h2 = self.linear2(h1).relu()\n",
|
||||
" h3 = self.linear3(h2).relu()\n",
|
||||
" return self.linear4(h3)\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "FslroPJJffrh"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
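(A hedged sketch of the forward pass the TODO above asks for, following the figure 11.5b ordering — linear1 first, then two ReLU-plus-linear residual blocks, then linear4; this is one reading of the instructions, not necessarily the author's solution:)

    def forward(self, x):
        h = self.linear1(x)             # initial linear layer
        h = h + self.linear2(h.relu())  # first residual block: ReLU then linear, added back
        h = h + self.linear3(h.relu())  # second residual block
        return self.linear4(h)          # final linear layer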
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# He initialization of weights\n",
|
||||
"def weights_init(layer_in):\n",
|
||||
" if isinstance(layer_in, nn.Linear):\n",
|
||||
" nn.init.kaiming_uniform_(layer_in.weight)\n",
|
||||
" layer_in.bias.data.fill_(0.0)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YgLaex1pfhqz"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"#Define the model\n",
|
||||
"model = ResidualNetwork(40, 10)\n",
|
||||
"\n",
|
||||
"# choose cross entropy loss function (equation 5.24 in the loss notes)\n",
|
||||
"loss_function = nn.CrossEntropyLoss()\n",
|
||||
"# construct SGD optimizer and initialize learning rate and momentum\n",
|
||||
"optimizer = torch.optim.SGD(model.parameters(), lr = 0.05, momentum=0.9)\n",
|
||||
"# object that decreases learning rate by half every 20 epochs\n",
|
||||
"scheduler = StepLR(optimizer, step_size=20, gamma=0.5)\n",
|
||||
"# convert data to torch tensors\n",
|
||||
"x_train = torch.tensor(train_data_x.transpose().astype('float32'))\n",
|
||||
"y_train = torch.tensor(train_data_y.astype('long'))\n",
|
||||
"x_val= torch.tensor(val_data_x.transpose().astype('float32'))\n",
|
||||
"y_val = torch.tensor(val_data_y.astype('long'))\n",
|
||||
"\n",
|
||||
"# load the data into a class that creates the batches\n",
|
||||
"data_loader = DataLoader(TensorDataset(x_train,y_train), batch_size=100, shuffle=True, worker_init_fn=np.random.seed(1))\n",
|
||||
"\n",
|
||||
"# Initialize model weights\n",
|
||||
"model.apply(weights_init)\n",
|
||||
"\n",
|
||||
"# loop over the dataset n_epoch times\n",
|
||||
"n_epoch = 100\n",
|
||||
"# store the loss and the % correct at each epoch\n",
|
||||
"losses_train = np.zeros((n_epoch))\n",
|
||||
"errors_train = np.zeros((n_epoch))\n",
|
||||
"losses_val = np.zeros((n_epoch))\n",
|
||||
"errors_val = np.zeros((n_epoch))\n",
|
||||
"\n",
|
||||
"for epoch in range(n_epoch):\n",
|
||||
" # loop over batches\n",
|
||||
" for i, data in enumerate(data_loader):\n",
|
||||
" # retrieve inputs and labels for this batch\n",
|
||||
" x_batch, y_batch = data\n",
|
||||
" # zero the parameter gradients\n",
|
||||
" optimizer.zero_grad()\n",
|
||||
" # forward pass -- calculate model output\n",
|
||||
" pred = model(x_batch)\n",
|
||||
" # compute the loss\n",
|
||||
" loss = loss_function(pred, y_batch)\n",
|
||||
" # backward pass\n",
|
||||
" loss.backward()\n",
|
||||
" # SGD update\n",
|
||||
" optimizer.step()\n",
|
||||
"\n",
|
||||
" # Run whole dataset to get statistics -- normally wouldn't do this\n",
|
||||
" pred_train = model(x_train)\n",
|
||||
" pred_val = model(x_val)\n",
|
||||
" _, predicted_train_class = torch.max(pred_train.data, 1)\n",
|
||||
" _, predicted_val_class = torch.max(pred_val.data, 1)\n",
|
||||
" errors_train[epoch] = 100 - 100 * (predicted_train_class == y_train).float().sum() / len(y_train)\n",
|
||||
" errors_val[epoch]= 100 - 100 * (predicted_val_class == y_val).float().sum() / len(y_val)\n",
|
||||
" losses_train[epoch] = loss_function(pred_train, y_train).item()\n",
|
||||
" losses_val[epoch]= loss_function(pred_val, y_val).item()\n",
|
||||
" print(f'Epoch {epoch:5d}, train loss {losses_train[epoch]:.6f}, train error {errors_train[epoch]:3.2f}, val loss {losses_val[epoch]:.6f}, percent error {errors_val[epoch]:3.2f}')\n",
|
||||
"\n",
|
||||
" # tell scheduler to consider updating learning rate\n",
|
||||
" scheduler.step()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "NYw8I_3mmX5c"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Plot the results\n",
|
||||
"fig, ax = plt.subplots()\n",
|
||||
"ax.plot(errors_train,'r-',label='train')\n",
|
||||
"ax.plot(errors_val,'b-',label='test')\n",
|
||||
"ax.set_ylim(0,100); ax.set_xlim(0,n_epoch)\n",
|
||||
"ax.set_xlabel('Epoch'); ax.set_ylabel('Error')\n",
|
||||
"ax.set_title('TrainError %3.2f, Val Error %3.2f'%(errors_train[-1],errors_val[-1]))\n",
|
||||
"ax.legend()\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "CcP_VyEmE2sv"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"The primary motivation of residual networks is to allow training of much deeper networks. \n",
|
||||
"\n",
|
||||
"TODO: Try running this network with and without the residual connections. Does adding the residual connections change the performance?"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "wMmqhmxuAx0M"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
328  Notebooks/Chap11/11_3_Batch_Normalization.ipynb  (new file)
@@ -0,0 +1,328 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyOoGS+lY+EhGthebSO4smpj",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap11/11_3_Batch_Normalization.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 11.3: Batch normalization**\n",
|
||||
"\n",
|
||||
"This notebook investigates the use of batch normalization in residual networks.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "t9vk9Elugvmi"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Run this if you're in a Colab to make a local copy of the MNIST 1D repository\n",
|
||||
"!git clone https://github.com/greydanus/mnist1d"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "D5yLObtZCi9J"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import os\n",
|
||||
"import torch, torch.nn as nn\n",
|
||||
"from torch.utils.data import TensorDataset, DataLoader\n",
|
||||
"from torch.optim.lr_scheduler import StepLR\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import mnist1d\n",
|
||||
"import random"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YrXWAH7sUWvU"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"args = mnist1d.data.get_dataset_args()\n",
|
||||
"data = mnist1d.data.get_dataset(args, path='./mnist1d_data.pkl', download=False, regenerate=False)\n",
|
||||
"\n",
|
||||
"# The training and test input and outputs are in\n",
|
||||
"# data['x'], data['y'], data['x_test'], and data['y_test']\n",
|
||||
"print(\"Examples in training set: {}\".format(len(data['y'])))\n",
|
||||
"print(\"Examples in test set: {}\".format(len(data['y_test'])))\n",
|
||||
"print(\"Length of each example: {}\".format(data['x'].shape[-1]))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "twI72ZCrCt5z"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Load in the data\n",
|
||||
"train_data_x = data['x'].transpose()\n",
|
||||
"train_data_y = data['y']\n",
|
||||
"val_data_x = data['x_test'].transpose()\n",
|
||||
"val_data_y = data['y_test']\n",
|
||||
"# Print out sizes\n",
|
||||
"print(\"Train data: %d examples (columns), each of which has %d dimensions (rows)\"%((train_data_x.shape[1],train_data_x.shape[0])))\n",
|
||||
"print(\"Validation data: %d examples (columns), each of which has %d dimensions (rows)\"%((val_data_x.shape[1],val_data_x.shape[0])))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "8bKADvLHbiV5"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def print_variance(name, data):\n",
|
||||
" # First dimension(rows) is batch elements\n",
|
||||
" # Second dimension(columns) is neurons.\n",
|
||||
" np_data = data.detach().numpy()\n",
|
||||
" # Compute variance across neurons and average these variances over members of the batch\n",
|
||||
" neuron_variance = np.mean(np.var(np_data, axis=0))\n",
|
||||
" # Print out the name and the variance\n",
|
||||
" print(\"%s variance=%f\"%(name,neuron_variance))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "3bBpJIV-N-lt"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# He initialization of weights\n",
|
||||
"def weights_init(layer_in):\n",
|
||||
" if isinstance(layer_in, nn.Linear):\n",
|
||||
" nn.init.kaiming_uniform_(layer_in.weight)\n",
|
||||
" layer_in.bias.data.fill_(0.0)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YgLaex1pfhqz"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def run_one_step_of_model(model, x_train, y_train):\n",
|
||||
" # choose cross entropy loss function (equation 5.24 in the loss notes)\n",
|
||||
" loss_function = nn.CrossEntropyLoss()\n",
|
||||
" # construct SGD optimizer and initialize learning rate and momentum\n",
|
||||
" optimizer = torch.optim.SGD(model.parameters(), lr = 0.05, momentum=0.9)\n",
|
||||
"\n",
|
||||
" # load the data into a class that creates the batches\n",
|
||||
" data_loader = DataLoader(TensorDataset(x_train,y_train), batch_size=200, shuffle=True, worker_init_fn=np.random.seed(1))\n",
|
||||
"\n",
|
||||
" # Initialize model weights\n",
|
||||
" model.apply(weights_init)\n",
|
||||
"\n",
|
||||
" # Get a batch\n",
|
||||
" for i, data in enumerate(data_loader):\n",
|
||||
" # retrieve inputs and labels for this batch\n",
|
||||
" x_batch, y_batch = data\n",
|
||||
" # zero the parameter gradients\n",
|
||||
" optimizer.zero_grad()\n",
|
||||
" # forward pass -- calculate model output\n",
|
||||
" pred = model(x_batch)\n",
|
||||
" # compute the loss\n",
|
||||
" loss = loss_function(pred, y_batch)\n",
|
||||
" # backward pass\n",
|
||||
" loss.backward()\n",
|
||||
" # SGD update\n",
|
||||
" optimizer.step()\n",
|
||||
" # Break out of this loop -- we just want to see the first\n",
|
||||
" # iteration, but usually we would continue\n",
|
||||
" break"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "DFlu45pORQEz"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# convert training data to torch tensors\n",
|
||||
"x_train = torch.tensor(train_data_x.transpose().astype('float32'))\n",
|
||||
"y_train = torch.tensor(train_data_y.astype('long'))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "i7Q0ScWgRe4G"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# This is a simple residual model with 5 residual branches in a row\n",
|
||||
"class ResidualNetwork(torch.nn.Module):\n",
|
||||
" def __init__(self, input_size, output_size, hidden_size=100):\n",
|
||||
" super(ResidualNetwork, self).__init__()\n",
|
||||
" self.linear1 = nn.Linear(input_size, hidden_size)\n",
|
||||
" self.linear2 = nn.Linear(hidden_size, hidden_size)\n",
|
||||
" self.linear3 = nn.Linear(hidden_size, hidden_size)\n",
|
||||
" self.linear4 = nn.Linear(hidden_size, hidden_size)\n",
|
||||
" self.linear5 = nn.Linear(hidden_size, hidden_size)\n",
|
||||
" self.linear6 = nn.Linear(hidden_size, output_size)\n",
|
||||
"\n",
|
||||
" def count_params(self):\n",
|
||||
" return sum([p.view(-1).shape[0] for p in self.parameters()])\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" print_variance(\"Input\",x)\n",
|
||||
" f = self.linear1(x)\n",
|
||||
" print_variance(\"First preactivation\",f)\n",
|
||||
" res1 = f+ self.linear2(f.relu())\n",
|
||||
" print_variance(\"After first residual connection\",res1)\n",
|
||||
" res2 = res1 + self.linear3(res1.relu())\n",
|
||||
" print_variance(\"After second residual connection\",res2)\n",
|
||||
" res3 = res2 + self.linear4(res2.relu())\n",
|
||||
" print_variance(\"After third residual connection\",res3)\n",
|
||||
" res4 = res3 + self.linear4(res3.relu())\n",
|
||||
" print_variance(\"After fourth residual connection\",res4)\n",
|
||||
" res5 = res4 + self.linear4(res4.relu())\n",
|
||||
" print_variance(\"After fifth residual connection\",res5)\n",
|
||||
" return self.linear6(res5)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "FslroPJJffrh"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define the model and run for one step\n",
|
||||
"# Monitoring the variance at each point in the network\n",
|
||||
"n_hidden = 100\n",
|
||||
"n_input = 40\n",
|
||||
"n_output = 10\n",
|
||||
"model = ResidualNetwork(n_input, n_output, n_hidden)\n",
|
||||
"run_one_step_of_model(model, x_train, y_train)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "NYw8I_3mmX5c"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Notice that the variance roughly doubles at each step so it increases exponentially as in figure 11.6b in the book."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "0kZUlWkkW8jE"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# TODO Adapt the residual network below to add a batch norm operation\n",
|
||||
"# before the contents of each residual link as in figure 11.6c in the book\n",
|
||||
"# Use the torch function nn.BatchNorm1d\n",
|
||||
"class ResidualNetworkWithBatchNorm(torch.nn.Module):\n",
|
||||
" def __init__(self, input_size, output_size, hidden_size=100):\n",
|
||||
" super(ResidualNetworkWithBatchNorm, self).__init__()\n",
|
||||
" self.linear1 = nn.Linear(input_size, hidden_size)\n",
|
||||
" self.linear2 = nn.Linear(hidden_size, hidden_size)\n",
|
||||
" self.linear3 = nn.Linear(hidden_size, hidden_size)\n",
|
||||
" self.linear4 = nn.Linear(hidden_size, hidden_size)\n",
|
||||
" self.linear5 = nn.Linear(hidden_size, hidden_size)\n",
|
||||
" self.linear6 = nn.Linear(hidden_size, output_size)\n",
|
||||
"\n",
|
||||
" def count_params(self):\n",
|
||||
" return sum([p.view(-1).shape[0] for p in self.parameters()])\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" print_variance(\"Input\",x)\n",
|
||||
" f = self.linear1(x)\n",
|
||||
" print_variance(\"First preactivation\",f)\n",
|
||||
" res1 = f+ self.linear2(f.relu())\n",
|
||||
" print_variance(\"After first residual connection\",res1)\n",
|
||||
" res2 = res1 + self.linear3(res1.relu())\n",
|
||||
" print_variance(\"After second residual connection\",res2)\n",
|
||||
" res3 = res2 + self.linear4(res2.relu())\n",
|
||||
" print_variance(\"After third residual connection\",res3)\n",
|
||||
" res4 = res3 + self.linear4(res3.relu())\n",
|
||||
" print_variance(\"After fourth residual connection\",res4)\n",
|
||||
" res5 = res4 + self.linear4(res4.relu())\n",
|
||||
" print_variance(\"After fifth residual connection\",res5)\n",
|
||||
" return self.linear6(res5)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "5JvMmaRITKGd"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
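(A hedged sketch of the batch normalization placement the TODO above asks for — one nn.BatchNorm1d at the start of each residual branch, as in figure 11.6c; the bn1/bn2 attribute names are assumptions, not part of the original class:)

# In __init__, define one BatchNorm1d per residual branch:
self.bn1 = nn.BatchNorm1d(hidden_size)
self.bn2 = nn.BatchNorm1d(hidden_size)
# ... and in forward, normalize at the start of each branch, before the ReLU and linear:
res1 = f + self.linear2(self.bn1(f).relu())
res2 = res1 + self.linear3(self.bn2(res1).relu())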
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define the model\n",
|
||||
"n_hidden = 100\n",
|
||||
"n_input = 40\n",
|
||||
"n_output = 10\n",
|
||||
"model = ResidualNetworkWithBatchNorm(n_input, n_output, n_hidden)\n",
|
||||
"run_one_step_of_model(model, x_train, y_train)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "2U3DnlH9Uw6c"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Note that the variance now increases linearly as in figure 11.6c."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "R_ucFq9CXq8D"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
375  Notebooks/Chap12/12_1_Self_Attention.ipynb  (new file)
@@ -0,0 +1,375 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyOKrX9gmuhl9+KwscpZKr3u",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap12/12_1_Self_Attention.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 12.1: Self Attention**\n",
|
||||
"\n",
|
||||
"This notebook builds a self-attnetion mechanism from scratch, as discussed in section 12.2 of the book.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "t9vk9Elugvmi"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "OLComQyvCIJ7"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"The self-attention mechanism maps $N$ inputs $\\mathbf{x}_{n}\\in\\mathbb{R}^{D}$ and returns $N$ outputs $\\mathbf{x}'_{n}\\in \\mathbb{R}^{D}$. \n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "9OJkkoNqCVK2"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Set seed so we get the same random numbers\n",
|
||||
"np.random.seed(3)\n",
|
||||
"# Number of inputs\n",
|
||||
"N = 3\n",
|
||||
"# Number of dimensions of each input\n",
|
||||
"D = 4\n",
|
||||
"# Create an empty list\n",
|
||||
"all_x = []\n",
|
||||
"# Create elements x_n and append to list\n",
|
||||
"for n in range(N):\n",
|
||||
" all_x.append(np.random.normal(size=(D,1)))\n",
|
||||
"# Print out the list\n",
|
||||
"print(all_x)\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "oAygJwLiCSri"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"We'll also need the weights and biases for the keys, queries, and values (equations 12.2 and 12.4)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "W2iHFbtKMaDp"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Set seed so we get the same random numbers\n",
|
||||
"np.random.seed(0)\n",
|
||||
"\n",
|
||||
"# Choose random values for the parameters\n",
|
||||
"omega_q = np.random.normal(size=(D,D))\n",
|
||||
"omega_k = np.random.normal(size=(D,D))\n",
|
||||
"omega_v = np.random.normal(size=(D,D))\n",
|
||||
"beta_q = np.random.normal(size=(D,1))\n",
|
||||
"beta_k = np.random.normal(size=(D,1))\n",
|
||||
"beta_v = np.random.normal(size=(D,1))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "79TSK7oLMobe"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's compute the queries, keys, and values for each input"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "VxaKQtP3Ng6R"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Make three lists to store queries, keys, and values\n",
|
||||
"all_queries = []\n",
|
||||
"all_keys = []\n",
|
||||
"all_values = []\n",
|
||||
"# For every input\n",
|
||||
"for x in all_x:\n",
|
||||
" # TODO -- compute the keys, queries and values.\n",
|
||||
" # Replace these three lines\n",
|
||||
" query = np.ones_like(x)\n",
|
||||
" key = np.ones_like(x)\n",
|
||||
" value = np.ones_like(x)\n",
|
||||
"\n",
|
||||
" all_queries.append(query)\n",
|
||||
" all_keys.append(key)\n",
|
||||
" all_values.append(value)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "TwDK2tfdNmw9"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
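(One possible completion of the TODO above, as a hedged sketch — each query, key, and value is an affine transform of the input, per equations 12.2 and 12.4:)

    query = beta_q + np.matmul(omega_q, x)
    key = beta_k + np.matmul(omega_k, x)
    value = beta_v + np.matmul(omega_v, x)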
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"We'll need a softmax function (equation 12.5) -- here, it will take a list of arbirtrary numbers and return a list where the elements are non-negative and sum to one\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Se7DK6PGPSUk"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def softmax(items_in):\n",
|
||||
"\n",
|
||||
" # TODO Compute the elements of items_out\n",
|
||||
" # Replace this line\n",
|
||||
" items_out = items_in.copy()\n",
|
||||
"\n",
|
||||
" return items_out ;"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "u93LIcE5PoiM"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
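(A hedged sketch of the softmax the TODO asks for; subtracting the maximum before exponentiating is an optional numerical-stability detail, not something the exercise requires:)

def softmax(items_in):
    items_in = np.array(items_in)
    exp_items = np.exp(items_in - np.max(items_in))  # stable exponentiation
    items_out = exp_items / np.sum(exp_items)        # non-negative, sums to one
    return items_out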
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now compute the self attention values:"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "8aJVhbKDW7lm"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Create emptymlist for output\n",
|
||||
"all_x_prime = []\n",
|
||||
"\n",
|
||||
"# For each output\n",
|
||||
"for n in range(N):\n",
|
||||
" # Create list for dot products of query N with all keys\n",
|
||||
" all_km_qn = []\n",
|
||||
" # Compute the dot products\n",
|
||||
" for key in all_keys:\n",
|
||||
" # TODO -- compute the appropriate dot product\n",
|
||||
" # Replace this line\n",
|
||||
" dot_product = 1\n",
|
||||
"\n",
|
||||
" # Store dot product\n",
|
||||
" all_km_qn.append(dot_product)\n",
|
||||
"\n",
|
||||
" # Compute dot product\n",
|
||||
" attention = softmax(all_km_qn)\n",
|
||||
" # Print result (should be positive sum to one)\n",
|
||||
" print(\"Attentions for output \", n)\n",
|
||||
" print(attention)\n",
|
||||
"\n",
|
||||
" # TODO: Compute a weighted sum of all of the values according to the attention\n",
|
||||
" # (equation 12.3)\n",
|
||||
" # Replace this line\n",
|
||||
" x_prime = np.zeros((D,1))\n",
|
||||
"\n",
|
||||
" all_x_prime.append(x_prime)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Print out true values to check you have it correct\n",
|
||||
"print(\"x_prime_0_calculated:\", all_x_prime[0].transpose())\n",
|
||||
"print(\"x_prime_0_true: [[ 0.94744244 -0.24348429 -0.91310441 -0.44522983]]\")\n",
|
||||
"print(\"x_prime_1_calculated:\", all_x_prime[1].transpose())\n",
|
||||
"print(\"x_prime_1_true: [[ 1.64201168 -0.08470004 4.02764044 2.18690791]]\")\n",
|
||||
"print(\"x_prime_2_calculated:\", all_x_prime[2].transpose())\n",
|
||||
"print(\"x_prime_2_true: [[ 1.61949281 -0.06641533 3.96863308 2.15858316]]\")\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "yimz-5nCW6vQ"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's compute the same thing, but using matrix calculations. We'll store the $N$ inputs $\\mathbf{x}_{n}\\in\\mathbb{R}^{D}$ in the columns of a $D\\times N$ matrix, using equations 12.6 and 12.7/8.\n",
|
||||
"\n",
|
||||
"Note: The book uses column vectors (for compatibility with the rest of the text), but in the wider literature it is more normal to store the inputs in the rows of a matrix; in this case, the computation is the same, but all the matrices are transposed and the operations proceed in the reverse order."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "PJ2vCQ_7C38K"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define softmax operation that works independently on each column\n",
|
||||
"def softmax_cols(data_in):\n",
|
||||
" # Exponentiate all of the values\n",
|
||||
" exp_values = np.exp(data_in) ;\n",
|
||||
" # Sum over columns\n",
|
||||
" denom = np.sum(exp_values, axis = 0);\n",
|
||||
" # Replicate denominator to N rows\n",
|
||||
" denom = np.matmul(np.ones((data_in.shape[0],1)), denom[np.newaxis,:])\n",
|
||||
" # Compute softmax\n",
|
||||
" softmax = exp_values / denom\n",
|
||||
" # return the answer\n",
|
||||
" return softmax"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "obaQBdUAMXXv"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
" # Now let's compute self attention in matrix form\n",
|
||||
"def self_attention(X,omega_v, omega_q, omega_k, beta_v, beta_q, beta_k):\n",
|
||||
"\n",
|
||||
" # TODO -- Write this function\n",
|
||||
" # 1. Compute queries, keys, and values\n",
|
||||
" # 2. Compute dot products\n",
|
||||
" # 3. Apply softmax to calculate attentions\n",
|
||||
" # 4. Weight values by attentions\n",
|
||||
" # Replace this line\n",
|
||||
" X_prime = np.zeros_like(X);\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" return X_prime"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "gb2WvQ3SiH8r"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
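(A hedged sketch of the matrix form the TODO asks for, following equations 12.6-12.8 with the inputs in the columns of X; the beta column vectors broadcast across the N columns. Not necessarily the author's intended solution:)

def self_attention(X, omega_v, omega_q, omega_k, beta_v, beta_q, beta_k):
    # 1. Queries, keys, and values for all N columns at once
    Q = beta_q + np.matmul(omega_q, X)
    K = beta_k + np.matmul(omega_k, X)
    V = beta_v + np.matmul(omega_v, X)
    # 2. Dot products of every key with every query (N x N)
    dot_products = np.matmul(K.transpose(), Q)
    # 3. Softmax over each column gives the attentions
    attention = softmax_cols(dot_products)
    # 4. Weight the values by the attentions
    return np.matmul(V, attention)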
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Copy data into matrix\n",
|
||||
"X = np.zeros((D, N))\n",
|
||||
"X[:,0] = np.squeeze(all_x[0])\n",
|
||||
"X[:,1] = np.squeeze(all_x[1])\n",
|
||||
"X[:,2] = np.squeeze(all_x[2])\n",
|
||||
"\n",
|
||||
"# Run the self attention mechanism\n",
|
||||
"X_prime = self_attention(X,omega_v, omega_q, omega_k, beta_v, beta_q, beta_k)\n",
|
||||
"\n",
|
||||
"# Print out the results\n",
|
||||
"print(X_prime)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "MUOJbgJskUpl"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"If you did this correctly, the values should be the same as above.\n",
|
||||
"\n",
|
||||
"TODO: \n",
|
||||
"\n",
|
||||
"Print out the attention matrix\n",
|
||||
"You will see that the values are quite extreme (one is very close to one and the others are very close to zero. Now we'll fix this problem by using scaled dot-product attention."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "as_lRKQFpvz0"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Now let's compute self attention in matrix form\n",
|
||||
"def scaled_dot_product_self_attention(X,omega_v, omega_q, omega_k, beta_v, beta_q, beta_k):\n",
|
||||
"\n",
|
||||
" # TODO -- Write this function\n",
|
||||
" # 1. Compute queries, keys, and values\n",
|
||||
" # 2. Compute dot products\n",
|
||||
" # 3. Scale the dot products as in equation 12.9\n",
|
||||
" # 4. Apply softmax to calculate attentions\n",
|
||||
" # 5. Weight values by attentions\n",
|
||||
" # Replace this line\n",
|
||||
" X_prime = np.zeros_like(X);\n",
|
||||
"\n",
|
||||
" return X_prime"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "kLU7PUnnqvIh"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
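(A hedged sketch of the scaled variant -- identical to the function above except that the dot products are divided by the square root of the query/key dimension, as in equation 12.9:)

def scaled_dot_product_self_attention(X, omega_v, omega_q, omega_k, beta_v, beta_q, beta_k):
    Q = beta_q + np.matmul(omega_q, X)
    K = beta_k + np.matmul(omega_k, X)
    V = beta_v + np.matmul(omega_v, X)
    D_q = Q.shape[0]  # dimension of the queries and keys
    dot_products = np.matmul(K.transpose(), Q) / np.sqrt(D_q)
    attention = softmax_cols(dot_products)
    return np.matmul(V, attention)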
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Run the self attention mechanism\n",
|
||||
"X_prime = scaled_dot_product_self_attention(X,omega_v, omega_q, omega_k, beta_v, beta_q, beta_k)\n",
|
||||
"\n",
|
||||
"# Print out the results\n",
|
||||
"print(X_prime)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "n18e3XNzmVgL"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"TODO -- Investigate whether the self-attention mechanism is covariant with respect to permulation.\n",
|
||||
"If it is, when we permute the columns of the input matrix $\\mathbf{X}$, the columns of the output matrix $\\mathbf{X}'$ will also be permuted.\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "QDEkIrcgrql-"
|
||||
}
|
||||
}
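(A quick hedged sketch of one way to run the investigation above: permute the columns of X, rerun the mechanism, and compare against the permuted original output.)

perm = np.random.permutation(N)  # a random reordering of the columns
X_prime_perm = scaled_dot_product_self_attention(X[:, perm], omega_v, omega_q, omega_k, beta_v, beta_q, beta_k)
print(np.allclose(X_prime_perm, X_prime[:, perm]))  # True if the mechanism is covariant to permutation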
|
||||
]
|
||||
}
|
||||
212  Notebooks/Chap12/12_2_Multihead_Self_Attention.ipynb  (new file)
@@ -0,0 +1,212 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyMSk8qTqDYqFnRJVZKlsue0",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap12/12_2_Multihead_Self_Attention.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 12.1: Multhead Self-Attention**\n",
|
||||
"\n",
|
||||
"This notebook builds a multihead self-attentionm mechanism as in figure 12.6\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "t9vk9Elugvmi"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "OLComQyvCIJ7"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"The multihead self-attention mechanism maps $N$ inputs $\\mathbf{x}_{n}\\in\\mathbb{R}^{D}$ and returns $N$ outputs $\\mathbf{x}'_{n}\\in \\mathbb{R}^{D}$. \n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "9OJkkoNqCVK2"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Set seed so we get the same random numbers\n",
|
||||
"np.random.seed(3)\n",
|
||||
"# Number of inputs\n",
|
||||
"N = 6\n",
|
||||
"# Number of dimensions of each input\n",
|
||||
"D = 8\n",
|
||||
"# Create an empty list\n",
|
||||
"X = np.random.normal(size=(D,N))\n",
|
||||
"# Print X\n",
|
||||
"print(X)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "oAygJwLiCSri"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"We'll use two heads. We'll need the weights and biases for the keys, queries, and values (equations 12.2 and 12.4). We'll use two heads, and (as in the figure), we'll make the queries keys and values of size D/H"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "W2iHFbtKMaDp"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Number of heads\n",
|
||||
"H = 2\n",
|
||||
"# QDV dimension\n",
|
||||
"H_D = int(D/H)\n",
|
||||
"\n",
|
||||
"# Set seed so we get the same random numbers\n",
|
||||
"np.random.seed(0)\n",
|
||||
"\n",
|
||||
"# Choose random values for the parameters for the first head\n",
|
||||
"omega_q1 = np.random.normal(size=(H_D,D))\n",
|
||||
"omega_k1 = np.random.normal(size=(H_D,D))\n",
|
||||
"omega_v1 = np.random.normal(size=(H_D,D))\n",
|
||||
"beta_q1 = np.random.normal(size=(H_D,1))\n",
|
||||
"beta_k1 = np.random.normal(size=(H_D,1))\n",
|
||||
"beta_v1 = np.random.normal(size=(H_D,1))\n",
|
||||
"\n",
|
||||
"# Choose random values for the parameters for the second head\n",
|
||||
"omega_q2 = np.random.normal(size=(H_D,D))\n",
|
||||
"omega_k2 = np.random.normal(size=(H_D,D))\n",
|
||||
"omega_v2 = np.random.normal(size=(H_D,D))\n",
|
||||
"beta_q2 = np.random.normal(size=(H_D,1))\n",
|
||||
"beta_k2 = np.random.normal(size=(H_D,1))\n",
|
||||
"beta_v2 = np.random.normal(size=(H_D,1))\n",
|
||||
"\n",
|
||||
"# Choose random values for the parameters\n",
|
||||
"omega_c = np.random.normal(size=(D,D))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "79TSK7oLMobe"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's compute the multiscale self-attention"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "VxaKQtP3Ng6R"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define softmax operation that works independently on each column\n",
|
||||
"def softmax_cols(data_in):\n",
|
||||
" # Exponentiate all of the values\n",
|
||||
" exp_values = np.exp(data_in) ;\n",
|
||||
" # Sum over columns\n",
|
||||
" denom = np.sum(exp_values, axis = 0);\n",
|
||||
" # Replicate denominator to N rows\n",
|
||||
" denom = np.matmul(np.ones((data_in.shape[0],1)), denom[np.newaxis,:])\n",
|
||||
" # Compute softmax\n",
|
||||
" softmax = exp_values / denom\n",
|
||||
" # return the answer\n",
|
||||
" return softmax"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "obaQBdUAMXXv"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
" # Now let's compute self attention in matrix form\n",
|
||||
"def multihead_scaled_self_attention(X,omega_v1, omega_q1, omega_k1, beta_v1, beta_q1, beta_k1, omega_v2, omega_q2, omega_k2, beta_v2, beta_q2, beta_k2, omega_c):\n",
|
||||
"\n",
|
||||
" # TODO Write the multihead scaled self-attention mechanism.\n",
|
||||
" # Replace this line\n",
|
||||
" X_prime = np.zeros_like(X) ;\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" return X_prime"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "gb2WvQ3SiH8r"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
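(A hedged sketch of the multihead computation the TODO asks for -- each head runs scaled dot-product self-attention with its own parameters, the two H_D x N head outputs are stacked vertically, and omega_c recombines them; the one_head helper is an assumption introduced for brevity:)

def multihead_scaled_self_attention(X, omega_v1, omega_q1, omega_k1, beta_v1, beta_q1, beta_k1,
                                    omega_v2, omega_q2, omega_k2, beta_v2, beta_q2, beta_k2, omega_c):
    def one_head(omega_v, omega_q, omega_k, beta_v, beta_q, beta_k):
        Q = beta_q + np.matmul(omega_q, X)
        K = beta_k + np.matmul(omega_k, X)
        V = beta_v + np.matmul(omega_v, X)
        # Scaled dot products, softmax over columns, then weight the values
        dot_products = np.matmul(K.transpose(), Q) / np.sqrt(Q.shape[0])
        return np.matmul(V, softmax_cols(dot_products))
    head1 = one_head(omega_v1, omega_q1, omega_k1, beta_v1, beta_q1, beta_k1)
    head2 = one_head(omega_v2, omega_q2, omega_k2, beta_v2, beta_q2, beta_k2)
    # Stack the head outputs back to D x N and recombine with omega_c
    return np.matmul(omega_c, np.concatenate([head1, head2], axis=0))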
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Run the self attention mechanism\n",
|
||||
"X_prime = multihead_scaled_self_attention(X,omega_v1, omega_q1, omega_k1, beta_v1, beta_q1, beta_k1, omega_v2, omega_q2, omega_k2, beta_v2, beta_q2, beta_k2, omega_c)\n",
|
||||
"\n",
|
||||
"# Print out the results\n",
|
||||
"np.set_printoptions(precision=3)\n",
|
||||
"print(\"Your answer:\")\n",
|
||||
"print(X_prime)\n",
|
||||
"\n",
|
||||
"print(\"True values:\")\n",
|
||||
"print(\"[[-21.207 -5.373 -20.933 -9.179 -11.319 -17.812]\")\n",
|
||||
"print(\" [ -1.995 7.906 -10.516 3.452 9.863 -7.24 ]\")\n",
|
||||
"print(\" [ 5.479 1.115 9.244 0.453 5.656 7.089]\")\n",
|
||||
"print(\" [ -7.413 -7.416 0.363 -5.573 -6.736 -0.848]\")\n",
|
||||
"print(\" [-11.261 -9.937 -4.848 -8.915 -13.378 -5.761]\")\n",
|
||||
"print(\" [ 3.548 10.036 -2.244 1.604 12.113 -2.557]\")\n",
|
||||
"print(\" [ 4.888 -5.814 2.407 3.228 -4.232 3.71 ]\")\n",
|
||||
"print(\" [ 1.248 18.894 -6.409 3.224 19.717 -5.629]]\")\n",
|
||||
"\n",
|
||||
"# If your answers don't match, then make sure that you are doing the scaling, and make sure the scaling value is correct"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "MUOJbgJskUpl"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
|
||||
]
|
||||
}
|
||||
341  Notebooks/Chap12/12_3_Tokenization.ipynb  (new file)
@@ -0,0 +1,341 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyP0/KodWM9Dtr2x+8MdXXH1",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap12/12_3_Tokenization.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 12.3: Tokenization**\n",
|
||||
"\n",
|
||||
"This notebook builds set of tokens from a text string as in figure 12.8 of the book.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"I adapted this code from *SOMEWHERE*. If anyone recognizes it, can you let me know and I will give the proper attribution or rewrite if the license is not permissive.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "t9vk9Elugvmi"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"import re, collections"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "3_WkaFO3OfLi"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"text = \"a sailor went to sea sea sea \"+\\\n",
|
||||
" \"to see what he could see see see \"+\\\n",
|
||||
" \"but all that he could see see see \"+\\\n",
|
||||
" \"was the bottom of the deep blue sea sea sea\""
|
||||
],
|
||||
"metadata": {
|
||||
"id": "tVZVuauIXmJk"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Tokenize the input sentence To begin with the tokens are the individual letters and the </w> whitespace token. So, we represent each word in terms of these tokens with spaces between the tokens to delineate them.\n",
|
||||
"\n",
|
||||
"The tokenized text is stored in a structure that represents each word as tokens together with the count of how often that word occurs. We'll call this the *vocabulary*."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "fF2RBrouWV5w"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def initialize_vocabulary(text):\n",
|
||||
" vocab = collections.defaultdict(int)\n",
|
||||
" words = text.strip().split()\n",
|
||||
" for word in words:\n",
|
||||
" vocab[' '.join(list(word)) + ' </w>'] += 1\n",
|
||||
" return vocab"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "OfvXkLSARk4_"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"vocab = initialize_vocabulary(text)\n",
|
||||
"print('Vocabulary: {}'.format(vocab))\n",
|
||||
"print('Size of vocabulary: {}'.format(len(vocab)))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "aydmNqaoOpSm"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Find all the tokens in the current vocabulary and their frequencies"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "fJAiCjphWsI9"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def get_tokens_and_frequencies(vocab):\n",
|
||||
" tokens = collections.defaultdict(int)\n",
|
||||
" for word, freq in vocab.items():\n",
|
||||
" word_tokens = word.split()\n",
|
||||
" for token in word_tokens:\n",
|
||||
" tokens[token] += freq\n",
|
||||
" return tokens"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "qYi6F_K3RYsW"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"tokens = get_tokens_and_frequencies(vocab)\n",
|
||||
"print('Tokens: {}'.format(tokens))\n",
|
||||
"print('Number of tokens: {}'.format(len(tokens)))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Y4LCVGnvXIwp"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Find each pair of adjacent tokens in the vocabulary\n",
|
||||
"and count them. We will subsequently merge the most frequently occurring pair."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "_-Rh1mD_Ww3b"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def get_pairs_and_counts(vocab):\n",
|
||||
" pairs = collections.defaultdict(int)\n",
|
||||
" for word, freq in vocab.items():\n",
|
||||
" symbols = word.split()\n",
|
||||
" for i in range(len(symbols)-1):\n",
|
||||
" pairs[symbols[i],symbols[i+1]] += freq\n",
|
||||
" return pairs"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "OqJTB3UFYubH"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"pairs = get_pairs_and_counts(vocab)\n",
|
||||
"print('Pairs: {}'.format(pairs))\n",
|
||||
"print('Number of distinct pairs: {}'.format(len(pairs)))\n",
|
||||
"\n",
|
||||
"most_frequent_pair = max(pairs, key=pairs.get)\n",
|
||||
"print('Most frequent pair: {}'.format(most_frequent_pair))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "d-zm0JBcZSjS"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Merge the instances of the best pair in the vocabulary"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "pcborzqIXQFS"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def merge_pair_in_vocabulary(pair, vocab_in):\n",
|
||||
" vocab_out = {}\n",
|
||||
" bigram = re.escape(' '.join(pair))\n",
|
||||
" p = re.compile(r'(?<!\\S)' + bigram + r'(?!\\S)')\n",
|
||||
" for word in vocab_in:\n",
|
||||
" word_out = p.sub(''.join(pair), word)\n",
|
||||
" vocab_out[word_out] = vocab_in[word]\n",
|
||||
" return vocab_out"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "xQI6NALdWQZX"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"vocab = merge_pair_in_vocabulary(most_frequent_pair, vocab)\n",
|
||||
"print('Vocabulary: {}'.format(vocab))\n",
|
||||
"print('Size of vocabulary: {}'.format(len(vocab)))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "TRYeBZI3ZULu"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Update the tokens, which now include the best token 'se'"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "bkhUx3GeXwba"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"tokens = get_tokens_and_frequencies(vocab)\n",
|
||||
"print('Tokens: {}'.format(tokens))\n",
|
||||
"print('Number of tokens: {}'.format(len(tokens)))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Fqj-vQWeXxQi"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's write the full tokenization routine"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "K_hKp2kSXXS1"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# TODO -- write this routine by filling in this missing parts,\n",
|
||||
"# calling the above routines\n",
|
||||
"def tokenize(text, num_merges):\n",
|
||||
" # Initialize the vocabulary from the input text\n",
|
||||
" # vocab = (your code here)\n",
|
||||
"\n",
|
||||
" for i in range(num_merges):\n",
|
||||
" # Find the tokens and how often they occur in the vocabulary\n",
|
||||
" # tokens = (your code here)\n",
|
||||
"\n",
|
||||
" # Find the pairs of adjacent tokens and their counts\n",
|
||||
" # pairs = (your code here)\n",
|
||||
"\n",
|
||||
" # Find the most frequent pair\n",
|
||||
" # most_frequent_pair = (your code here)\n",
|
||||
" print('Most frequent pair: {}'.format(most_frequent_pair))\n",
|
||||
"\n",
|
||||
" # Merge the code in the vocabulary\n",
|
||||
" # vocab = (your code here)\n",
|
||||
"\n",
|
||||
" # Find the tokens and how often they occur in the vocabulary one last time\n",
|
||||
" # tokens = (your code here)\n",
|
||||
"\n",
|
||||
" return tokens, vocab"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "U_1SkQRGQ8f3"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
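One plausible way to fill in the missing parts, simply chaining the helper routines defined in the cells above (a sketch, not necessarily the official solution):

```python
# Sketch of the completed BPE loop using the helpers defined above.
def tokenize(text, num_merges):
    # Initialize the vocabulary from the input text
    vocab = initialize_vocabulary(text)

    for i in range(num_merges):
        # Find the tokens and how often they occur in the vocabulary
        tokens = get_tokens_and_frequencies(vocab)
        # Find the pairs of adjacent tokens and their counts
        pairs = get_pairs_and_counts(vocab)
        # Find the most frequent pair
        most_frequent_pair = max(pairs, key=pairs.get)
        print('Most frequent pair: {}'.format(most_frequent_pair))
        # Merge this pair throughout the vocabulary
        vocab = merge_pair_in_vocabulary(most_frequent_pair, vocab)

    # Find the tokens and their frequencies one last time
    tokens = get_tokens_and_frequencies(vocab)
    return tokens, vocab
```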
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"tokens, vocab = tokenize(text, num_merges=22)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "w0EkHTrER_-I"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"print('Tokens: {}'.format(tokens))\n",
|
||||
"print('Number of tokens: {}'.format(len(tokens)))\n",
|
||||
"print('Vocabulary: {}'.format(vocab))\n",
|
||||
"print('Size of vocabulary: {}'.format(len(vocab)))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "moqDtTzIb-NG"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"TODO - Consider the input text:\n",
|
||||
"\n",
|
||||
"\"How much wood could a woodchuck chuck if a woodchuck could chuck wood\"\n",
|
||||
"\n",
|
||||
"How many tokens will there be initially and what will they be?\n",
|
||||
"How many tokens will there be if we run the tokenization routine for the maximum number of iterations (merges)?\n",
|
||||
"\n",
|
||||
"When you've made your predictions, run the code and see if you are correct."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "jOW_HJtMdAxd"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
648
Notebooks/Chap12/12_4_Decoding_Strategies.ipynb
Normal file
648
Notebooks/Chap12/12_4_Decoding_Strategies.ipynb
Normal file
@@ -0,0 +1,648 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyNPrHfkLWjy3NfDHRhGG3IE",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap12/12_4_Decoding_Strategies.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 12.4: Decoding strategies**\n",
|
||||
"\n",
|
||||
"This practical investigates neural decoding from transformer models. \n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "RnIUiieJWu6e"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"!pip install transformers"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "7abjZ9pMVj3k"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"from transformers import GPT2LMHeadModel, GPT2Tokenizer, set_seed\n",
|
||||
"import torch\n",
|
||||
"import torch.nn.functional as F\n",
|
||||
"import numpy as np"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "sMOyD0zem2Ef"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Load model and tokenizer\n",
|
||||
"model = GPT2LMHeadModel.from_pretrained('gpt2')\n",
|
||||
"tokenizer = GPT2Tokenizer.from_pretrained('gpt2')"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "pZgfxbzKWNSR"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Decoding from GPT2\n",
|
||||
"\n",
|
||||
"This tutorial investigates how to use GPT2 (the forerunner of GPT3) to generate text. There are a number of ways to do this that trade-off the realism of the text against the amount of variation.\n",
|
||||
"\n",
|
||||
"At every stage, GPT2 takes an input string and returns a probability for each of the possible subsequent tokens. We can choose what to do with these probability. We could always *greedily choose* the most likely next token, or we could draw a *sample* randomly according to the probabilities. There are also intermediate strategies such as *top-k sampling* and *nucleus sampling*, that have some controlled randomness.\n",
|
||||
"\n",
|
||||
"We'll also investigate *beam search* -- the idea is that rather than greedily take the next best token at each stage, we maintain a set of hypotheses (beams)as we add each subsequent token and return the most likely overall hypothesis. This is not necessarily the same result we get from greedily choosing the next token."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "TfhAGy0TXEvV"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"First, let's investigate the token themselves. The code below prints out the vocabulary size and shows 20 random tokens. "
|
||||
],
|
||||
"metadata": {
|
||||
"id": "vsmO9ptzau3_"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"np.random.seed(1)\n",
|
||||
"print(\"Number of tokens in dictionary = %d\"%(tokenizer.vocab_size))\n",
|
||||
"for i in range(20):\n",
|
||||
" index = np.random.randint(tokenizer.vocab_size)\n",
|
||||
" print(\"Token: %d \"%(index)+tokenizer.decode(torch.tensor(index), skip_special_tokens=True))\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "dmmBNS5GY_yk"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Sampling\n",
|
||||
"\n",
|
||||
"Each time we run GPT2 it will take in a set of tokens, and return a probability over each of the possible next tokens. The simplest thing we could do is to just draw a sample from this probability distribution each time."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "MUM3kLEjbTso"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def sample_next_token(input_tokens, model, tokenizer):\n",
|
||||
" # Run model to get prediction over next output\n",
|
||||
" outputs = model(input_ids = input_tokens['input_ids'], attention_mask = input_tokens['attention_mask'])\n",
|
||||
" # Find prediction\n",
|
||||
" prob_over_tokens = F.softmax(outputs.logits, dim=-1).detach().numpy()[0,-1]\n",
|
||||
" # TODO Draw a random token according to the probabilities\n",
|
||||
" # next_token should be an array with an sole integer in it (as below)\n",
|
||||
" # Use: https://numpy.org/doc/stable/reference/random/generated/numpy.random.choice.html\n",
|
||||
" # Replace this line\n",
|
||||
" next_token = [5000]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" # Append token to sentence\n",
|
||||
" output_tokens = input_tokens\n",
|
||||
" output_tokens[\"input_ids\"] = torch.cat((output_tokens['input_ids'],torch.tensor([next_token])),dim=1)\n",
|
||||
" output_tokens['attention_mask'] = torch.cat((output_tokens['attention_mask'],torch.tensor([[1]])),dim=1)\n",
|
||||
" output_tokens['last_token_prob'] = prob_over_tokens[next_token]\n",
|
||||
"\n",
|
||||
" return output_tokens"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "TIyNgg0FkJKO"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
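The missing line might look like the following (a sketch, assuming `np.random.choice` is used as the comment suggests; it draws one index according to the given probabilities):

```python
# Sketch: draw one token index at random according to the model's
# probability distribution over the vocabulary.
next_token = np.random.choice(len(prob_over_tokens), 1, p=prob_over_tokens)
```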
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Expected output:\n",
|
||||
"# \"The best thing about Bath is that they don't even change or shrink anymore.\"\n",
|
||||
"\n",
|
||||
"set_seed(0)\n",
|
||||
"input_txt = \"The best thing about Bath is\"\n",
|
||||
"input_tokens = tokenizer(input_txt, return_tensors='pt')\n",
|
||||
"for i in range(10):\n",
|
||||
" input_tokens = sample_next_token(input_tokens, model, tokenizer)\n",
|
||||
" print(tokenizer.decode(input_tokens[\"input_ids\"][0], skip_special_tokens=True))\n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "BHs-IWaz9MNY"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# TODO Modify the code below by changing the number of tokens generated and the initial sentence\n",
|
||||
"# to get a feel for how well this works. Since I didn't reset the seed, it will give a different\n",
|
||||
"# answer every time that you run it.\n",
|
||||
"\n",
|
||||
"# TODO Experiment with changing this line:\n",
|
||||
"input_txt = \"The best thing about Bath is\"\n",
|
||||
"input_tokens = tokenizer(input_txt, return_tensors='pt')\n",
|
||||
"# TODO Experiment with changing this line:\n",
|
||||
"for i in range(10):\n",
|
||||
" input_tokens = sample_next_token(input_tokens, model, tokenizer)\n",
|
||||
" print(tokenizer.decode(input_tokens[\"input_ids\"][0], skip_special_tokens=True))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "yN98_7WqbvIe"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Greedy token selection\n",
|
||||
"\n",
|
||||
"You probably (correctly) got the impression that the text from pure sampling of the probability model can be kind of random. How about if we choose most likely token at each step?\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "7eHFLCeZcmmg"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def get_best_next_token(input_tokens, model, tokenizer):\n",
|
||||
" # Run model to get prediction over next output\n",
|
||||
" outputs = model(input_ids = input_tokens['input_ids'], attention_mask = input_tokens['attention_mask'])\n",
|
||||
" # Find prediction\n",
|
||||
" prob_over_tokens = F.softmax(outputs.logits, dim=-1).detach().numpy()[0,-1]\n",
|
||||
"\n",
|
||||
" # TODO -- find the token index with the maximum probability\n",
|
||||
" # It should be returns as a list (i.e., put squared brackets around it)\n",
|
||||
" # Use https://numpy.org/doc/stable/reference/generated/numpy.argmax.html\n",
|
||||
" # Replace this line\n",
|
||||
" next_token = [5000]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" # Append token to sentence\n",
|
||||
" output_tokens = input_tokens\n",
|
||||
" output_tokens[\"input_ids\"] = torch.cat((output_tokens['input_ids'],torch.tensor([next_token])),dim=1)\n",
|
||||
" output_tokens['attention_mask'] = torch.cat((output_tokens['attention_mask'],torch.tensor([[1]])),dim=1)\n",
|
||||
" output_tokens['last_token_prob'] = prob_over_tokens[next_token]\n",
|
||||
" return output_tokens"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "OhRzynEjxpZF"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
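A plausible completion of the TODO, using `np.argmax` as the comment suggests:

```python
# Sketch: index of the single most probable token, wrapped in a list
# as the surrounding code expects.
next_token = [np.argmax(prob_over_tokens)]
```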
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Expected output:\n",
|
||||
"# The best thing about Bath is that it's a place where you can go to\n",
|
||||
"set_seed(0)\n",
|
||||
"input_txt = \"The best thing about Bath is\"\n",
|
||||
"input_tokens = tokenizer(input_txt, return_tensors='pt')\n",
|
||||
"for i in range(10):\n",
|
||||
" input_tokens = get_best_next_token(input_tokens, model, tokenizer)\n",
|
||||
" print(tokenizer.decode(input_tokens[\"input_ids\"][0], skip_special_tokens=True))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "gKB1Mgndj-Hm"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# TODO Modify the code below by changing the number of tokens generated and the initial sentence\n",
|
||||
"# to get a feel for how well this works.\n",
|
||||
"\n",
|
||||
"# TODO Experiment with changing this line:\n",
|
||||
"input_txt = \"The best thing about Bath is\"\n",
|
||||
"input_tokens = tokenizer(input_txt, return_tensors='pt')\n",
|
||||
"# TODO Experiment with changing this line:\n",
|
||||
"for i in range(10):\n",
|
||||
" input_tokens = get_best_next_token(input_tokens, model, tokenizer)\n",
|
||||
" print(tokenizer.decode(input_tokens[\"input_ids\"][0], skip_special_tokens=True))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "L1YHKaYFfC0M"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Top-K sampling\n",
|
||||
"\n",
|
||||
"You probably noticed that the greedy strategy produces quite realistic text, but it's kind of boring. It produces generic answers. Also, if this was a chatbot, then we wouldn't necessarily want it to produce the same answer to a question each time. \n",
|
||||
"\n",
|
||||
"Top-K sampling is a compromise strategy that samples randomly from the top K most probable tokens. We could just choose them with a uniform distribution, or (as here) we could sample them according to their original probabilities."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "1ORFXYX_gBDT"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def get_top_k_token(input_tokens, model, tokenizer, k=20):\n",
|
||||
" # Run model to get prediction over next output\n",
|
||||
" outputs = model(input_ids = input_tokens['input_ids'], attention_mask = input_tokens['attention_mask'])\n",
|
||||
" # Find prediction\n",
|
||||
" prob_over_tokens = F.softmax(outputs.logits, dim=-1).detach().numpy()[0,-1]\n",
|
||||
"\n",
|
||||
" # Draw a sample from the top K most likely tokens.\n",
|
||||
" # Take copy of the probabilities and sort from largest to smallest (use np.sort)\n",
|
||||
" # TODO -- replace this line\n",
|
||||
" sorted_prob_over_tokens = prob_over_tokens\n",
|
||||
"\n",
|
||||
" # Find the probability at the k'th position\n",
|
||||
" # TODO -- replace this line\n",
|
||||
" kth_prob_value = 0.0\n",
|
||||
"\n",
|
||||
" # Set all probabilities below this value to zero\n",
|
||||
" prob_over_tokens[prob_over_tokens<kth_prob_value] = 0\n",
|
||||
"\n",
|
||||
" # Renormalize the probabilities so that they sum to one\n",
|
||||
" # TODO -- replace this line\n",
|
||||
" prob_over_tokens = prob_over_tokens\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" # Draw random token\n",
|
||||
" next_token = np.random.choice(len(prob_over_tokens), 1, replace=False, p=prob_over_tokens)\n",
|
||||
"\n",
|
||||
" # Append token to sentence\n",
|
||||
" output_tokens = input_tokens\n",
|
||||
" output_tokens[\"input_ids\"] = torch.cat((output_tokens['input_ids'],torch.tensor([next_token])),dim=1)\n",
|
||||
" output_tokens['attention_mask'] = torch.cat((output_tokens['attention_mask'],torch.tensor([[1]])),dim=1)\n",
|
||||
" output_tokens['last_token_prob'] = prob_over_tokens[next_token]\n",
|
||||
" return output_tokens"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "7RFbn6c-0Z4v"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
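The three TODOs might be completed as follows (a sketch, not necessarily the official solution; note that ties at the k'th probability value could keep slightly more than k tokens):

```python
# Sketch: sort a copy of the probabilities in decreasing order,
# read off the k'th largest value, and renormalize after thresholding.
sorted_prob_over_tokens = np.sort(prob_over_tokens)[::-1]
kth_prob_value = sorted_prob_over_tokens[k - 1]
# ...after the existing line that zeroes probabilities below this value:
prob_over_tokens = prob_over_tokens / np.sum(prob_over_tokens)
```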
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Expected output:\n",
|
||||
"# The best thing about Bath is that you get to see all the beautiful faces of\n",
|
||||
"\n",
|
||||
"set_seed(0)\n",
|
||||
"input_txt = \"The best thing about Bath is\"\n",
|
||||
"input_tokens = tokenizer(input_txt, return_tensors='pt')\n",
|
||||
"for i in range(10):\n",
|
||||
" input_tokens = get_top_k_token(input_tokens, model, tokenizer, k=10)\n",
|
||||
" print(tokenizer.decode(input_tokens[\"input_ids\"][0], skip_special_tokens=True))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "G3w1GVED4HYv"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# TODO\n",
|
||||
"# Experiment with different values of k\n",
|
||||
"# If you set it to a lower number (say 3) the text will be less random\n",
|
||||
"# If you set it to a higher number (say 5000) the text will be more random\n",
|
||||
"\n",
|
||||
"set_seed(0)\n",
|
||||
"input_txt = \"The best thing about Bath is\"\n",
|
||||
"input_tokens = tokenizer(input_txt, return_tensors='pt')\n",
|
||||
"for i in range(10):\n",
|
||||
" input_tokens = get_top_k_token(input_tokens, model, tokenizer, k=10)\n",
|
||||
" print(tokenizer.decode(input_tokens[\"input_ids\"][0], skip_special_tokens=True))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "RySu2bzqpW9E"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Nucleus sampling\n",
|
||||
"\n",
|
||||
"Top-K sampling has the disadvantage that sometimes there are only a few plausible next tokens, and sometimes there are a lot. How do we adapt to this situation? One way is to sample from a fixed proportion of the probability mass. That is we order the tokens in terms of probability and cut off the possibility of sampling when the cumulative sum is greater than a threshold.\n",
|
||||
"\n",
|
||||
"This way, we adapt the number of possible tokens that we can choose."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "fOHak_QJfU-2"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def get_nucleus_sampling_token(input_tokens, model, tokenizer, thresh=0.25):\n",
|
||||
" # Run model to get prediction over next output\n",
|
||||
" outputs = model(input_ids = input_tokens['input_ids'], attention_mask = input_tokens['attention_mask'])\n",
|
||||
" # Find prediction\n",
|
||||
" prob_over_tokens = F.softmax(outputs.logits, dim=-1).detach().numpy()[0,-1]\n",
|
||||
"\n",
|
||||
" # Find the most likely tokens that make up the first (thresh) of the probability\n",
|
||||
" # TODO -- sort the probabilities in decreasing order\n",
|
||||
" # Replace this line\n",
|
||||
" sorted_probs_decreasing = prob_over_tokens\n",
|
||||
" # TODO -- compute the cumulative sum of these probabilities\n",
|
||||
" # Replace this line\n",
|
||||
" cum_sum_probs = sorted_probs_decreasing\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" # Find index where that the cumulative sum is greater than the threshold\n",
|
||||
" thresh_index = np.argmax(cum_sum_probs>thresh)\n",
|
||||
" print(\"Choosing from %d tokens\"%(thresh_index))\n",
|
||||
" # TODO: Find the probability value to threshold\n",
|
||||
" # Replace this line:\n",
|
||||
" thresh_prob = sorted_probs_decreasing[thresh_index]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" # Set any probabilities less than this to zero\n",
|
||||
" prob_over_tokens[prob_over_tokens<thresh_prob] = 0\n",
|
||||
" # Renormalize\n",
|
||||
" prob_over_tokens = prob_over_tokens / np.sum(prob_over_tokens)\n",
|
||||
" # Draw random token\n",
|
||||
" next_token = np.random.choice(len(prob_over_tokens), 1, replace=False, p=prob_over_tokens)\n",
|
||||
"\n",
|
||||
" # Append token to sentence\n",
|
||||
" output_tokens = input_tokens\n",
|
||||
" output_tokens[\"input_ids\"] = torch.cat((output_tokens['input_ids'],torch.tensor([next_token])),dim=1)\n",
|
||||
" output_tokens['attention_mask'] = torch.cat((output_tokens['attention_mask'],torch.tensor([[1]])),dim=1)\n",
|
||||
" output_tokens['last_token_prob'] = prob_over_tokens[next_token]\n",
|
||||
" return output_tokens"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "PtxS4kNDyUcm"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
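The two sorting TODOs might be completed as below; the `thresh_prob` line already present in the cell then works as written (a sketch, not necessarily the official solution):

```python
# Sketch: sort in decreasing order, then take the running total.
sorted_probs_decreasing = np.sort(prob_over_tokens)[::-1]
cum_sum_probs = np.cumsum(sorted_probs_decreasing)
```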
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Expected output:\n",
|
||||
"# The best thing about Bath is that it's not a city that has been around\n",
|
||||
"set_seed(0)\n",
|
||||
"input_txt = \"The best thing about Bath is\"\n",
|
||||
"input_tokens = tokenizer(input_txt, return_tensors='pt')\n",
|
||||
"for i in range(10):\n",
|
||||
" input_tokens = get_nucleus_sampling_token(input_tokens, model, tokenizer, thresh = 0.2)\n",
|
||||
" print(tokenizer.decode(input_tokens[\"input_ids\"][0], skip_special_tokens=True))\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "K2Vk1Ly40S6c"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# TODO -- experiment with setting the threshold probability to larger or smaller values\n",
|
||||
"input_txt = \"The best thing about Bath is\"\n",
|
||||
"input_tokens = tokenizer(input_txt, return_tensors='pt')\n",
|
||||
"for i in range(10):\n",
|
||||
" input_tokens = get_nucleus_sampling_token(input_tokens, model, tokenizer, thresh = 0.2)\n",
|
||||
" print(tokenizer.decode(input_tokens[\"input_ids\"][0], skip_special_tokens=True))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "eQNNHe14wDvC"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Beam search\n",
|
||||
"\n",
|
||||
"All of the methods we've seen so far choose the tokens one by one. But this isn't necessarily sensible. Even greedily choosing the best token doesn't necessarily retrieve the sequence with the highest probability. It might be that the most likely token only has very unlikely tokens following it.\n",
|
||||
"\n",
|
||||
"Beam search maintains $K$ hypotheses about the best possible continuation. It starts with the top $K$ continuations. Then for each of those, it finds the top K continuations, giving $K^2$ hypotheses. Then it retains just the top $K$ of these so that the number of hypotheses stays the same."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "WMMNeLixwlgM"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# This routine returns the k'th most likely next token.\n",
|
||||
"# If k =0 then it returns the most likely token, if k=1 it returns the next most likely and so on\n",
|
||||
"# We will need this for beam search\n",
|
||||
"def get_kth_most_likely_token(input_tokens, model, tokenizer, k):\n",
|
||||
" # Run model to get prediction over next output\n",
|
||||
" outputs = model(input_ids = input_tokens['input_ids'], attention_mask = input_tokens['attention_mask'])\n",
|
||||
" # Find prediction\n",
|
||||
" prob_over_tokens = F.softmax(outputs.logits, dim=-1).detach().numpy()[0,-1]\n",
|
||||
"\n",
|
||||
" # Find the k'th most likely token\n",
|
||||
" # TODO Sort the probabilities from largest to smallest\n",
|
||||
" # Replace this line:\n",
|
||||
" sorted_prob_over_tokens = prob_over_tokens\n",
|
||||
" # TODO Find the k'th sorted probability\n",
|
||||
" # Replace this line\n",
|
||||
" kth_prob_value = prob_over_tokens[0]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" # Find position of this token.\n",
|
||||
" next_token = np.where(prob_over_tokens == kth_prob_value)[0]\n",
|
||||
"\n",
|
||||
" # Append token to sentence\n",
|
||||
" output_tokens = input_tokens\n",
|
||||
" output_tokens[\"input_ids\"] = torch.cat((output_tokens['input_ids'],torch.tensor([next_token])),dim=1)\n",
|
||||
" output_tokens['attention_mask'] = torch.cat((output_tokens['attention_mask'],torch.tensor([[1]])),dim=1)\n",
|
||||
" output_tokens['last_token_prob'] = prob_over_tokens[next_token]\n",
|
||||
" output_tokens['log_prob'] = output_tokens['log_prob'] + np.log(prob_over_tokens[next_token])\n",
|
||||
" return output_tokens"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "sAI2bClXCe2F"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
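A plausible completion of the two TODOs (sort descending, then index at position k, so k=0 gives the most likely token):

```python
# Sketch: the k'th largest probability after a descending sort.
sorted_prob_over_tokens = np.sort(prob_over_tokens)[::-1]
kth_prob_value = sorted_prob_over_tokens[k]
```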
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# We can test this code and see that if we choose the 2nd most likely (K=1) token each time\n",
|
||||
"# then we get much better generation results than if we choose the 2001st most likely token\n",
|
||||
"\n",
|
||||
"# Expected output:\n",
|
||||
"# The best thing about Bath is the way you get the most bang outta the\n",
|
||||
"set_seed(0)\n",
|
||||
"input_txt = \"The best thing about Bath is\"\n",
|
||||
"input_tokens = tokenizer(input_txt, return_tensors='pt')\n",
|
||||
"input_tokens['log_prob'] = 0.0\n",
|
||||
"for i in range(10):\n",
|
||||
" input_tokens = get_kth_most_likely_token(input_tokens, model, tokenizer, k=1)\n",
|
||||
" print(tokenizer.decode(input_tokens[\"input_ids\"][0], skip_special_tokens=True))\n",
|
||||
"\n",
|
||||
"# Expected output:\n",
|
||||
"# The best thing about Bath is mixed profits partnerships» buy generic+ Honda throttlecont\n",
|
||||
"input_txt = \"The best thing about Bath is\"\n",
|
||||
"input_tokens = tokenizer(input_txt, return_tensors='pt')\n",
|
||||
"input_tokens['log_prob'] = 0.0\n",
|
||||
"for i in range(10):\n",
|
||||
" input_tokens = get_kth_most_likely_token(input_tokens, model, tokenizer, k=2000)\n",
|
||||
" print(tokenizer.decode(input_tokens[\"input_ids\"][0], skip_special_tokens=True))\n",
|
||||
"\n",
|
||||
"# TODO -- play around with different values of K"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "6kSc0WrTELMd"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Print out each beam plus the log probability\n",
|
||||
"def print_beams(beams):\n",
|
||||
" for index,beam in enumerate(beams):\n",
|
||||
" print(\"Beam %d, Prob %3.3f: \"%(index,beam['log_prob'])+tokenizer.decode(beam[\"input_ids\"][0], skip_special_tokens=True))\n",
|
||||
" print('---')\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# TODO: Read this code carefully!\n",
|
||||
"def do_beam_search(input_tokens_in, model, tokenizer, n_beam=5, beam_length=10):\n",
|
||||
" # Store beams in a list\n",
|
||||
" input_tokens['log_prob'] = 0.0\n",
|
||||
"\n",
|
||||
" # Initialize with n_beam most likely continuations\n",
|
||||
" beams = [None] * n_beam\n",
|
||||
" for c_k in range(n_beam):\n",
|
||||
" beams[c_k] = dict(input_tokens_in)\n",
|
||||
" beams[c_k] = get_kth_most_likely_token(beams[c_k], model, tokenizer, c_k)\n",
|
||||
"\n",
|
||||
" print_beams(beams)\n",
|
||||
"\n",
|
||||
" # For each token in the sequence we will add\n",
|
||||
" for c_pos in range(beam_length-1):\n",
|
||||
" # Now for each beam, we continue it in the most likely ways, making n_beam*n_beam type hypotheses\n",
|
||||
" beams_all = [None] * (n_beam*n_beam)\n",
|
||||
" log_probs_all = np.zeros(n_beam*n_beam)\n",
|
||||
" # For each current hypothesis\n",
|
||||
" for c_beam in range(n_beam):\n",
|
||||
" # For each continuation\n",
|
||||
" for c_k in range(n_beam):\n",
|
||||
" # Store the continuation and the probability\n",
|
||||
" beams_all[c_beam * n_beam + c_k] = dict(get_kth_most_likely_token(beams[c_beam], model, tokenizer, c_k))\n",
|
||||
" log_probs_all[c_beam * n_beam + c_k] = beams_all[c_beam * n_beam + c_k]['log_prob']\n",
|
||||
"\n",
|
||||
" # Keep the best n_beams sequences with the highest probabilities\n",
|
||||
" sorted_index = np.argsort(np.array(log_probs_all)*-1)\n",
|
||||
" for c_k in range(n_beam):\n",
|
||||
" beams[c_k] = dict(beams_all[sorted_index[c_k]])\n",
|
||||
"\n",
|
||||
" # Print the beams\n",
|
||||
" print_beams(beams)\n",
|
||||
"\n",
|
||||
" return beams[0]"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "Y4hFfwPFFxka"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Expected output:\n",
|
||||
"# The best thing about Bath is that it's a place where you don't have to\n",
|
||||
"\n",
|
||||
"set_seed(0)\n",
|
||||
"input_txt = \"The best thing about Bath is\"\n",
|
||||
"input_tokens = tokenizer(input_txt, return_tensors='pt')\n",
|
||||
"\n",
|
||||
"# Now let's call the beam search\n",
|
||||
"# It takes a while as it has to run the model multiple times to add a token\n",
|
||||
"n_beams = 5\n",
|
||||
"best_beam = do_beam_search(input_tokens,model,tokenizer)\n",
|
||||
"print(\"Beam search result:\")\n",
|
||||
"print(tokenizer.decode(best_beam[\"input_ids\"][0], skip_special_tokens=True))\n",
|
||||
"\n",
|
||||
"# You should see that the best answer is not the same as the greedy solution we found above\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "0YWKwZmz4NXb"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"You can read about more decoding strategies in this blog (which uses a recursive neural network, not a transformer, but the principles are the same).\n",
|
||||
"\n",
|
||||
"https://www.borealisai.com/research-blogs/tutorial-6-neural-natural-language-generation-decoding-algorithms/\n",
|
||||
"\n",
|
||||
"You can also look at other possible language models via hugging face:\n",
|
||||
"\n",
|
||||
"https://huggingface.co/docs/transformers/v4.25.1/en/model_summary#decoders-or-autoregressive-models\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "-SXpjZPYsMhv"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
159
Notebooks/Chap13/13_1_Graph_Representation.ipynb
Normal file
159
Notebooks/Chap13/13_1_Graph_Representation.ipynb
Normal file
@@ -0,0 +1,159 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyMuzP1/oqTRTw4Xs/R4J/M3",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap13/13_1_Graph_Representation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 13.1: Graph representation**\n",
|
||||
"\n",
|
||||
"This notebook investigates representing graphs with matrices as illustrated in figure 13.4 from the book.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "t9vk9Elugvmi"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import networkx as nx"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "OLComQyvCIJ7"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Routine to draw graph structure\n",
|
||||
"def draw_graph_structure(adjacency_matrix):\n",
|
||||
"\n",
|
||||
" G = nx.Graph()\n",
|
||||
" n_node = adjacency_matrix.shape[0]\n",
|
||||
" for i in range(n_node):\n",
|
||||
" for j in range(i):\n",
|
||||
" if adjacency_matrix[i,j]:\n",
|
||||
" G.add_edge(i,j)\n",
|
||||
"\n",
|
||||
" nx.draw(G, nx.spring_layout(G, seed = 0), with_labels=True)\n",
|
||||
" plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "O1QMxC7X4vh9"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define a graph\n",
|
||||
"# Note that the nodes are labelled from 0 rather than 1 as in the book\n",
|
||||
"A = np.array([[0,1,0,1,0,0,0,0],\n",
|
||||
" [1,0,1,1,1,0,0,0],\n",
|
||||
" [0,1,0,0,1,0,0,0],\n",
|
||||
" [1,1,0,0,1,0,0,0],\n",
|
||||
" [0,1,1,1,0,1,0,1],\n",
|
||||
" [0,0,0,0,1,0,1,1],\n",
|
||||
" [0,0,0,0,0,1,0,0],\n",
|
||||
" [0,0,0,0,1,1,0,0]]);\n",
|
||||
"print(A)\n",
|
||||
"draw_graph_structure(A)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "TIrihEw-7DRV"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# TODO -- find algorithmically how many walks of length three are between nodes 3 and 7\n",
|
||||
"# Replace this line\n",
|
||||
"print(\"Number of walks between nodes three and seven = ???\")"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "PzvfUpkV4zCj"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
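One way to do this follows from the fact that entry (i, j) of the L'th power of the adjacency matrix counts walks of length L between nodes i and j (a sketch):

```python
# Sketch: cube the adjacency matrix and read off entry (3, 7).
A_cubed = np.linalg.matrix_power(A, 3)
print("Number of walks between nodes three and seven =", A_cubed[3, 7])
```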
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# TODO -- find algorithmically what the minimum path distance between nodes 0 and 6 is\n",
|
||||
"# (i.e. what is the first walk length with non-zero count between 0 and 6)\n",
|
||||
"# Replace this line\n",
|
||||
"print(\"Minimum distance = ???\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# What is the worst case complexity of your method?"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "MhhJr6CgCRb5"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
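A sketch of one possible approach: raise A to increasing powers until the (0, 6) entry becomes non-zero. Each step is an O(n^3) matrix multiply and there are at most n-1 steps, so the worst-case complexity of this method is O(n^4).

```python
# Sketch: increase the walk length until a walk from node 0 to node 6 exists.
A_power = np.copy(A)
distance = 1
while A_power[0, 6] == 0:
    A_power = np.matmul(A_power, A)
    distance += 1
print("Minimum distance =", distance)
```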
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Now let's represent node 0 as a vector\n",
|
||||
"x = np.array([[1],[0],[0],[0],[0],[0],[0],[0]]);\n",
|
||||
"print(x)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "lCQjXlatABGZ"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# TODO: Find algorithmically how many paths of length 3 are there between node 0 and every other node\n",
|
||||
"# Replace this line\n",
|
||||
"print(np.zeros_like(x))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "nizLdZgLDzL4"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
|
||||
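A sketch of one possible answer: multiplying the indicator vector x by A three times gives the number of length-3 walks from node 0 to every node.

```python
# Sketch: A^3 applied to the one-hot vector for node 0.
print(np.matmul(np.linalg.matrix_power(A, 3), x))
```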
]
|
||||
}
|
||||
244
Notebooks/Chap13/13_2_Graph_Classification.ipynb
Normal file
244
Notebooks/Chap13/13_2_Graph_Classification.ipynb
Normal file
@@ -0,0 +1,244 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyOMSGUFWT+YN0fwYHpMmHJM",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap13/13_2_Graph_Classification.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 13.2: Graph classification**\n",
|
||||
"\n",
|
||||
"This notebook investigates representing graphs with matrices as illustrated in figure 13.4 from the book.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "t9vk9Elugvmi"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import networkx as nx"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "OLComQyvCIJ7"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Let's build a model that maps a chemical structure to a binary decision. This model might be used to predict whether a chemical is liquid at room temparature or not. We'll start by drawing the chemical structure."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "UNleESc7k5uB"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define a graph that represents the chemical structure of ethanol and draw it\n",
|
||||
"# Each node is labelled with the node number and the element (carbon, hydrogen, oxygen)\n",
|
||||
"G = nx.Graph()\n",
|
||||
"G.add_edge('0:H','2:C')\n",
|
||||
"G.add_edge('1:H','2:C')\n",
|
||||
"G.add_edge('3:H','2:C')\n",
|
||||
"G.add_edge('2:C','5:C')\n",
|
||||
"G.add_edge('4:H','5:C')\n",
|
||||
"G.add_edge('6:H','5:C')\n",
|
||||
"G.add_edge('7:O','5:C')\n",
|
||||
"G.add_edge('8:H','7:O')\n",
|
||||
"nx.draw(G, nx.spring_layout(G, seed = 0), with_labels=True, node_size=600)\n",
|
||||
"plt.show()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "TIrihEw-7DRV"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Define adjacency matrix\n",
|
||||
"# TODO -- Define the adjacency matrix for this chemical\n",
|
||||
"# Replace this line\n",
|
||||
"A = np.zeros((9,9)) ;\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"print(A)\n",
|
||||
"\n",
|
||||
"# TODO -- Define node matrix\n",
|
||||
"# There will be 9 nodes and 118 possible chemical elements\n",
|
||||
"# so we'll define a 9x118 matrix. Each column represents one\n",
|
||||
"# node and is a one-hot vector (i.e. all zeros, except a single one at the\n",
|
||||
"# chemical number of the element).\n",
|
||||
"# Chemical numbers: Hydrogen-->1, Carbon-->6, Oxygen-->8\n",
|
||||
"# Since the indices start at 0, we'll set element 0 to 1 for hydrogen, element 5\n",
|
||||
"# to one for carbon, and element 7 to one for oxygen\n",
|
||||
"# Replace this line:\n",
|
||||
"X = np.zeros((118,9))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Print the top 15 rows of the data matrix\n",
|
||||
"print(X[0:15,:])"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "gKBD5JsPfrkA"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
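A possible completion of the two TODOs, reading the edges and elements off the graph drawn above (a sketch; the node and element assignments follow the labels 0:H through 8:H):

```python
# Sketch: adjacency matrix for ethanol (symmetric, no self-connections).
A = np.zeros((9, 9))
edges = [(0, 2), (1, 2), (3, 2), (2, 5), (4, 5), (6, 5), (7, 5), (8, 7)]
for i, j in edges:
    A[i, j] = A[j, i] = 1

# Node matrix: one column per node, one-hot at (atomic number - 1).
X = np.zeros((118, 9))
atomic_numbers = [1, 1, 6, 1, 1, 6, 1, 8, 1]  # H, H, C, H, H, C, H, O, H
for node, z in enumerate(atomic_numbers):
    X[z - 1, node] = 1
```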
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Now let's define a network with four layers that maps this graph to a binary value, using the formulation in equation 13.11."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "40FLjNIcpHa9"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# We'll need these helper functions\n",
|
||||
"\n",
|
||||
"# Define the Rectified Linear Unit (ReLU) function\n",
|
||||
"def ReLU(preactivation):\n",
|
||||
" activation = preactivation.clip(0.0)\n",
|
||||
" return activation\n",
|
||||
"\n",
|
||||
"# Define the logistic sigmoid function\n",
|
||||
"def sigmoid(x):\n",
|
||||
" return 1.0/(1.0+np.exp(-x))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "52IFREpepHE4"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Our network will have K=3 hidden layers, and will use a dimension of D=200.\n",
|
||||
"K = 3; D = 200\n",
|
||||
"# Set seed so we always get the same random numbers\n",
|
||||
"np.random.seed(1)\n",
|
||||
"# Let's initialize the parameter matrices randomly with He initialization\n",
|
||||
"Omega0 = np.random.normal(size=(D, 118)) * 2.0 / D\n",
|
||||
"beta0 = np.random.normal(size=(D,1)) * 2.0 / D\n",
|
||||
"Omega1 = np.random.normal(size=(D, D)) * 2.0 / D\n",
|
||||
"beta1 = np.random.normal(size=(D,1)) * 2.0 / D\n",
|
||||
"Omega2 = np.random.normal(size=(D, D)) * 2.0 / D\n",
|
||||
"beta2 = np.random.normal(size=(D,1)) * 2.0 / D\n",
|
||||
"omega3 = np.random.normal(size=(1, D))\n",
|
||||
"beta3 = np.random.normal(size=(1,1))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "ag0YdEgnpApK"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"def graph_neural_network(A,X, Omega0, beta0, Omega1, beta1, Omega2, beta2, omega3, beta3):\n",
|
||||
" # Define this network according to equation 13.11 from the book\n",
|
||||
" # Replace this line\n",
|
||||
" f = np.ones((1,1))\n",
|
||||
"\n",
|
||||
" return f;"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "RQuTMc2WrsU3"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
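If equation 13.11 takes the common form H_{k+1} = a[beta_k 1^T + Omega_k H_k (A + I)], with the node representations averaged before a sigmoid output, the body might read as follows. This is a sketch under that assumption, not a transcription of the book's equation.

```python
# Sketch, assuming layers of the form
#   H_{k+1} = ReLU[beta_k 1^T + Omega_k H_k (A + I)]
# followed by mean pooling over nodes and a sigmoid output.
def graph_neural_network(A, X, Omega0, beta0, Omega1, beta1,
                         Omega2, beta2, omega3, beta3):
    A_plus_I = A + np.eye(A.shape[0])
    H1 = ReLU(beta0 + np.matmul(Omega0, np.matmul(X, A_plus_I)))
    H2 = ReLU(beta1 + np.matmul(Omega1, np.matmul(H1, A_plus_I)))
    H3 = ReLU(beta2 + np.matmul(Omega2, np.matmul(H2, A_plus_I)))
    # Average over nodes, then map to a single sigmoid output
    f = sigmoid(beta3 + np.matmul(omega3, np.mean(H3, axis=1, keepdims=True)))
    return f
```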
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's test this network\n",
|
||||
"f = graph_neural_network(A,X, Omega0, beta0, Omega1, beta1, Omega2, beta2, omega3, beta3)\n",
|
||||
"print(\"Your value is %3f: \"%(f[0,0]), \"True value of f: 0.498010\")"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "X7gYgOu6uIAt"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Let's check that permuting the indices of the graph doesn't change\n",
|
||||
"# the output of the network\n",
|
||||
"# Define a permutation matrix\n",
|
||||
"P = np.array([[0,1,0,0,0,0,0,0,0],\n",
|
||||
" [0,0,0,0,1,0,0,0,0],\n",
|
||||
" [0,0,0,0,0,1,0,0,0],\n",
|
||||
" [0,0,0,0,0,0,0,0,1],\n",
|
||||
" [1,0,0,0,0,0,0,0,0],\n",
|
||||
" [0,0,1,0,0,0,0,0,0],\n",
|
||||
" [0,0,0,1,0,0,0,0,0],\n",
|
||||
" [0,0,0,0,0,0,0,1,0],\n",
|
||||
" [0,0,0,0,0,0,1,0,0]]);\n",
|
||||
"\n",
|
||||
"# TODO -- Use this matrix to permute the adjacency matrix A and node matrix X\n",
|
||||
"# Replace these lines\n",
|
||||
"A_permuted = np.copy(A)\n",
|
||||
"X_permuted = np.copy(X)\n",
|
||||
"\n",
|
||||
"f = graph_neural_network(A_permuted,X_permuted, Omega0, beta0, Omega1, beta1, Omega2, beta2, omega3, beta3)\n",
|
||||
"print(\"Your value is %3f: \"%(f[0,0]), \"True value of f: 0.498010\")"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "F0zc3U_UuR5K"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
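One consistent way to apply the permutation (a sketch): conjugate A by P, and permute the columns of X, since its columns index the nodes.

```python
# Sketch: relabel the nodes with the permutation matrix P.
A_permuted = np.matmul(P, np.matmul(A, P.T))
X_permuted = np.matmul(X, P.T)
```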
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"TODO -- encode the adjacency matrix and node matrix for propanol and run the network again. Show that the network still runs even though the size of the input graph is different.\n",
|
||||
"\n",
|
||||
"Propanol structure can be found [here](https://upload.wikimedia.org/wikipedia/commons/b/b8/Propanol_flat_structure.png)."
|
||||
],
|
||||
"metadata": {
|
||||
"id": "l44vHi50zGqY"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
1
Notebooks/Info.txt
Normal file
1
Notebooks/Info.txt
Normal file
@@ -0,0 +1 @@
|
||||
This directory contains the Python notebooks referenced in the margins of the main text
|
||||
277
Notesbooks/Chap11/11_2_Residual_Networks.ipynb
Normal file
277
Notesbooks/Chap11/11_2_Residual_Networks.ipynb
Normal file
@@ -0,0 +1,277 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"authorship_tag": "ABX9TyMJvfoCDFcSK7Z0/HkcGunb",
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notesbooks/Chap11/11_2_Residual_Networks.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# **Notebook 11.2: Residual Networks**\n",
|
||||
"\n",
|
||||
"This notebook adapts the networks for MNIST1D to use residual connections.\n",
|
||||
"\n",
|
||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||
"\n",
|
||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n",
|
||||
"\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "t9vk9Elugvmi"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Run this if you're in a Colab to make a local copy of the MNIST 1D repository\n",
|
||||
"!git clone https://github.com/greydanus/mnist1d"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "D5yLObtZCi9J"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import os\n",
|
||||
"import torch, torch.nn as nn\n",
|
||||
"from torch.utils.data import TensorDataset, DataLoader\n",
|
||||
"from torch.optim.lr_scheduler import StepLR\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import mnist1d\n",
|
||||
"import random"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YrXWAH7sUWvU"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"args = mnist1d.data.get_dataset_args()\n",
|
||||
"data = mnist1d.data.get_dataset(args, path='./mnist1d_data.pkl', download=False, regenerate=False)\n",
|
||||
"\n",
|
||||
"# The training and test input and outputs are in\n",
|
||||
"# data['x'], data['y'], data['x_test'], and data['y_test']\n",
|
||||
"print(\"Examples in training set: {}\".format(len(data['y'])))\n",
|
||||
"print(\"Examples in test set: {}\".format(len(data['y_test'])))\n",
|
||||
"print(\"Length of each example: {}\".format(data['x'].shape[-1]))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "twI72ZCrCt5z"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Load in the data\n",
|
||||
"train_data_x = data['x'].transpose()\n",
|
||||
"train_data_y = data['y']\n",
|
||||
"val_data_x = data['x_test'].transpose()\n",
|
||||
"val_data_y = data['y_test']\n",
|
||||
"# Print out sizes\n",
|
||||
"print(\"Train data: %d examples (columns), each of which has %d dimensions (rows)\"%((train_data_x.shape[1],train_data_x.shape[0])))\n",
|
||||
"print(\"Validation data: %d examples (columns), each of which has %d dimensions (rows)\"%((val_data_x.shape[1],val_data_x.shape[0])))"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "8bKADvLHbiV5"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Define the network"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "_sFvRDGrl4qe"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# There are 40 input dimensions and 10 output dimensions for this data\n",
|
||||
"# The inputs correspond to the 40 offsets in the MNIST1D template.\n",
|
||||
"D_i = 40\n",
|
||||
"# The outputs correspond to the 10 digits\n",
|
||||
"D_o = 10\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# We will adapt this model to have residual connections around the linear layers\n",
|
||||
"# This is the same model we used in practical 8.1, but we can't use the sequential\n",
|
||||
"# class for residual networks (which aren't strictly sequential). Hence, I've rewritten\n",
|
||||
"# it as a model that inherits from a base class\n",
|
||||
"\n",
|
||||
"class ResidualNetwork(torch.nn.Module):\n",
|
||||
" def __init__(self, input_size, output_size, hidden_size=100):\n",
|
||||
" super(ResidualNetwork, self).__init__()\n",
|
||||
" self.linear1 = nn.Linear(input_size, hidden_size)\n",
|
||||
" self.linear2 = nn.Linear(hidden_size, hidden_size)\n",
|
||||
" self.linear3 = nn.Linear(hidden_size, hidden_size)\n",
|
||||
" self.linear4 = nn.Linear(hidden_size, output_size)\n",
|
||||
" print(\"Initialized MLPBase model with {} parameters\".format(self.count_params()))\n",
|
||||
"\n",
|
||||
" def count_params(self):\n",
|
||||
" return sum([p.view(-1).shape[0] for p in self.parameters()])\n",
|
||||
"\n",
|
||||
"# # TODO -- Add residual connections to this model\n",
|
||||
"# # The order of operations should similar to figure 11.5b\n",
|
||||
"# # linear1 first, ReLU+linear2 in first residual block, ReLU+linear3 in second residual block), linear4 at end\n",
|
||||
"# # Replace this function\n",
|
||||
" def forward(self, x):\n",
|
||||
" h1 = self.linear1(x).relu()\n",
|
||||
" h2 = self.linear2(h1).relu()\n",
|
||||
" h3 = self.linear3(h2).relu()\n",
|
||||
" return self.linear4(h3)\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "FslroPJJffrh"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
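A sketch of the forward pass with residual connections, following the order suggested in the comments (linear1 first, then two residual blocks, then linear4):

```python
# Sketch: forward pass in the style of figure 11.5b.
def forward(self, x):
    h = self.linear1(x)              # linear1 first
    h = h + self.linear2(h.relu())   # first residual block: ReLU + linear2
    h = h + self.linear3(h.relu())   # second residual block: ReLU + linear3
    return self.linear4(h)           # linear4 at the end
```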
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# He initialization of weights\n",
|
||||
"def weights_init(layer_in):\n",
|
||||
" if isinstance(layer_in, nn.Linear):\n",
|
||||
" nn.init.kaiming_uniform_(layer_in.weight)\n",
|
||||
" layer_in.bias.data.fill_(0.0)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YgLaex1pfhqz"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
{
"cell_type": "code",
"source": [
"# Define the model\n",
"model = ResidualNetwork(D_i, D_o)\n",
"\n",
"# choose cross entropy loss function (equation 5.24 in the loss notes)\n",
"loss_function = nn.CrossEntropyLoss()\n",
"# construct SGD optimizer and initialize learning rate and momentum\n",
"optimizer = torch.optim.SGD(model.parameters(), lr = 0.05, momentum=0.9)\n",
"# object that decreases learning rate by half every 20 epochs\n",
"scheduler = StepLR(optimizer, step_size=20, gamma=0.5)\n",
"# convert the training and validation data to tensors\n",
"x_train = torch.tensor(train_data_x.transpose().astype('float32'))\n",
"y_train = torch.tensor(train_data_y.astype('int64'))\n",
"x_val = torch.tensor(val_data_x.transpose().astype('float32'))\n",
"y_val = torch.tensor(val_data_y.astype('int64'))\n",
"\n",
"# seed the RNGs so that shuffling and initialization are reproducible\n",
"np.random.seed(1); torch.manual_seed(1)\n",
"# load the data into a class that creates the batches\n",
"data_loader = DataLoader(TensorDataset(x_train,y_train), batch_size=100, shuffle=True)\n",
"\n",
"# Initialize model weights\n",
"model.apply(weights_init)\n",
"\n",
"# loop over the dataset n_epoch times\n",
"n_epoch = 100\n",
"# store the loss and the % error at each epoch\n",
"losses_train = np.zeros((n_epoch))\n",
"errors_train = np.zeros((n_epoch))\n",
"losses_val = np.zeros((n_epoch))\n",
"errors_val = np.zeros((n_epoch))\n",
"\n",
"for epoch in range(n_epoch):\n",
"  # loop over batches\n",
"  for i, data in enumerate(data_loader):\n",
"    # retrieve inputs and labels for this batch\n",
"    x_batch, y_batch = data\n",
"    # zero the parameter gradients\n",
"    optimizer.zero_grad()\n",
"    # forward pass -- calculate model output\n",
"    pred = model(x_batch)\n",
"    # compute the loss\n",
"    loss = loss_function(pred, y_batch)\n",
"    # backward pass\n",
"    loss.backward()\n",
"    # SGD update\n",
"    optimizer.step()\n",
"\n",
"  # Run whole dataset to get statistics -- normally wouldn't do this\n",
"  with torch.no_grad():\n",
"    pred_train = model(x_train)\n",
"    pred_val = model(x_val)\n",
"  _, predicted_train_class = torch.max(pred_train.data, 1)\n",
"  _, predicted_val_class = torch.max(pred_val.data, 1)\n",
"  errors_train[epoch] = 100 - 100 * (predicted_train_class == y_train).float().sum() / len(y_train)\n",
"  errors_val[epoch] = 100 - 100 * (predicted_val_class == y_val).float().sum() / len(y_val)\n",
"  losses_train[epoch] = loss_function(pred_train, y_train).item()\n",
"  losses_val[epoch] = loss_function(pred_val, y_val).item()\n",
"  print(f'Epoch {epoch:5d}, train loss {losses_train[epoch]:.6f}, train error {errors_train[epoch]:3.2f}, val loss {losses_val[epoch]:.6f}, val error {errors_val[epoch]:3.2f}')\n",
"\n",
"  # tell scheduler to consider updating learning rate\n",
"  scheduler.step()"
],
"metadata": {
"id": "NYw8I_3mmX5c"
},
"execution_count": null,
"outputs": []
},
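{
"cell_type": "markdown",
"source": [
"Optional sanity check (not in the original practical): the StepLR schedule above halves the learning rate every 20 epochs. The sketch below replays that schedule on a throwaway optimizer so the trained model's state is untouched."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Sketch: replay the StepLR schedule used above (lr=0.05, halved every 20 epochs)\n",
"demo_opt = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.05)\n",
"demo_sched = StepLR(demo_opt, step_size=20, gamma=0.5)\n",
"lrs = []\n",
"for _ in range(n_epoch):\n",
"  lrs.append(demo_sched.get_last_lr()[0])\n",
"  demo_opt.step()\n",
"  demo_sched.step()\n",
"print(lrs[::20])  # expect [0.05, 0.025, 0.0125, 0.00625, 0.003125]"
],
"metadata": {},
"execution_count": null,
"outputs": []
},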
{
"cell_type": "code",
"source": [
"# Plot the results\n",
"fig, ax = plt.subplots()\n",
"ax.plot(errors_train,'r-',label='train')\n",
"ax.plot(errors_val,'b-',label='validation')\n",
"ax.set_ylim(0,100); ax.set_xlim(0,n_epoch)\n",
"ax.set_xlabel('Epoch'); ax.set_ylabel('Error (%)')\n",
"ax.set_title('Train Error %3.2f, Val Error %3.2f'%(errors_train[-1],errors_val[-1]))\n",
"ax.legend()\n",
"plt.show()"
],
"metadata": {
"id": "CcP_VyEmE2sv"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"The primary motivation of residual networks is to allow the training of much deeper networks.\n",
"\n",
"TODO: Try running this network with and without the residual connections. Does adding the residual connections change the performance? (One possible way to set up the comparison is sketched below.)"
],
"metadata": {
"id": "wMmqhmxuAx0M"
}
},
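{
"cell_type": "markdown",
"source": [
"One possible way to set up that comparison is to make the residual connections switchable and train the same architecture twice. This is a sketch under assumptions: the ToggleNetwork subclass and use_residual flag below are hypothetical names introduced here, not part of the original code."
],
"metadata": {}
},
{
"cell_type": "code",
"source": [
"# Sketch: a switchable variant for the with/without-residual comparison\n",
"# (ToggleNetwork and use_residual are hypothetical names introduced here)\n",
"class ToggleNetwork(ResidualNetwork):\n",
"  def __init__(self, input_size, output_size, hidden_size=100, use_residual=True):\n",
"    super(ToggleNetwork, self).__init__(input_size, output_size, hidden_size)\n",
"    self.use_residual = use_residual\n",
"\n",
"  def forward(self, x):\n",
"    h1 = self.linear1(x)\n",
"    if self.use_residual:\n",
"      h2 = h1 + self.linear2(h1.relu())\n",
"      h3 = h2 + self.linear3(h2.relu())\n",
"    else:\n",
"      h2 = self.linear2(h1.relu())\n",
"      h3 = self.linear3(h2.relu())\n",
"    return self.linear4(h3)\n",
"\n",
"# e.g., set model = ToggleNetwork(D_i, D_o, use_residual=False) and re-run the training cell"
],
"metadata": {},
"execution_count": null,
"outputs": []
}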
]
}
BIN PDFFigures/UDLAppendixPDF.zip (new file; binary not shown)
BIN PDFFigures/UDLChap14PDF.zip (new file; binary not shown)
BIN PDFFigures/UDLChap15PDF.zip (new file; binary not shown)
BIN PDFFigures/UDLChap16PDF.zip (new file; binary not shown)
BIN PDFFigures/UDLChap17PDF.zip (new file; binary not shown)
BIN PDFFigures/UDLChap18PDF.zip (new file; binary not shown)
BIN PDFFigures/UDLChap19PDF.zip (new file; binary not shown)
BIN PDFFigures/UDLChap1PDF.zip (new file; binary not shown)
BIN PDFFigures/UDLChap20PDF.zip (new file; binary not shown)
BIN PDFFigures/UDLChap21PDF.zip (new file; binary not shown)
Some files were not shown because too many files have changed in this diff.