Compare commits
174 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
d80d04c2d4 | ||
|
|
c1f0181653 | ||
|
|
6e18234d24 | ||
|
|
5730c05547 | ||
|
|
ccb80c16b8 | ||
|
|
87387b2b4c | ||
|
|
06eaec9749 | ||
|
|
9aeda14efa | ||
|
|
d1df6426b2 | ||
|
|
43b8fa3685 | ||
|
|
ca6e4b29ac | ||
|
|
267d6ccb7f | ||
|
|
735947b728 | ||
|
|
251aef1876 | ||
|
|
07ff6c06b1 | ||
|
|
29e4cec04e | ||
|
|
c3ce38410c | ||
|
|
646e60ed95 | ||
|
|
5e61bcf694 | ||
|
|
54399a3c68 | ||
|
|
3926ff41ea | ||
|
|
9c34bfed02 | ||
|
|
9176623331 | ||
|
|
5534df187e | ||
|
|
9b58b2862f | ||
|
|
2070ac4400 | ||
|
|
393e4907dc | ||
|
|
e850676722 | ||
|
|
796f17ed90 | ||
|
|
dc0301a86e | ||
|
|
813f628e4e | ||
|
|
3ae7d68f6e | ||
|
|
a96a14999f | ||
|
|
f91e878eef | ||
|
|
9b89499b75 | ||
|
|
7d6ac5e34f | ||
|
|
55dbe7e0c4 | ||
|
|
1cf21ea61a | ||
|
|
e4191beb79 | ||
|
|
10b9dea9a4 | ||
|
|
414eeb3557 | ||
|
|
f126809572 | ||
|
|
2a30c49d22 | ||
|
|
bb32fe0cdf | ||
|
|
1ee756cf9a | ||
|
|
742d922ce7 | ||
|
|
c02eea499c | ||
|
|
cb94b61abd | ||
|
|
447bb82e2f | ||
|
|
77da5694bb | ||
|
|
96c7e41c9d | ||
|
|
625d1e29bb | ||
|
|
3cf0c4c418 | ||
|
|
03c92541ad | ||
|
|
def3e5234b | ||
|
|
815adb9b21 | ||
|
|
5ba28e5b56 | ||
|
|
8566a7322f | ||
|
|
c867e67e8c | ||
|
|
cba27b3da4 | ||
|
|
1c706bd058 | ||
|
|
72514994bf | ||
|
|
872926c17e | ||
|
|
0dfeb169be | ||
|
|
89a0532283 | ||
|
|
af5a719496 | ||
|
|
56c31efc90 | ||
|
|
06fc37c243 | ||
|
|
45793f02f8 | ||
|
|
7c4cc1ddb4 | ||
|
|
35b6f67bbf | ||
|
|
194baf622a | ||
|
|
a547fee3f4 | ||
|
|
ea4858e78e | ||
|
|
444b06d5c2 | ||
|
|
98bce9edb5 | ||
|
|
37e9ae2311 | ||
|
|
ea1b6ad998 | ||
|
|
d17a5a3872 | ||
|
|
3e7e059bff | ||
|
|
445ad11c46 | ||
|
|
6928b50966 | ||
|
|
e1d34ed561 | ||
|
|
f3528f758b | ||
|
|
5c7a03172a | ||
|
|
0233131b07 | ||
|
|
8200299e64 | ||
|
|
2ac42e70d3 | ||
|
|
dd0eaeb781 | ||
|
|
2cdff544f3 | ||
|
|
384e122c5f | ||
|
|
1343b68c60 | ||
|
|
30420a2f92 | ||
|
|
89e8ebcbc5 | ||
|
|
14b751ff47 | ||
|
|
80e99ef2da | ||
|
|
46214f64bc | ||
|
|
c875fb0361 | ||
|
|
451ccc0832 | ||
|
|
4b939b7426 | ||
|
|
2d300a16a1 | ||
|
|
d057548be9 | ||
|
|
75976a32d0 | ||
|
|
48b204df2c | ||
|
|
9b68e6a8e6 | ||
|
|
862ac6e4d3 | ||
|
|
8fe07cf0fb | ||
|
|
c9679dee90 | ||
|
|
90d879494f | ||
|
|
19bdc23674 | ||
|
|
d7f9929a3c | ||
|
|
a7ac089fc0 | ||
|
|
8fd753d191 | ||
|
|
51424b57bd | ||
|
|
80732b29bc | ||
|
|
36e3a53764 | ||
|
|
569749963b | ||
|
|
d17e47421b | ||
|
|
e8fca0cb0a | ||
|
|
19c0c7ab3e | ||
|
|
418ea93e83 | ||
|
|
ea248af22f | ||
|
|
5492ed0ee5 | ||
|
|
d9138d6177 | ||
|
|
a5413d6a15 | ||
|
|
faf53a49a0 | ||
|
|
7e41097381 | ||
|
|
72b2d79ec7 | ||
|
|
d81bef8a6e | ||
|
|
911da8ca58 | ||
|
|
031401a3dd | ||
|
|
4652f90f09 | ||
|
|
5f524edd3b | ||
|
|
7a423507f5 | ||
|
|
4a5bd9c4d5 | ||
|
|
c0cd9c2aea | ||
|
|
924b6e220d | ||
|
|
b535a13d57 | ||
|
|
d0d413b9f6 | ||
|
|
1b53be1e08 | ||
|
|
bd12e774a4 | ||
|
|
e6c3938567 | ||
|
|
50c93469d5 | ||
|
|
666e2de7d8 | ||
|
|
e947b261f8 | ||
|
|
30801a1d2b | ||
|
|
22d5bc320f | ||
|
|
5c0fd0057f | ||
|
|
9b2b30d4cc | ||
|
|
46e119fcf2 | ||
|
|
f197be3554 | ||
|
|
0fa468cf2c | ||
|
|
e11989bd78 | ||
|
|
566120cc48 | ||
|
|
9f2449fcde | ||
|
|
025b677457 | ||
|
|
435971e3e2 | ||
|
|
6e76cb9b96 | ||
|
|
732fc6f0b7 | ||
|
|
f2a3fab832 | ||
|
|
8e3008673d | ||
|
|
07bcc98a85 | ||
|
|
f4fa3e8397 | ||
|
|
21cff37c72 | ||
|
|
187c6a7352 | ||
|
|
8e4a0d4daf | ||
|
|
23b5affab3 | ||
|
|
4fb8ffe622 | ||
|
|
2adc1da566 | ||
|
|
6e4551a69f | ||
|
|
65c685706a | ||
|
|
934f5f7748 | ||
|
|
365cb41bba | ||
|
|
4855761fb2 |
10
.editorconfig
Normal file
10
.editorconfig
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
root = true
|
||||||
|
|
||||||
|
[*.{js,jsx,ts,tsx,md,mdx,json,cjs,mjs,css}]
|
||||||
|
indent_style = space
|
||||||
|
indent_size = 4
|
||||||
|
end_of_line = lf
|
||||||
|
charset = utf-8
|
||||||
|
trim_trailing_whitespace = true
|
||||||
|
insert_final_newline = true
|
||||||
|
max_line_length = 100
|
||||||
18
.eslintrc.cjs
Normal file
18
.eslintrc.cjs
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
module.exports = {
|
||||||
|
root: true,
|
||||||
|
env: { browser: true, es2020: true, node: true },
|
||||||
|
extends: [
|
||||||
|
"eslint:recommended",
|
||||||
|
"plugin:react/recommended",
|
||||||
|
"plugin:react/jsx-runtime",
|
||||||
|
"plugin:react-hooks/recommended",
|
||||||
|
],
|
||||||
|
ignorePatterns: ["build", ".eslintrc.cjs"],
|
||||||
|
parserOptions: { ecmaVersion: "latest", sourceType: "module" },
|
||||||
|
settings: { react: { version: "18.2" } },
|
||||||
|
plugins: ["react-refresh"],
|
||||||
|
rules: {
|
||||||
|
"react/jsx-no-target-blank": "off",
|
||||||
|
"react-refresh/only-export-components": ["warn", { allowConstantExport: true }],
|
||||||
|
},
|
||||||
|
};
|
||||||
30
.gitignore
vendored
Executable file
30
.gitignore
vendored
Executable file
@@ -0,0 +1,30 @@
|
|||||||
|
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
|
||||||
|
|
||||||
|
# dependencies
|
||||||
|
/node_modules
|
||||||
|
/.pnp
|
||||||
|
.pnp.js
|
||||||
|
|
||||||
|
# testing
|
||||||
|
/coverage
|
||||||
|
|
||||||
|
# production
|
||||||
|
/dist
|
||||||
|
|
||||||
|
# ENV
|
||||||
|
.env.local
|
||||||
|
.env.development.local
|
||||||
|
.env.test.local
|
||||||
|
.env.production.local
|
||||||
|
|
||||||
|
# debug
|
||||||
|
npm-debug.log*
|
||||||
|
yarn-debug.log*
|
||||||
|
yarn-error.log*
|
||||||
|
|
||||||
|
# IDE
|
||||||
|
.idea
|
||||||
|
.vscode
|
||||||
|
|
||||||
|
# macOS
|
||||||
|
.DS_Store
|
||||||
7
.prettierignore
Normal file
7
.prettierignore
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
# ignore these directories when formatting the repo
|
||||||
|
/Blogs
|
||||||
|
/CM20315
|
||||||
|
/CM20315_2023
|
||||||
|
/Notebooks
|
||||||
|
/PDFFigures
|
||||||
|
/Slides
|
||||||
14
.prettierrc.cjs
Normal file
14
.prettierrc.cjs
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
/** @type {import("prettier").Config} */
|
||||||
|
const prettierConfig = {
|
||||||
|
trailingComma: "all",
|
||||||
|
tabWidth: 4,
|
||||||
|
useTabs: false,
|
||||||
|
semi: true,
|
||||||
|
singleQuote: false,
|
||||||
|
bracketSpacing: true,
|
||||||
|
printWidth: 100,
|
||||||
|
endOfLine: "lf",
|
||||||
|
plugins: [require.resolve("prettier-plugin-organize-imports")],
|
||||||
|
};
|
||||||
|
|
||||||
|
module.exports = prettierConfig;
|
||||||
1097
Blogs/BorealisBayesianFunction.ipynb
Normal file
1097
Blogs/BorealisBayesianFunction.ipynb
Normal file
File diff suppressed because one or more lines are too long
519
Blogs/BorealisBayesianParameter.ipynb
Normal file
519
Blogs/BorealisBayesianParameter.ipynb
Normal file
File diff suppressed because one or more lines are too long
401
Blogs/BorealisGradientFlow.ipynb
Normal file
401
Blogs/BorealisGradientFlow.ipynb
Normal file
@@ -0,0 +1,401 @@
|
|||||||
|
{
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 0,
|
||||||
|
"metadata": {
|
||||||
|
"colab": {
|
||||||
|
"provenance": [],
|
||||||
|
"authorship_tag": "ABX9TyP9fLqBQPgcYJB1KXs3Scp/",
|
||||||
|
"include_colab_link": true
|
||||||
|
},
|
||||||
|
"kernelspec": {
|
||||||
|
"name": "python3",
|
||||||
|
"display_name": "Python 3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"name": "python"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {
|
||||||
|
"id": "view-in-github",
|
||||||
|
"colab_type": "text"
|
||||||
|
},
|
||||||
|
"source": [
|
||||||
|
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Blogs/BorealisGradientFlow.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"source": [
|
||||||
|
"# Gradient flow\n",
|
||||||
|
"\n",
|
||||||
|
"This notebook replicates some of the results in the the Borealis AI [blog](https://www.borealisai.com/research-blogs/gradient-flow/) on gradient flow. \n"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"id": "ucrRRJ4dq8_d"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"source": [
|
||||||
|
"# Import relevant libraries\n",
|
||||||
|
"import numpy as np\n",
|
||||||
|
"import matplotlib.pyplot as plt\n",
|
||||||
|
"from scipy.linalg import expm\n",
|
||||||
|
"from matplotlib import cm\n",
|
||||||
|
"from matplotlib.colors import ListedColormap"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"id": "_IQFHZEMZE8T"
|
||||||
|
},
|
||||||
|
"execution_count": null,
|
||||||
|
"outputs": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"source": [
|
||||||
|
"Create the three data points that are used to train the linear model in the blog. Each input point is a column in $\\mathbf{X}$ and consists of the $x$ position in the plot and the value 1, which is used to allow the model to fit bias terms neatly."
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"id": "NwgUP3MSriiJ"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "cJNZ2VIcYsD8"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"X = np.array([[0.2, 0.4, 0.8],[1,1,1]])\n",
|
||||||
|
"y = np.array([[-0.1],[0.15],[0.3]])\n",
|
||||||
|
"D = X.shape[0]\n",
|
||||||
|
"I = X.shape[1]\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"X=\\n\",X)\n",
|
||||||
|
"print(\"y=\\n\",y)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"source": [
|
||||||
|
"# Draw the three data points\n",
|
||||||
|
"fig, ax = plt.subplots()\n",
|
||||||
|
"ax.plot(X[0:1,:],y.T,'ro')\n",
|
||||||
|
"ax.set_xlim([0,1]); ax.set_ylim([-0.5,0.5])\n",
|
||||||
|
"ax.set_xlabel('x'); ax.set_ylabel('y')\n",
|
||||||
|
"plt.show()"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"id": "FpFlD4nUZDRt"
|
||||||
|
},
|
||||||
|
"execution_count": null,
|
||||||
|
"outputs": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"source": [
|
||||||
|
"Compute the evolution of the residuals, loss, and parameters as a function of time."
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"id": "H2LBR1DasQej"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"source": [
|
||||||
|
"# Discretized time to evaluate quantities at\n",
|
||||||
|
"t_all = np.arange(0,20,0.01)\n",
|
||||||
|
"nT = t_all.shape[0]\n",
|
||||||
|
"\n",
|
||||||
|
"# Initial parameters, and initial function output at training points\n",
|
||||||
|
"phi_0 = np.array([[-0.05],[-0.4]])\n",
|
||||||
|
"f_0 = X.T @ phi_0\n",
|
||||||
|
"\n",
|
||||||
|
"# Precompute pseudoinverse term (not a very sensible numerical implementation, but it works...)\n",
|
||||||
|
"XXTInvX = np.linalg.inv(X@X.T)@X\n",
|
||||||
|
"\n",
|
||||||
|
"# Create arrays to hold function at data points over time, residual over time, parameters over time\n",
|
||||||
|
"f_all = np.zeros((I,nT))\n",
|
||||||
|
"f_minus_y_all = np.zeros((I,nT))\n",
|
||||||
|
"phi_t_all = np.zeros((D,nT))\n",
|
||||||
|
"\n",
|
||||||
|
"# For each time, compute function, residual, and parameters at each time.\n",
|
||||||
|
"for t in range(len(t_all)):\n",
|
||||||
|
" f = y + expm(-X.T@X * t_all[t]) @ (f_0-y)\n",
|
||||||
|
" f_all[:,t:t+1] = f\n",
|
||||||
|
" f_minus_y_all[:,t:t+1] = f-y\n",
|
||||||
|
" phi_t_all[:,t:t+1] = phi_0 - XXTInvX @ (np.identity(3)-expm(-X.T@X * t_all[t])) @ (f_0-y)"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"id": "wfF_oTS5Z4Wi"
|
||||||
|
},
|
||||||
|
"execution_count": null,
|
||||||
|
"outputs": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"source": [
|
||||||
|
"Plot the results that were calculated in the previous cell"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"id": "9jSjOOFutJUE"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"source": [
|
||||||
|
"# Plot function at data points\n",
|
||||||
|
"fig, ax = plt.subplots()\n",
|
||||||
|
"ax.plot(t_all,np.squeeze(f_all[0,:]),'r-', label='$f[x_{0},\\phi]$')\n",
|
||||||
|
"ax.plot(t_all,np.squeeze(f_all[1,:]),'g-', label='$f[x_{1},\\phi]$')\n",
|
||||||
|
"ax.plot(t_all,np.squeeze(f_all[2,:]),'b-', label='$f[x_{2},\\phi]$')\n",
|
||||||
|
"ax.set_xlim([0,np.max(t_all)]); ax.set_ylim([-0.5,0.5])\n",
|
||||||
|
"ax.set_xlabel('t'); ax.set_ylabel('f')\n",
|
||||||
|
"plt.legend(loc=\"lower right\")\n",
|
||||||
|
"plt.show()\n",
|
||||||
|
"\n",
|
||||||
|
"# Plot residual\n",
|
||||||
|
"fig, ax = plt.subplots()\n",
|
||||||
|
"ax.plot(t_all,np.squeeze(f_minus_y_all[0,:]),'r-', label='$f[x_{0},\\phi]-y_{0}$')\n",
|
||||||
|
"ax.plot(t_all,np.squeeze(f_minus_y_all[1,:]),'g-', label='$f[x_{1},\\phi]-y_{1}$')\n",
|
||||||
|
"ax.plot(t_all,np.squeeze(f_minus_y_all[2,:]),'b-', label='$f[x_{2},\\phi]-y_{2}$')\n",
|
||||||
|
"ax.set_xlim([0,np.max(t_all)]); ax.set_ylim([-0.5,0.5])\n",
|
||||||
|
"ax.set_xlabel('t'); ax.set_ylabel('f-y')\n",
|
||||||
|
"plt.legend(loc=\"lower right\")\n",
|
||||||
|
"plt.show()\n",
|
||||||
|
"\n",
|
||||||
|
"# Plot loss (sum of residuals)\n",
|
||||||
|
"fig, ax = plt.subplots()\n",
|
||||||
|
"square_error = 0.5 * np.sum(f_minus_y_all * f_minus_y_all, axis=0)\n",
|
||||||
|
"ax.plot(t_all, square_error,'k-')\n",
|
||||||
|
"ax.set_xlim([0,np.max(t_all)]); ax.set_ylim([-0.0,0.25])\n",
|
||||||
|
"ax.set_xlabel('t'); ax.set_ylabel('Loss')\n",
|
||||||
|
"plt.show()\n",
|
||||||
|
"\n",
|
||||||
|
"# Plot parameters\n",
|
||||||
|
"fig, ax = plt.subplots()\n",
|
||||||
|
"ax.plot(t_all, np.squeeze(phi_t_all[0,:]),'c-',label='$\\phi_{0}$')\n",
|
||||||
|
"ax.plot(t_all, np.squeeze(phi_t_all[1,:]),'m-',label='$\\phi_{1}$')\n",
|
||||||
|
"ax.set_xlim([0,np.max(t_all)]); ax.set_ylim([-1,1])\n",
|
||||||
|
"ax.set_xlabel('t'); ax.set_ylabel('$\\phi$')\n",
|
||||||
|
"plt.legend(loc=\"lower right\")\n",
|
||||||
|
"plt.show()"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"id": "G9IwgwKltHz5"
|
||||||
|
},
|
||||||
|
"execution_count": null,
|
||||||
|
"outputs": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"source": [
|
||||||
|
"Define the model and the loss function"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"id": "N6VaUq2swa8D"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"source": [
|
||||||
|
"# Model is just a straight line with intercept phi[0] and slope phi[1]\n",
|
||||||
|
"def model(phi,x):\n",
|
||||||
|
" y_pred = phi[0]+phi[1] * x\n",
|
||||||
|
" return y_pred\n",
|
||||||
|
"\n",
|
||||||
|
"# Loss function is 0.5 times sum of squares of residuals for training data\n",
|
||||||
|
"def compute_loss(data_x, data_y, model, phi):\n",
|
||||||
|
" pred_y = model(phi, data_x)\n",
|
||||||
|
" loss = 0.5 * np.sum((pred_y-data_y)*(pred_y-data_y))\n",
|
||||||
|
" return loss"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"id": "LGHEVUWWiB4f"
|
||||||
|
},
|
||||||
|
"execution_count": null,
|
||||||
|
"outputs": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"source": [
|
||||||
|
"Draw the loss function"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"id": "hr3hs7pKwo0g"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"source": [
|
||||||
|
"def draw_loss_function(compute_loss, X, y, model, phi_iters):\n",
|
||||||
|
" # Define pretty colormap\n",
|
||||||
|
" my_colormap_vals_hex =('2a0902', '2b0a03', '2c0b04', '2d0c05', '2e0c06', '2f0d07', '300d08', '310e09', '320f0a', '330f0b', '34100b', '35110c', '36110d', '37120e', '38120f', '39130f', '3a1410', '3b1411', '3c1511', '3d1612', '3e1613', '3f1713', '401714', '411814', '421915', '431915', '451a16', '461b16', '471b17', '481c17', '491d18', '4a1d18', '4b1e19', '4c1f19', '4d1f1a', '4e201b', '50211b', '51211c', '52221c', '53231d', '54231d', '55241e', '56251e', '57261f', '58261f', '592720', '5b2821', '5c2821', '5d2922', '5e2a22', '5f2b23', '602b23', '612c24', '622d25', '632e25', '652e26', '662f26', '673027', '683027', '693128', '6a3229', '6b3329', '6c342a', '6d342a', '6f352b', '70362c', '71372c', '72372d', '73382e', '74392e', '753a2f', '763a2f', '773b30', '783c31', '7a3d31', '7b3e32', '7c3e33', '7d3f33', '7e4034', '7f4134', '804235', '814236', '824336', '834437', '854538', '864638', '874739', '88473a', '89483a', '8a493b', '8b4a3c', '8c4b3c', '8d4c3d', '8e4c3e', '8f4d3f', '904e3f', '924f40', '935041', '945141', '955242', '965343', '975343', '985444', '995545', '9a5646', '9b5746', '9c5847', '9d5948', '9e5a49', '9f5a49', 'a05b4a', 'a15c4b', 'a35d4b', 'a45e4c', 'a55f4d', 'a6604e', 'a7614e', 'a8624f', 'a96350', 'aa6451', 'ab6552', 'ac6552', 'ad6653', 'ae6754', 'af6855', 'b06955', 'b16a56', 'b26b57', 'b36c58', 'b46d59', 'b56e59', 'b66f5a', 'b7705b', 'b8715c', 'b9725d', 'ba735d', 'bb745e', 'bc755f', 'bd7660', 'be7761', 'bf7862', 'c07962', 'c17a63', 'c27b64', 'c27c65', 'c37d66', 'c47e67', 'c57f68', 'c68068', 'c78169', 'c8826a', 'c9836b', 'ca846c', 'cb856d', 'cc866e', 'cd876f', 'ce886f', 'ce8970', 'cf8a71', 'd08b72', 'd18c73', 'd28d74', 'd38e75', 'd48f76', 'd59077', 'd59178', 'd69279', 'd7937a', 'd8957b', 'd9967b', 'da977c', 'da987d', 'db997e', 'dc9a7f', 'dd9b80', 'de9c81', 'de9d82', 'df9e83', 'e09f84', 'e1a185', 'e2a286', 'e2a387', 'e3a488', 'e4a589', 'e5a68a', 'e5a78b', 'e6a88c', 'e7aa8d', 'e7ab8e', 'e8ac8f', 'e9ad90', 'eaae91', 'eaaf92', 'ebb093', 'ecb295', 'ecb396', 'edb497', 
'eeb598', 'eeb699', 'efb79a', 'efb99b', 'f0ba9c', 'f1bb9d', 'f1bc9e', 'f2bd9f', 'f2bfa1', 'f3c0a2', 'f3c1a3', 'f4c2a4', 'f5c3a5', 'f5c5a6', 'f6c6a7', 'f6c7a8', 'f7c8aa', 'f7c9ab', 'f8cbac', 'f8ccad', 'f8cdae', 'f9ceb0', 'f9d0b1', 'fad1b2', 'fad2b3', 'fbd3b4', 'fbd5b6', 'fbd6b7', 'fcd7b8', 'fcd8b9', 'fcdaba', 'fddbbc', 'fddcbd', 'fddebe', 'fddfbf', 'fee0c1', 'fee1c2', 'fee3c3', 'fee4c5', 'ffe5c6', 'ffe7c7', 'ffe8c9', 'ffe9ca', 'ffebcb', 'ffeccd', 'ffedce', 'ffefcf', 'fff0d1', 'fff2d2', 'fff3d3', 'fff4d5', 'fff6d6', 'fff7d8', 'fff8d9', 'fffada', 'fffbdc', 'fffcdd', 'fffedf', 'ffffe0')\n",
|
||||||
|
" my_colormap_vals_dec = np.array([int(element,base=16) for element in my_colormap_vals_hex])\n",
|
||||||
|
" r = np.floor(my_colormap_vals_dec/(256*256))\n",
|
||||||
|
" g = np.floor((my_colormap_vals_dec - r *256 *256)/256)\n",
|
||||||
|
" b = np.floor(my_colormap_vals_dec - r * 256 *256 - g * 256)\n",
|
||||||
|
" my_colormap = ListedColormap(np.vstack((r,g,b)).transpose()/255.0)\n",
|
||||||
|
"\n",
|
||||||
|
" # Make grid of intercept/slope values to plot\n",
|
||||||
|
" intercepts_mesh, slopes_mesh = np.meshgrid(np.arange(-1.0,1.0,0.005), np.arange(-1.0,1.0,0.005))\n",
|
||||||
|
" loss_mesh = np.zeros_like(slopes_mesh)\n",
|
||||||
|
" # Compute loss for every set of parameters\n",
|
||||||
|
" for idslope, slope in np.ndenumerate(slopes_mesh):\n",
|
||||||
|
" loss_mesh[idslope] = compute_loss(X, y, model, np.array([[intercepts_mesh[idslope]], [slope]]))\n",
|
||||||
|
"\n",
|
||||||
|
" fig,ax = plt.subplots()\n",
|
||||||
|
" fig.set_size_inches(8,8)\n",
|
||||||
|
" ax.contourf(intercepts_mesh,slopes_mesh,loss_mesh,256,cmap=my_colormap)\n",
|
||||||
|
" ax.contour(intercepts_mesh,slopes_mesh,loss_mesh,40,colors=['#80808080'])\n",
|
||||||
|
" ax.set_ylim([1,-1]); ax.set_xlim([-1,1])\n",
|
||||||
|
"\n",
|
||||||
|
" ax.plot(phi_iters[1,:], phi_iters[0,:],'g-')\n",
|
||||||
|
" ax.set_xlabel('Intercept'); ax.set_ylabel('Slope')\n",
|
||||||
|
" plt.show()"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"id": "UCxa3tZ8a9kz"
|
||||||
|
},
|
||||||
|
"execution_count": null,
|
||||||
|
"outputs": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"source": [
|
||||||
|
"draw_loss_function(compute_loss, X[0:1,:], y.T, model, phi_t_all)"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"id": "pXLLBaSaiI2A"
|
||||||
|
},
|
||||||
|
"execution_count": null,
|
||||||
|
"outputs": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"source": [
|
||||||
|
"Draw the evolution of the function"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"id": "ZsremHW-xFi5"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"source": [
|
||||||
|
"fig, ax = plt.subplots()\n",
|
||||||
|
"ax.plot(X[0:1,:],y.T,'ro')\n",
|
||||||
|
"x_vals = np.arange(0,1,0.001)\n",
|
||||||
|
"ax.plot(x_vals, phi_t_all[0,0]*x_vals + phi_t_all[1,0],'r-', label='t=0.00')\n",
|
||||||
|
"ax.plot(x_vals, phi_t_all[0,10]*x_vals + phi_t_all[1,10],'g-', label='t=0.10')\n",
|
||||||
|
"ax.plot(x_vals, phi_t_all[0,30]*x_vals + phi_t_all[1,30],'b-', label='t=0.30')\n",
|
||||||
|
"ax.plot(x_vals, phi_t_all[0,200]*x_vals + phi_t_all[1,200],'c-', label='t=2.00')\n",
|
||||||
|
"ax.plot(x_vals, phi_t_all[0,1999]*x_vals + phi_t_all[1,1999],'y-', label='t=20.0')\n",
|
||||||
|
"ax.set_xlim([0,1]); ax.set_ylim([-0.5,0.5])\n",
|
||||||
|
"ax.set_xlabel('x'); ax.set_ylabel('y')\n",
|
||||||
|
"plt.legend(loc=\"upper left\")\n",
|
||||||
|
"plt.show()"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"id": "cv9ZrUoRkuhI"
|
||||||
|
},
|
||||||
|
"execution_count": null,
|
||||||
|
"outputs": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"source": [
|
||||||
|
"# Compute MAP and ML solutions\n",
|
||||||
|
"MLParams = np.linalg.inv(X@X.T)@X@y\n",
|
||||||
|
"sigma_sq_p = 3.0\n",
|
||||||
|
"sigma_sq = 0.05\n",
|
||||||
|
"MAPParams = np.linalg.inv(X@X.T+np.identity(X.shape[0])*sigma_sq/sigma_sq_p)@X@y"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"id": "OU9oegSOof-o"
|
||||||
|
},
|
||||||
|
"execution_count": null,
|
||||||
|
"outputs": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"source": [
|
||||||
|
"Finally, we predict both the mean and the uncertainty in the fitted model as a function of time"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"id": "Ul__XvOgyYSA"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"source": [
|
||||||
|
"# Define x positions to make predictions (appending a 1 to each column)\n",
|
||||||
|
"x_predict = np.arange(0,1,0.01)[None,:]\n",
|
||||||
|
"x_predict = np.concatenate((x_predict,np.ones_like(x_predict)))\n",
|
||||||
|
"nX = x_predict.shape[1]\n",
|
||||||
|
"\n",
|
||||||
|
"# Create variables to store evolution of mean and variance of prediction over time\n",
|
||||||
|
"predict_mean_all = np.zeros((nT,nX))\n",
|
||||||
|
"predict_var_all = np.zeros((nT,nX))\n",
|
||||||
|
"\n",
|
||||||
|
"# Initial covariance\n",
|
||||||
|
"sigma_sq_p = 2.0\n",
|
||||||
|
"cov_init = sigma_sq_p * np.identity(2)\n",
|
||||||
|
"\n",
|
||||||
|
"# Run through each time computing a and b and hence mean and variance of prediction\n",
|
||||||
|
"for t in range(len(t_all)):\n",
|
||||||
|
" a = x_predict.T @(XXTInvX @ (np.identity(3)-expm(-X.T@X * t_all[t])) @ y)\n",
|
||||||
|
" b = x_predict.T -x_predict.T@XXTInvX @ (np.identity(3)-expm(-X.T@X * t_all[t])) @ X.T\n",
|
||||||
|
" predict_mean_all[t:t+1,:] = a.T\n",
|
||||||
|
" predict_cov = b@ cov_init @b.T\n",
|
||||||
|
" # We just want the diagonal of the covariance to plot the uncertainty\n",
|
||||||
|
" predict_var_all[t:t+1,:] = np.reshape(np.diag(predict_cov),(1,nX))"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"id": "aMPADCuByKWr"
|
||||||
|
},
|
||||||
|
"execution_count": null,
|
||||||
|
"outputs": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"source": [
|
||||||
|
"Plot the mean and variance at various times"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"id": "PZTj93KK7QH6"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"source": [
|
||||||
|
"def plot_mean_var(X,y,x_predict, predict_mean_all, predict_var_all, this_t, sigma_sq = 0.00001):\n",
|
||||||
|
" fig, ax = plt.subplots()\n",
|
||||||
|
" ax.plot(X[0:1,:],y.T,'ro')\n",
|
||||||
|
" ax.plot(x_predict[0:1,:].T, predict_mean_all[this_t:this_t+1,:].T,'r-')\n",
|
||||||
|
" lower = np.squeeze(predict_mean_all[this_t:this_t+1,:].T-np.sqrt(predict_var_all[this_t:this_t+1,:].T+np.sqrt(sigma_sq)))\n",
|
||||||
|
" upper = np.squeeze(predict_mean_all[this_t:this_t+1,:].T+np.sqrt(predict_var_all[this_t:this_t+1,:].T+np.sqrt(sigma_sq)))\n",
|
||||||
|
" ax.fill_between(np.squeeze(x_predict[0:1,:]), lower, upper, color='lightgray')\n",
|
||||||
|
" ax.set_xlim([0,1]); ax.set_ylim([-0.5,0.5])\n",
|
||||||
|
" ax.set_xlabel('x'); ax.set_ylabel('y')\n",
|
||||||
|
" plt.show()\n",
|
||||||
|
"\n",
|
||||||
|
"plot_mean_var(X,y,x_predict, predict_mean_all, predict_var_all, this_t=0)\n",
|
||||||
|
"plot_mean_var(X,y,x_predict, predict_mean_all, predict_var_all, this_t=40)\n",
|
||||||
|
"plot_mean_var(X,y,x_predict, predict_mean_all, predict_var_all, this_t=80)\n",
|
||||||
|
"plot_mean_var(X,y,x_predict, predict_mean_all, predict_var_all, this_t=200)\n",
|
||||||
|
"plot_mean_var(X,y,x_predict, predict_mean_all, predict_var_all, this_t=500)\n",
|
||||||
|
"plot_mean_var(X,y,x_predict, predict_mean_all, predict_var_all, this_t=1000)"
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"id": "bYAFxgB880-v"
|
||||||
|
},
|
||||||
|
"execution_count": null,
|
||||||
|
"outputs": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
1109
Blogs/BorealisNTK.ipynb
Normal file
1109
Blogs/BorealisNTK.ipynb
Normal file
File diff suppressed because one or more lines are too long
1127
Blogs/Borealis_NNGP.ipynb
Normal file
1127
Blogs/Borealis_NNGP.ipynb
Normal file
File diff suppressed because one or more lines are too long
@@ -1,18 +1,16 @@
|
|||||||
{
|
{
|
||||||
"cells": [
|
"cells": [
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"colab_type": "text",
|
"id": "view-in-github",
|
||||||
"id": "view-in-github"
|
"colab_type": "text"
|
||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap01/1_1_BackgroundMathematics.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap01/1_1_BackgroundMathematics.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "s5zzKSOusPOB"
|
"id": "s5zzKSOusPOB"
|
||||||
@@ -41,7 +39,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "WV2Dl6owme2d"
|
"id": "WV2Dl6owme2d"
|
||||||
@@ -49,11 +46,11 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"**Linear functions**<br> We will be using the term *linear equation* to mean a weighted sum of inputs plus an offset. If there is just one input $x$, then this is a straight line:\n",
|
"**Linear functions**<br> We will be using the term *linear equation* to mean a weighted sum of inputs plus an offset. If there is just one input $x$, then this is a straight line:\n",
|
||||||
"\n",
|
"\n",
|
||||||
"\\begin{equation}y=\\beta+\\omega x,\\end{equation} \n",
|
"\\begin{equation}y=\\beta+\\omega x,\\end{equation}\n",
|
||||||
"\n",
|
"\n",
|
||||||
"where $\\beta$ is the y-intercept of the linear and $\\omega$ is the slope of the line. When there are two inputs $x_{1}$ and $x_{2}$, then this becomes:\n",
|
"where $\\beta$ is the y-intercept of the linear and $\\omega$ is the slope of the line. When there are two inputs $x_{1}$ and $x_{2}$, then this becomes:\n",
|
||||||
"\n",
|
"\n",
|
||||||
"\\begin{equation}y=\\beta+\\omega_1 x_1 + \\omega_2 x_2.\\end{equation} \n",
|
"\\begin{equation}y=\\beta+\\omega_1 x_1 + \\omega_2 x_2.\\end{equation}\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Any other functions are by definition **non-linear**.\n",
|
"Any other functions are by definition **non-linear**.\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -99,7 +96,7 @@
|
|||||||
"ax.plot(x,y,'r-')\n",
|
"ax.plot(x,y,'r-')\n",
|
||||||
"ax.set_ylim([0,10]);ax.set_xlim([0,10])\n",
|
"ax.set_ylim([0,10]);ax.set_xlim([0,10])\n",
|
||||||
"ax.set_xlabel('x'); ax.set_ylabel('y')\n",
|
"ax.set_xlabel('x'); ax.set_ylabel('y')\n",
|
||||||
"plt.show\n",
|
"plt.show()\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# TODO -- experiment with changing the values of beta and omega\n",
|
"# TODO -- experiment with changing the values of beta and omega\n",
|
||||||
"# to understand what they do. Try to make a line\n",
|
"# to understand what they do. Try to make a line\n",
|
||||||
@@ -107,7 +104,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "AedfvD9dxShZ"
|
"id": "AedfvD9dxShZ"
|
||||||
@@ -192,7 +188,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "i8tLwpls476R"
|
"id": "i8tLwpls476R"
|
||||||
@@ -236,7 +231,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "fGzVJQ6N-mHJ"
|
"id": "fGzVJQ6N-mHJ"
|
||||||
@@ -275,11 +269,10 @@
|
|||||||
"# Compute with vector/matrix form\n",
|
"# Compute with vector/matrix form\n",
|
||||||
"y_vec = beta_vec+np.matmul(omega_mat, x_vec)\n",
|
"y_vec = beta_vec+np.matmul(omega_mat, x_vec)\n",
|
||||||
"print(\"Matrix/vector form\")\n",
|
"print(\"Matrix/vector form\")\n",
|
||||||
"print('y1= %3.3f\\ny2 = %3.3f'%((y_vec[0],y_vec[1])))\n"
|
"print('y1= %3.3f\\ny2 = %3.3f'%((y_vec[0][0],y_vec[1][0])))\n"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "3LGRoTMLU8ZU"
|
"id": "3LGRoTMLU8ZU"
|
||||||
@@ -293,7 +286,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "7Y5zdKtKZAB2"
|
"id": "7Y5zdKtKZAB2"
|
||||||
@@ -325,11 +317,10 @@
|
|||||||
"ax.plot(x,y,'r-')\n",
|
"ax.plot(x,y,'r-')\n",
|
||||||
"ax.set_ylim([0,100]);ax.set_xlim([-5,5])\n",
|
"ax.set_ylim([0,100]);ax.set_xlim([-5,5])\n",
|
||||||
"ax.set_xlabel('x'); ax.set_ylabel('exp[x]')\n",
|
"ax.set_xlabel('x'); ax.set_ylabel('exp[x]')\n",
|
||||||
"plt.show"
|
"plt.show()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "XyrT8257IWCu"
|
"id": "XyrT8257IWCu"
|
||||||
@@ -341,11 +332,10 @@
|
|||||||
"2. What is $\\exp[1]$?\n",
|
"2. What is $\\exp[1]$?\n",
|
||||||
"3. What is $\\exp[-\\infty]$?\n",
|
"3. What is $\\exp[-\\infty]$?\n",
|
||||||
"4. What is $\\exp[+\\infty]$?\n",
|
"4. What is $\\exp[+\\infty]$?\n",
|
||||||
"5. A function is convex if we can draw a straight line between any two points on the function, and this line always lies above the function. Similarly, a function is concave if a straight line between any two points always lies below the function. Is the exponential function convex or concave or neither?\n"
|
"5. A function is convex if we can draw a straight line between any two points on the function, and the line lies above the function everywhere between these two points. Similarly, a function is concave if a straight line between any two points lies below the function everywhere between these two points. Is the exponential function convex or concave or neither?\n"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "R6A4e5IxIWCu"
|
"id": "R6A4e5IxIWCu"
|
||||||
@@ -373,11 +363,10 @@
|
|||||||
"ax.plot(x,y,'r-')\n",
|
"ax.plot(x,y,'r-')\n",
|
||||||
"ax.set_ylim([-5,5]);ax.set_xlim([0,5])\n",
|
"ax.set_ylim([-5,5]);ax.set_xlim([0,5])\n",
|
||||||
"ax.set_xlabel('x'); ax.set_ylabel('$\\log[x]$')\n",
|
"ax.set_xlabel('x'); ax.set_ylabel('$\\log[x]$')\n",
|
||||||
"plt.show"
|
"plt.show()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "yYWrL5AXIWCv"
|
"id": "yYWrL5AXIWCv"
|
||||||
@@ -397,8 +386,8 @@
|
|||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"include_colab_link": true,
|
"provenance": [],
|
||||||
"provenance": []
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3 (ipykernel)",
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
|||||||
@@ -4,7 +4,6 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"authorship_tag": "ABX9TyOmndC0N7dFV7W3Mh5ljOLl",
|
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -197,7 +196,7 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"# Visualizing the loss function\n",
|
"# Visualizing the loss function\n",
|
||||||
"\n",
|
"\n",
|
||||||
"The above process is equivalent to to descending coordinate wise on the loss function<br>\n",
|
"The above process is equivalent to descending coordinate wise on the loss function<br>\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Now let's plot that function"
|
"Now let's plot that function"
|
||||||
],
|
],
|
||||||
@@ -235,8 +234,8 @@
|
|||||||
"levels = 40\n",
|
"levels = 40\n",
|
||||||
"ax.contour(phi0_mesh, phi1_mesh, all_losses ,levels, colors=['#80808080'])\n",
|
"ax.contour(phi0_mesh, phi1_mesh, all_losses ,levels, colors=['#80808080'])\n",
|
||||||
"ax.set_ylim([1,-1])\n",
|
"ax.set_ylim([1,-1])\n",
|
||||||
"ax.set_xlabel('Intercept, $\\phi_0$')\n",
|
"ax.set_xlabel(r'Intercept, $\\phi_0$')\n",
|
||||||
"ax.set_ylabel('Slope, $\\phi_1$')\n",
|
"ax.set_ylabel(r'Slope, $\\phi_1$')\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Plot the position of your best fitting line on the loss function\n",
|
"# Plot the position of your best fitting line on the loss function\n",
|
||||||
"# It should be close to the minimum\n",
|
"# It should be close to the minimum\n",
|
||||||
|
|||||||
@@ -1,18 +1,16 @@
|
|||||||
{
|
{
|
||||||
"cells": [
|
"cells": [
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"colab_type": "text",
|
"id": "view-in-github",
|
||||||
"id": "view-in-github"
|
"colab_type": "text"
|
||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap03/3_1_Shallow_Networks_I.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap03/3_1_Shallow_Networks_I.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "1Z6LB4Ybn1oN"
|
"id": "1Z6LB4Ybn1oN"
|
||||||
@@ -42,7 +40,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "wQDy9UzXpnf5"
|
"id": "wQDy9UzXpnf5"
|
||||||
@@ -102,8 +99,8 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"# Define a shallow neural network with, one input, one output, and three hidden units\n",
|
"# Define a shallow neural network with, one input, one output, and three hidden units\n",
|
||||||
"def shallow_1_1_3(x, activation_fn, phi_0,phi_1,phi_2,phi_3, theta_10, theta_11, theta_20, theta_21, theta_30, theta_31):\n",
|
"def shallow_1_1_3(x, activation_fn, phi_0,phi_1,phi_2,phi_3, theta_10, theta_11, theta_20, theta_21, theta_30, theta_31):\n",
|
||||||
" # TODO Replace the lines below to compute the three initial lines\n",
|
" # TODO Replace the code below to compute the three initial lines\n",
|
||||||
" # (figure 3.3a-c) from the theta parameters. These are the preactivations\n",
|
" # from the theta parameters (i.e. implement equations at bottom of figure 3.3a-c). These are the preactivations\n",
|
||||||
" pre_1 = np.zeros_like(x)\n",
|
" pre_1 = np.zeros_like(x)\n",
|
||||||
" pre_2 = np.zeros_like(x)\n",
|
" pre_2 = np.zeros_like(x)\n",
|
||||||
" pre_3 = np.zeros_like(x)\n",
|
" pre_3 = np.zeros_like(x)\n",
|
||||||
@@ -199,7 +196,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "T34bszToImKQ"
|
"id": "T34bszToImKQ"
|
||||||
@@ -210,7 +206,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "jhaBSS8oIWSX"
|
"id": "jhaBSS8oIWSX"
|
||||||
@@ -269,7 +264,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "osonHsEqVp2I"
|
"id": "osonHsEqVp2I"
|
||||||
@@ -354,9 +348,8 @@
|
|||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"authorship_tag": "ABX9TyPBNztJrxnUt1ELWfm1Awa3",
|
"provenance": [],
|
||||||
"include_colab_link": true,
|
"include_colab_link": true
|
||||||
"provenance": []
|
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3",
|
"display_name": "Python 3",
|
||||||
|
|||||||
@@ -134,7 +134,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
"source": [
|
||||||
"Let's define two networks. We'll put the prefixes n1_ and n2_ before all the variables to make it clear which network is which. We'll just consider the inputs and outputs over the range [-1,1]. If you set the \"plot_all\" flat to True, you can see the details of how they were created."
|
"Let's define two networks. We'll put the prefixes n1_ and n2_ before all the variables to make it clear which network is which. We'll just consider the inputs and outputs over the range [-1,1]."
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "LxBJCObC-NTY"
|
"id": "LxBJCObC-NTY"
|
||||||
|
|||||||
@@ -4,7 +4,7 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"authorship_tag": "ABX9TyPkFrjmRAUf0fxN07RC4xMI",
|
"authorship_tag": "ABX9TyPZzptvvf7OPZai8erQ/0xT",
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -127,26 +127,26 @@
|
|||||||
" fig, ax = plt.subplots(3,3)\n",
|
" fig, ax = plt.subplots(3,3)\n",
|
||||||
" fig.set_size_inches(8.5, 8.5)\n",
|
" fig.set_size_inches(8.5, 8.5)\n",
|
||||||
" fig.tight_layout(pad=3.0)\n",
|
" fig.tight_layout(pad=3.0)\n",
|
||||||
" ax[0,0].plot(x,layer2_pre_1,'r-'); ax[0,0].set_ylabel('$\\psi_{10}+\\psi_{11}h_{1}+\\psi_{12}h_{2}+\\psi_{13}h_3$')\n",
|
" ax[0,0].plot(x,layer2_pre_1,'r-'); ax[0,0].set_ylabel(r'$\\psi_{10}+\\psi_{11}h_{1}+\\psi_{12}h_{2}+\\psi_{13}h_3$')\n",
|
||||||
" ax[0,1].plot(x,layer2_pre_2,'b-'); ax[0,1].set_ylabel('$\\psi_{20}+\\psi_{21}h_{1}+\\psi_{22}h_{2}+\\psi_{23}h_3$')\n",
|
" ax[0,1].plot(x,layer2_pre_2,'b-'); ax[0,1].set_ylabel(r'$\\psi_{20}+\\psi_{21}h_{1}+\\psi_{22}h_{2}+\\psi_{23}h_3$')\n",
|
||||||
" ax[0,2].plot(x,layer2_pre_3,'g-'); ax[0,2].set_ylabel('$\\psi_{30}+\\psi_{31}h_{1}+\\psi_{32}h_{2}+\\psi_{33}h_3$')\n",
|
" ax[0,2].plot(x,layer2_pre_3,'g-'); ax[0,2].set_ylabel(r'$\\psi_{30}+\\psi_{31}h_{1}+\\psi_{32}h_{2}+\\psi_{33}h_3$')\n",
|
||||||
" ax[1,0].plot(x,h1_prime,'r-'); ax[1,0].set_ylabel(\"$h_{1}^{'}$\")\n",
|
" ax[1,0].plot(x,h1_prime,'r-'); ax[1,0].set_ylabel(r\"$h_{1}^{'}$\")\n",
|
||||||
" ax[1,1].plot(x,h2_prime,'b-'); ax[1,1].set_ylabel(\"$h_{2}^{'}$\")\n",
|
" ax[1,1].plot(x,h2_prime,'b-'); ax[1,1].set_ylabel(r\"$h_{2}^{'}$\")\n",
|
||||||
" ax[1,2].plot(x,h3_prime,'g-'); ax[1,2].set_ylabel(\"$h_{3}^{'}$\")\n",
|
" ax[1,2].plot(x,h3_prime,'g-'); ax[1,2].set_ylabel(r\"$h_{3}^{'}$\")\n",
|
||||||
" ax[2,0].plot(x,phi1_h1_prime,'r-'); ax[2,0].set_ylabel(\"$\\phi_1 h_{1}^{'}$\")\n",
|
" ax[2,0].plot(x,phi1_h1_prime,'r-'); ax[2,0].set_ylabel(r\"$\\phi_1 h_{1}^{'}$\")\n",
|
||||||
" ax[2,1].plot(x,phi2_h2_prime,'b-'); ax[2,1].set_ylabel(\"$\\phi_2 h_{2}^{'}$\")\n",
|
" ax[2,1].plot(x,phi2_h2_prime,'b-'); ax[2,1].set_ylabel(r\"$\\phi_2 h_{2}^{'}$\")\n",
|
||||||
" ax[2,2].plot(x,phi3_h3_prime,'g-'); ax[2,2].set_ylabel(\"$\\phi_3 h_{3}^{'}$\")\n",
|
" ax[2,2].plot(x,phi3_h3_prime,'g-'); ax[2,2].set_ylabel(r\"$\\phi_3 h_{3}^{'}$\")\n",
|
||||||
"\n",
|
"\n",
|
||||||
" for plot_y in range(3):\n",
|
" for plot_y in range(3):\n",
|
||||||
" for plot_x in range(3):\n",
|
" for plot_x in range(3):\n",
|
||||||
" ax[plot_y,plot_x].set_xlim([0,1]);ax[plot_x,plot_y].set_ylim([-1,1])\n",
|
" ax[plot_y,plot_x].set_xlim([0,1]);ax[plot_x,plot_y].set_ylim([-1,1])\n",
|
||||||
" ax[plot_y,plot_x].set_aspect(0.5)\n",
|
" ax[plot_y,plot_x].set_aspect(0.5)\n",
|
||||||
" ax[2,plot_y].set_xlabel('Input, $x$');\n",
|
" ax[2,plot_y].set_xlabel(r'Input, $x$');\n",
|
||||||
" plt.show()\n",
|
" plt.show()\n",
|
||||||
"\n",
|
"\n",
|
||||||
" fig, ax = plt.subplots()\n",
|
" fig, ax = plt.subplots()\n",
|
||||||
" ax.plot(x,y)\n",
|
" ax.plot(x,y)\n",
|
||||||
" ax.set_xlabel('Input, $x$'); ax.set_ylabel('Output, $y$')\n",
|
" ax.set_xlabel(r'Input, $x$'); ax.set_ylabel(r'Output, $y$')\n",
|
||||||
" ax.set_xlim([0,1]);ax.set_ylim([-1,1])\n",
|
" ax.set_xlim([0,1]);ax.set_ylim([-1,1])\n",
|
||||||
" ax.set_aspect(0.5)\n",
|
" ax.set_aspect(0.5)\n",
|
||||||
" plt.show()"
|
" plt.show()"
|
||||||
|
|||||||
@@ -118,7 +118,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
"source": [
|
||||||
"Let's define a network. We'll just consider the inputs and outputs over the range [-1,1]. If you set the \"plot_all\" flat to True, you can see the details of how it was created."
|
"Let's define a network. We'll just consider the inputs and outputs over the range [-1,1]."
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "LxBJCObC-NTY"
|
"id": "LxBJCObC-NTY"
|
||||||
|
|||||||
@@ -118,7 +118,7 @@
|
|||||||
" ax.plot(x_model,y_model)\n",
|
" ax.plot(x_model,y_model)\n",
|
||||||
" if sigma_model is not None:\n",
|
" if sigma_model is not None:\n",
|
||||||
" ax.fill_between(x_model, y_model-2*sigma_model, y_model+2*sigma_model, color='lightgray')\n",
|
" ax.fill_between(x_model, y_model-2*sigma_model, y_model+2*sigma_model, color='lightgray')\n",
|
||||||
" ax.set_xlabel('Input, $x$'); ax.set_ylabel('Output, $y$')\n",
|
" ax.set_xlabel(r'Input, $x$'); ax.set_ylabel(r'Output, $y$')\n",
|
||||||
" ax.set_xlim([0,1]);ax.set_ylim([-1,1])\n",
|
" ax.set_xlim([0,1]);ax.set_ylim([-1,1])\n",
|
||||||
" ax.set_aspect(0.5)\n",
|
" ax.set_aspect(0.5)\n",
|
||||||
" if title is not None:\n",
|
" if title is not None:\n",
|
||||||
@@ -185,7 +185,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# Return probability under normal distribution for input x\n",
|
"# Return probability under normal distribution\n",
|
||||||
"def normal_distribution(y, mu, sigma):\n",
|
"def normal_distribution(y, mu, sigma):\n",
|
||||||
" # TODO-- write in the equation for the normal distribution\n",
|
" # TODO-- write in the equation for the normal distribution\n",
|
||||||
" # Equation 5.7 from the notes (you will need np.sqrt() and np.exp(), and math.pi)\n",
|
" # Equation 5.7 from the notes (you will need np.sqrt() and np.exp(), and math.pi)\n",
|
||||||
@@ -222,7 +222,7 @@
|
|||||||
"gauss_prob = normal_distribution(y_gauss, mu, sigma)\n",
|
"gauss_prob = normal_distribution(y_gauss, mu, sigma)\n",
|
||||||
"fig, ax = plt.subplots()\n",
|
"fig, ax = plt.subplots()\n",
|
||||||
"ax.plot(y_gauss, gauss_prob)\n",
|
"ax.plot(y_gauss, gauss_prob)\n",
|
||||||
"ax.set_xlabel('Input, $y$'); ax.set_ylabel('Probability $Pr(y)$')\n",
|
"ax.set_xlabel(r'Input, $y$'); ax.set_ylabel(r'Probability $Pr(y)$')\n",
|
||||||
"ax.set_xlim([-5,5]);ax.set_ylim([0,1.0])\n",
|
"ax.set_xlim([-5,5]);ax.set_ylim([0,1.0])\n",
|
||||||
"plt.show()\n",
|
"plt.show()\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -329,7 +329,7 @@
|
|||||||
"mu_pred = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
"mu_pred = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
||||||
"# Set the standard deviation to something reasonable\n",
|
"# Set the standard deviation to something reasonable\n",
|
||||||
"sigma = 0.2\n",
|
"sigma = 0.2\n",
|
||||||
"# Compute the log likelihood\n",
|
"# Compute the negative log likelihood\n",
|
||||||
"nll = compute_negative_log_likelihood(y_train, mu_pred, sigma)\n",
|
"nll = compute_negative_log_likelihood(y_train, mu_pred, sigma)\n",
|
||||||
"# Let's double check we get the right answer before proceeding\n",
|
"# Let's double check we get the right answer before proceeding\n",
|
||||||
"print(\"Correct answer = %9.9f, Your answer = %9.9f\"%(11.452419564,nll))"
|
"print(\"Correct answer = %9.9f, Your answer = %9.9f\"%(11.452419564,nll))"
|
||||||
@@ -388,7 +388,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
"source": [
|
||||||
"Now let's investigate finding the maximum likelihood / minimum log likelihood / least squares solution. For simplicity, we'll assume that all the parameters are correct except one and look at how the likelihood, log likelihood, and sum of squares change as we manipulate the last parameter. We'll start with overall y offset, beta_1 (formerly phi_0)"
|
"Now let's investigate finding the maximum likelihood / minimum negative log likelihood / least squares solution. For simplicity, we'll assume that all the parameters are correct except one and look at how the likelihood, negative log likelihood, and sum of squares change as we manipulate the last parameter. We'll start with overall y offset, beta_1 (formerly phi_0)"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "OgcRojvPWh4V"
|
"id": "OgcRojvPWh4V"
|
||||||
@@ -431,7 +431,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# Now let's plot the likelihood, negative log likelihood, and least squares as a function the value of the offset beta1\n",
|
"# Now let's plot the likelihood, negative log likelihood, and least squares as a function of the value of the offset beta1\n",
|
||||||
"fig, ax = plt.subplots(1,2)\n",
|
"fig, ax = plt.subplots(1,2)\n",
|
||||||
"fig.set_size_inches(10.5, 5.5)\n",
|
"fig.set_size_inches(10.5, 5.5)\n",
|
||||||
"fig.tight_layout(pad=10.0)\n",
|
"fig.tight_layout(pad=10.0)\n",
|
||||||
@@ -530,7 +530,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# Now let's plot the likelihood, negative log likelihood, and least squares as a function the value of the standard divation sigma\n",
|
"# Now let's plot the likelihood, negative log likelihood, and least squares as a function of the value of the standard deviation sigma\n",
|
||||||
"fig, ax = plt.subplots(1,2)\n",
|
"fig, ax = plt.subplots(1,2)\n",
|
||||||
"fig.set_size_inches(10.5, 5.5)\n",
|
"fig.set_size_inches(10.5, 5.5)\n",
|
||||||
"fig.tight_layout(pad=10.0)\n",
|
"fig.tight_layout(pad=10.0)\n",
|
||||||
@@ -581,7 +581,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
"source": [
|
||||||
"Obviously, to fit the full neural model we would vary all of the 10 parameters of the network in $\\boldsymbol\\beta_{0},\\boldsymbol\\omega_{0},\\boldsymbol\\beta_{1},\\boldsymbol\\omega_{1}$ (and maybe $\\sigma$) until we find the combination that have the maximum likelihood / minimum negative log likelihood / least squares.<br><br>\n",
|
"Obviously, to fit the full neural model we would vary all of the 10 parameters of the network in $\\boldsymbol\\beta_{0},\\boldsymbol\\Omega_{0},\\boldsymbol\\beta_{1},\\boldsymbol\\Omega_{1}$ (and maybe $\\sigma$) until we find the combination that have the maximum likelihood / minimum negative log likelihood / least squares.<br><br>\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Here we just varied one at a time as it is easier to see what is going on. This is known as **coordinate descent**.\n"
|
"Here we just varied one at a time as it is easier to see what is going on. This is known as **coordinate descent**.\n"
|
||||||
],
|
],
|
||||||
|
|||||||
@@ -4,7 +4,6 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"authorship_tag": "ABX9TyOSb+W2AOFVQm8FZcHAb2Jq",
|
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -120,12 +119,12 @@
|
|||||||
" fig.set_size_inches(7.0, 3.5)\n",
|
" fig.set_size_inches(7.0, 3.5)\n",
|
||||||
" fig.tight_layout(pad=3.0)\n",
|
" fig.tight_layout(pad=3.0)\n",
|
||||||
" ax[0].plot(x_model,out_model)\n",
|
" ax[0].plot(x_model,out_model)\n",
|
||||||
" ax[0].set_xlabel('Input, $x$'); ax[0].set_ylabel('Model output')\n",
|
" ax[0].set_xlabel(r'Input, $x$'); ax[0].set_ylabel(r'Model output')\n",
|
||||||
" ax[0].set_xlim([0,1]);ax[0].set_ylim([-4,4])\n",
|
" ax[0].set_xlim([0,1]);ax[0].set_ylim([-4,4])\n",
|
||||||
" if title is not None:\n",
|
" if title is not None:\n",
|
||||||
" ax[0].set_title(title)\n",
|
" ax[0].set_title(title)\n",
|
||||||
" ax[1].plot(x_model,lambda_model)\n",
|
" ax[1].plot(x_model,lambda_model)\n",
|
||||||
" ax[1].set_xlabel('Input, $x$'); ax[1].set_ylabel('$\\lambda$ or Pr(y=1|x)')\n",
|
" ax[1].set_xlabel(r'Input, $x$'); ax[1].set_ylabel(r'$\\lambda$ or Pr(y=1|x)')\n",
|
||||||
" ax[1].set_xlim([0,1]);ax[1].set_ylim([-0.05,1.05])\n",
|
" ax[1].set_xlim([0,1]);ax[1].set_ylim([-0.05,1.05])\n",
|
||||||
" if title is not None:\n",
|
" if title is not None:\n",
|
||||||
" ax[1].set_title(title)\n",
|
" ax[1].set_title(title)\n",
|
||||||
@@ -199,7 +198,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
"source": [
|
||||||
"The left is model output and the right is the model output after the sigmoid has been applied, so it now lies in the range [0,1] and represents the probability, that y=1. The black dots show the training data. We'll compute the the likelihood and the negative log likelihood."
|
"The left is model output and the right is the model output after the sigmoid has been applied, so it now lies in the range [0,1] and represents the probability, that y=1. The black dots show the training data. We'll compute the likelihood and the negative log likelihood."
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "MvVX6tl9AEXF"
|
"id": "MvVX6tl9AEXF"
|
||||||
@@ -208,7 +207,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# Return probability under Bernoulli distribution for input x\n",
|
"# Return probability under Bernoulli distribution for observed class y\n",
|
||||||
"def bernoulli_distribution(y, lambda_param):\n",
|
"def bernoulli_distribution(y, lambda_param):\n",
|
||||||
" # TODO-- write in the equation for the Bernoulli distribution\n",
|
" # TODO-- write in the equation for the Bernoulli distribution\n",
|
||||||
" # Equation 5.17 from the notes (you will need np.power)\n",
|
" # Equation 5.17 from the notes (you will need np.power)\n",
|
||||||
@@ -269,7 +268,7 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"# Let's test this\n",
|
"# Let's test this\n",
|
||||||
"beta_0, omega_0, beta_1, omega_1 = get_parameters()\n",
|
"beta_0, omega_0, beta_1, omega_1 = get_parameters()\n",
|
||||||
"# Use our neural network to predict the mean of the Gaussian\n",
|
"# Use our neural network to predict the Bernoulli parameter lambda\n",
|
||||||
"model_out = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
"model_out = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
||||||
"lambda_train = sigmoid(model_out)\n",
|
"lambda_train = sigmoid(model_out)\n",
|
||||||
"# Compute the likelihood\n",
|
"# Compute the likelihood\n",
|
||||||
@@ -336,7 +335,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
"source": [
|
||||||
"Now let's investigate finding the maximum likelihood / minimum negative log likelihood solution. For simplicity, we'll assume that all the parameters are fixed except one and look at how the likelihood and log likelihood change as we manipulate the last parameter. We'll start with overall y_offset, beta_1 (formerly phi_0)"
|
"Now let's investigate finding the maximum likelihood / minimum negative log likelihood solution. For simplicity, we'll assume that all the parameters are fixed except one and look at how the likelihood and negative log likelihood change as we manipulate the last parameter. We'll start with overall y_offset, beta_1 (formerly phi_0)"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "OgcRojvPWh4V"
|
"id": "OgcRojvPWh4V"
|
||||||
@@ -359,7 +358,7 @@
|
|||||||
" # Run the network with new parameters\n",
|
" # Run the network with new parameters\n",
|
||||||
" model_out = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
" model_out = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
||||||
" lambda_train = sigmoid(model_out)\n",
|
" lambda_train = sigmoid(model_out)\n",
|
||||||
" # Compute and store the three values\n",
|
" # Compute and store the two values\n",
|
||||||
" likelihoods[count] = compute_likelihood(y_train,lambda_train)\n",
|
" likelihoods[count] = compute_likelihood(y_train,lambda_train)\n",
|
||||||
" nlls[count] = compute_negative_log_likelihood(y_train, lambda_train)\n",
|
" nlls[count] = compute_negative_log_likelihood(y_train, lambda_train)\n",
|
||||||
" # Draw the model for every 20th parameter setting\n",
|
" # Draw the model for every 20th parameter setting\n",
|
||||||
@@ -378,7 +377,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# Now let's plot the likelihood, negative log likelihood, and least squares as a function the value of the offset beta1\n",
|
"# Now let's plot the likelihood and negative log likelihood as a function of the value of the offset beta1\n",
|
||||||
"fig, ax = plt.subplots()\n",
|
"fig, ax = plt.subplots()\n",
|
||||||
"fig.tight_layout(pad=5.0)\n",
|
"fig.tight_layout(pad=5.0)\n",
|
||||||
"likelihood_color = 'tab:red'\n",
|
"likelihood_color = 'tab:red'\n",
|
||||||
@@ -430,7 +429,7 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"They both give the same answer. But you can see from the likelihood above that the likelihood is very small unless the parameters are almost correct. So in practice, we would work with the negative log likelihood.<br><br>\n",
|
"They both give the same answer. But you can see from the likelihood above that the likelihood is very small unless the parameters are almost correct. So in practice, we would work with the negative log likelihood.<br><br>\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Again, to fit the full neural model we would vary all of the 10 parameters of the network in the $\\boldsymbol\\beta_{0},\\boldsymbol\\omega_{0},\\boldsymbol\\beta_{1},\\boldsymbol\\omega_{1}$ until we find the combination that have the maximum likelihood / minimum negative log likelihood.<br><br>\n",
|
"Again, to fit the full neural model we would vary all of the 10 parameters of the network in the $\\boldsymbol\\beta_{0},\\boldsymbol\\Omega_{0},\\boldsymbol\\beta_{1},\\boldsymbol\\Omega_{1}$ until we find the combination that have the maximum likelihood / minimum negative log likelihood.<br><br>\n",
|
||||||
"\n"
|
"\n"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
|
|||||||
@@ -1,18 +1,16 @@
|
|||||||
{
|
{
|
||||||
"cells": [
|
"cells": [
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"colab_type": "text",
|
"id": "view-in-github",
|
||||||
"id": "view-in-github"
|
"colab_type": "text"
|
||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap05/5_3_Multiclass_Cross_entropy_Loss.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap05/5_3_Multiclass_Cross_entropy_Loss.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "jSlFkICHwHQF"
|
"id": "jSlFkICHwHQF"
|
||||||
@@ -142,7 +140,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "PsgLZwsPxauP"
|
"id": "PsgLZwsPxauP"
|
||||||
@@ -209,13 +206,12 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "MvVX6tl9AEXF"
|
"id": "MvVX6tl9AEXF"
|
||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"The left is model output and the right is the model output after the softmax has been applied, so it now lies in the range [0,1] and represents the probability, that y=0 (red), 1 (green) and 2 (blue) The dots at the bottom show the training data with the same color scheme. So we want the red curve to be high where there are red dots, the green curve to be high where there are green dots, and the blue curve to be high where there are blue dots We'll compute the the likelihood and the negative log likelihood."
|
"The left is model output and the right is the model output after the softmax has been applied, so it now lies in the range [0,1] and represents the probability, that y=0 (red), 1 (green) and 2 (blue). The dots at the bottom show the training data with the same color scheme. So we want the red curve to be high where there are red dots, the green curve to be high where there are green dots, and the blue curve to be high where there are blue dots We'll compute the the likelihood and the negative log likelihood."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -226,7 +222,7 @@
|
|||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Return probability under Categorical distribution for input x\n",
|
"# Return probability under categorical distribution for observed class y\n",
|
||||||
"# Just take value from row k of lambda param where y =k,\n",
|
"# Just take value from row k of lambda param where y =k,\n",
|
||||||
"def categorical_distribution(y, lambda_param):\n",
|
"def categorical_distribution(y, lambda_param):\n",
|
||||||
" return np.array([lambda_param[row, i] for i, row in enumerate (y)])"
|
" return np.array([lambda_param[row, i] for i, row in enumerate (y)])"
|
||||||
@@ -248,7 +244,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "R5z_0dzQMF35"
|
"id": "R5z_0dzQMF35"
|
||||||
@@ -286,7 +281,7 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"# Let's test this\n",
|
"# Let's test this\n",
|
||||||
"beta_0, omega_0, beta_1, omega_1 = get_parameters()\n",
|
"beta_0, omega_0, beta_1, omega_1 = get_parameters()\n",
|
||||||
"# Use our neural network to predict the mean of the Gaussian\n",
|
"# Use our neural network to predict the parameters of the categorical distribution\n",
|
||||||
"model_out = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
"model_out = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
||||||
"lambda_train = softmax(model_out)\n",
|
"lambda_train = softmax(model_out)\n",
|
||||||
"# Compute the likelihood\n",
|
"# Compute the likelihood\n",
|
||||||
@@ -296,7 +291,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "HzphKgPfOvlk"
|
"id": "HzphKgPfOvlk"
|
||||||
@@ -318,7 +312,7 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"# Return the negative log likelihood of the data under the model\n",
|
"# Return the negative log likelihood of the data under the model\n",
|
||||||
"def compute_negative_log_likelihood(y_train, lambda_param):\n",
|
"def compute_negative_log_likelihood(y_train, lambda_param):\n",
|
||||||
" # TODO -- compute the likelihood of the data -- don't use the likelihood function above -- compute the negative sum of the log probabilities\n",
|
" # TODO -- compute the negative log likelihood of the data -- don't use the likelihood function above -- compute the negative sum of the log probabilities\n",
|
||||||
" # You will need np.sum(), np.log()\n",
|
" # You will need np.sum(), np.log()\n",
|
||||||
" # Replace the line below\n",
|
" # Replace the line below\n",
|
||||||
" nll = 0\n",
|
" nll = 0\n",
|
||||||
@@ -336,24 +330,23 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"# Let's test this\n",
|
"# Let's test this\n",
|
||||||
"beta_0, omega_0, beta_1, omega_1 = get_parameters()\n",
|
"beta_0, omega_0, beta_1, omega_1 = get_parameters()\n",
|
||||||
"# Use our neural network to predict the mean of the Gaussian\n",
|
"# Use our neural network to predict the parameters of the categorical distribution\n",
|
||||||
"model_out = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
"model_out = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
||||||
"# Pass the outputs through the softmax function\n",
|
"# Pass the outputs through the softmax function\n",
|
||||||
"lambda_train = softmax(model_out)\n",
|
"lambda_train = softmax(model_out)\n",
|
||||||
"# Compute the log likelihood\n",
|
"# Compute the negative log likelihood\n",
|
||||||
"nll = compute_negative_log_likelihood(y_train, lambda_train)\n",
|
"nll = compute_negative_log_likelihood(y_train, lambda_train)\n",
|
||||||
"# Let's double check we get the right answer before proceeding\n",
|
"# Let's double check we get the right answer before proceeding\n",
|
||||||
"print(\"Correct answer = %9.9f, Your answer = %9.9f\"%(17.015457867,nll))"
|
"print(\"Correct answer = %9.9f, Your answer = %9.9f\"%(17.015457867,nll))"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "OgcRojvPWh4V"
|
"id": "OgcRojvPWh4V"
|
||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"Now let's investigate finding the maximum likelihood / minimum log likelihood solution. For simplicity, we'll assume that all the parameters are fixed except one and look at how the likelihood and log likelihood change as we manipulate the last parameter. We'll start with overall y_offset, $\\beta_1$ (formerly $\\phi_0$)"
|
"Now let's investigate finding the maximum likelihood / minimum negative log likelihood solution. For simplicity, we'll assume that all the parameters are fixed except one and look at how the likelihood and negative log likelihood change as we manipulate the last parameter. We'll start with overall y_offset, $\\beta_1$ (formerly $\\phi_0$)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -378,7 +371,7 @@
|
|||||||
" # Run the network with new parameters\n",
|
" # Run the network with new parameters\n",
|
||||||
" model_out = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
" model_out = shallow_nn(x_train, beta_0, omega_0, beta_1, omega_1)\n",
|
||||||
" lambda_train = softmax(model_out)\n",
|
" lambda_train = softmax(model_out)\n",
|
||||||
" # Compute and store the three values\n",
|
" # Compute and store the two values\n",
|
||||||
" likelihoods[count] = compute_likelihood(y_train,lambda_train)\n",
|
" likelihoods[count] = compute_likelihood(y_train,lambda_train)\n",
|
||||||
" nlls[count] = compute_negative_log_likelihood(y_train, lambda_train)\n",
|
" nlls[count] = compute_negative_log_likelihood(y_train, lambda_train)\n",
|
||||||
" # Draw the model for every 20th parameter setting\n",
|
" # Draw the model for every 20th parameter setting\n",
|
||||||
@@ -397,7 +390,7 @@
|
|||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Now let's plot the likelihood, negative log likelihood, and least squares as a function the value of the offset beta1\n",
|
"# Now let's plot the likelihood and negative log likelihood as a function of the value of the offset beta1\n",
|
||||||
"fig, ax = plt.subplots()\n",
|
"fig, ax = plt.subplots()\n",
|
||||||
"fig.tight_layout(pad=5.0)\n",
|
"fig.tight_layout(pad=5.0)\n",
|
||||||
"likelihood_color = 'tab:red'\n",
|
"likelihood_color = 'tab:red'\n",
|
||||||
@@ -440,7 +433,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "771G8N1Vk5A2"
|
"id": "771G8N1Vk5A2"
|
||||||
@@ -448,16 +440,15 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"They both give the same answer. But you can see from the likelihood above that the likelihood is very small unless the parameters are almost correct. So in practice, we would work with the negative log likelihood.<br><br>\n",
|
"They both give the same answer. But you can see from the likelihood above that the likelihood is very small unless the parameters are almost correct. So in practice, we would work with the negative log likelihood.<br><br>\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Again, to fit the full neural model we would vary all of the 16 parameters of the network in the $\\boldsymbol\\beta_{0},\\boldsymbol\\omega_{0},\\boldsymbol\\beta_{1},\\boldsymbol\\omega_{1}$ until we find the combination that have the maximum likelihood / minimum negative log likelihood.<br><br>\n",
|
"Again, to fit the full neural model we would vary all of the 16 parameters of the network in the $\\boldsymbol\\beta_{0},\\boldsymbol\\Omega_{0},\\boldsymbol\\beta_{1},\\boldsymbol\\Omega_{1}$ until we find the combination that have the maximum likelihood / minimum negative log likelihood.<br><br>\n",
|
||||||
"\n"
|
"\n"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"authorship_tag": "ABX9TyOPv/l+ToaApJV7Nz+8AtpV",
|
"provenance": [],
|
||||||
"include_colab_link": true,
|
"include_colab_link": true
|
||||||
"provenance": []
|
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3",
|
"display_name": "Python 3",
|
||||||
|
|||||||
@@ -4,7 +4,6 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"authorship_tag": "ABX9TyN4E9Vtuk6t2BhZ0Ajv5SW3",
|
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -67,7 +66,7 @@
|
|||||||
" fig,ax = plt.subplots()\n",
|
" fig,ax = plt.subplots()\n",
|
||||||
" ax.plot(phi_plot,loss_function(phi_plot),'r-')\n",
|
" ax.plot(phi_plot,loss_function(phi_plot),'r-')\n",
|
||||||
" ax.set_xlim(0,1); ax.set_ylim(0,1)\n",
|
" ax.set_xlim(0,1); ax.set_ylim(0,1)\n",
|
||||||
" ax.set_xlabel('$\\phi$'); ax.set_ylabel('$L[\\phi]$')\n",
|
" ax.set_xlabel(r'$\\phi$'); ax.set_ylabel(r'$L[\\phi]$')\n",
|
||||||
" if a is not None and b is not None and c is not None and d is not None:\n",
|
" if a is not None and b is not None and c is not None and d is not None:\n",
|
||||||
" plt.axvspan(a, d, facecolor='k', alpha=0.2)\n",
|
" plt.axvspan(a, d, facecolor='k', alpha=0.2)\n",
|
||||||
" ax.plot([a,a],[0,1],'b-')\n",
|
" ax.plot([a,a],[0,1],'b-')\n",
|
||||||
@@ -113,7 +112,7 @@
|
|||||||
" b = 0.33\n",
|
" b = 0.33\n",
|
||||||
" c = 0.66\n",
|
" c = 0.66\n",
|
||||||
" d = 1.0\n",
|
" d = 1.0\n",
|
||||||
" n_iter =0;\n",
|
" n_iter = 0\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # While we haven't found the minimum closely enough\n",
|
" # While we haven't found the minimum closely enough\n",
|
||||||
" while np.abs(b-c) > thresh and n_iter < max_iter:\n",
|
" while np.abs(b-c) > thresh and n_iter < max_iter:\n",
|
||||||
@@ -131,8 +130,7 @@
|
|||||||
"\n",
|
"\n",
|
||||||
" print('Iter %d, a=%3.3f, b=%3.3f, c=%3.3f, d=%3.3f'%(n_iter, a,b,c,d))\n",
|
" print('Iter %d, a=%3.3f, b=%3.3f, c=%3.3f, d=%3.3f'%(n_iter, a,b,c,d))\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # Rule #1 If the HEIGHT at point A is less the HEIGHT at points B, C, and D then halve values of B, C, and D\n",
|
" # Rule #1 If the HEIGHT at point A is less than the HEIGHT at points B, C, and D then halve values of B, C, and D\n",
|
||||||
" # i.e. bring them closer to the original point\n",
|
|
||||||
" # i.e. bring them closer to the original point\n",
|
" # i.e. bring them closer to the original point\n",
|
||||||
" # TODO REPLACE THE BLOCK OF CODE BELOW WITH THIS RULE\n",
|
" # TODO REPLACE THE BLOCK OF CODE BELOW WITH THIS RULE\n",
|
||||||
" if (0):\n",
|
" if (0):\n",
|
||||||
@@ -140,7 +138,7 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # Rule #2 If the HEIGHT at point b is less than the HEIGHT at point c then\n",
|
" # Rule #2 If the HEIGHT at point b is less than the HEIGHT at point c then\n",
|
||||||
" # then point d becomes point c, and\n",
|
" # point d becomes point c, and\n",
|
||||||
" # point b becomes 1/3 between a and new d\n",
|
" # point b becomes 1/3 between a and new d\n",
|
||||||
" # point c becomes 2/3 between a and new d\n",
|
" # point c becomes 2/3 between a and new d\n",
|
||||||
" # TODO REPLACE THE BLOCK OF CODE BELOW WITH THIS RULE\n",
|
" # TODO REPLACE THE BLOCK OF CODE BELOW WITH THIS RULE\n",
|
||||||
@@ -148,7 +146,7 @@
|
|||||||
" continue;\n",
|
" continue;\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # Rule #3 If the HEIGHT at point c is less than the HEIGHT at point b then\n",
|
" # Rule #3 If the HEIGHT at point c is less than the HEIGHT at point b then\n",
|
||||||
" # then point a becomes point b, and\n",
|
" # point a becomes point b, and\n",
|
||||||
" # point b becomes 1/3 between new a and d\n",
|
" # point b becomes 1/3 between new a and d\n",
|
||||||
" # point c becomes 2/3 between new a and d\n",
|
" # point c becomes 2/3 between new a and d\n",
|
||||||
" # TODO REPLACE THE BLOCK OF CODE BELOW WITH THIS RULE\n",
|
" # TODO REPLACE THE BLOCK OF CODE BELOW WITH THIS RULE\n",
|
||||||
|
|||||||
@@ -117,7 +117,7 @@
|
|||||||
"id": "QU5mdGvpTtEG"
|
"id": "QU5mdGvpTtEG"
|
||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"Now lets create compute the sum of squares loss for the training data"
|
"Now let's compute the sum of squares loss for the training data"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -265,7 +265,7 @@
|
|||||||
"\\frac{\\partial L}{\\partial \\phi_{1}}&\\approx & \\frac{L[\\phi_0, \\phi_1+\\delta]-L[\\phi_0, \\phi_1]}{\\delta}\n",
|
"\\frac{\\partial L}{\\partial \\phi_{1}}&\\approx & \\frac{L[\\phi_0, \\phi_1+\\delta]-L[\\phi_0, \\phi_1]}{\\delta}\n",
|
||||||
"\\end{align}\n",
|
"\\end{align}\n",
|
||||||
"\n",
|
"\n",
|
||||||
"We can't do this when there are many parameters; for a million parameters, we would have to evaluate the loss function two million times, and usually computing the gradients directly is much more efficient."
|
"We can't do this when there are many parameters; for a million parameters, we would have to evaluate the loss function one million plus one times, and usually computing the gradients directly is much more efficient."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -317,7 +317,7 @@
|
|||||||
" b = 0.33 * max_dist\n",
|
" b = 0.33 * max_dist\n",
|
||||||
" c = 0.66 * max_dist\n",
|
" c = 0.66 * max_dist\n",
|
||||||
" d = 1.0 * max_dist\n",
|
" d = 1.0 * max_dist\n",
|
||||||
" n_iter =0;\n",
|
" n_iter = 0\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # While we haven't found the minimum closely enough\n",
|
" # While we haven't found the minimum closely enough\n",
|
||||||
" while np.abs(b-c) > thresh and n_iter < max_iter:\n",
|
" while np.abs(b-c) > thresh and n_iter < max_iter:\n",
|
||||||
@@ -341,7 +341,7 @@
|
|||||||
" continue;\n",
|
" continue;\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # Rule #2 If point b is less than point c then\n",
|
" # Rule #2 If point b is less than point c then\n",
|
||||||
" # then point d becomes point c, and\n",
|
" # point d becomes point c, and\n",
|
||||||
" # point b becomes 1/3 between a and new d\n",
|
" # point b becomes 1/3 between a and new d\n",
|
||||||
" # point c becomes 2/3 between a and new d\n",
|
" # point c becomes 2/3 between a and new d\n",
|
||||||
" if lossb < lossc:\n",
|
" if lossb < lossc:\n",
|
||||||
@@ -351,7 +351,7 @@
|
|||||||
" continue\n",
|
" continue\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # Rule #2 If point c is less than point b then\n",
|
" # Rule #2 If point c is less than point b then\n",
|
||||||
" # then point a becomes point b, and\n",
|
" # point a becomes point b, and\n",
|
||||||
" # point b becomes 1/3 between new a and d\n",
|
" # point b becomes 1/3 between new a and d\n",
|
||||||
" # point c becomes 2/3 between new a and d\n",
|
" # point c becomes 2/3 between new a and d\n",
|
||||||
" a = b\n",
|
" a = b\n",
|
||||||
|
|||||||
@@ -53,7 +53,7 @@
|
|||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Let's create our training data 30 pairs {x_i, y_i}\n",
|
"# Let's create our training data of 30 pairs {x_i, y_i}\n",
|
||||||
"# We'll try to fit the Gabor model to these data\n",
|
"# We'll try to fit the Gabor model to these data\n",
|
||||||
"data = np.array([[-1.920e+00,-1.422e+01,1.490e+00,-1.940e+00,-2.389e+00,-5.090e+00,\n",
|
"data = np.array([[-1.920e+00,-1.422e+01,1.490e+00,-1.940e+00,-2.389e+00,-5.090e+00,\n",
|
||||||
" -8.861e+00,3.578e+00,-6.010e+00,-6.995e+00,3.634e+00,8.743e-01,\n",
|
" -8.861e+00,3.578e+00,-6.010e+00,-6.995e+00,3.634e+00,8.743e-01,\n",
|
||||||
@@ -128,7 +128,7 @@
|
|||||||
"id": "QU5mdGvpTtEG"
|
"id": "QU5mdGvpTtEG"
|
||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"Now lets create compute the sum of squares loss for the training data"
|
"Now let's compute the sum of squares loss for the training data"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -198,7 +198,7 @@
|
|||||||
" b = np.floor(my_colormap_vals_dec - r * 256 *256 - g * 256)\n",
|
" b = np.floor(my_colormap_vals_dec - r * 256 *256 - g * 256)\n",
|
||||||
" my_colormap = ListedColormap(np.vstack((r,g,b)).transpose()/255.0)\n",
|
" my_colormap = ListedColormap(np.vstack((r,g,b)).transpose()/255.0)\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # Make grid of intercept/slope values to plot\n",
|
" # Make grid of offset/frequency values to plot\n",
|
||||||
" offsets_mesh, freqs_mesh = np.meshgrid(np.arange(-10,10.0,0.1), np.arange(2.5,22.5,0.1))\n",
|
" offsets_mesh, freqs_mesh = np.meshgrid(np.arange(-10,10.0,0.1), np.arange(2.5,22.5,0.1))\n",
|
||||||
" loss_mesh = np.zeros_like(freqs_mesh)\n",
|
" loss_mesh = np.zeros_like(freqs_mesh)\n",
|
||||||
" # Compute loss for every set of parameters\n",
|
" # Compute loss for every set of parameters\n",
|
||||||
@@ -343,7 +343,7 @@
|
|||||||
" b = 0.33 * max_dist\n",
|
" b = 0.33 * max_dist\n",
|
||||||
" c = 0.66 * max_dist\n",
|
" c = 0.66 * max_dist\n",
|
||||||
" d = 1.0 * max_dist\n",
|
" d = 1.0 * max_dist\n",
|
||||||
" n_iter =0;\n",
|
" n_iter = 0\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # While we haven't found the minimum closely enough\n",
|
" # While we haven't found the minimum closely enough\n",
|
||||||
" while np.abs(b-c) > thresh and n_iter < max_iter:\n",
|
" while np.abs(b-c) > thresh and n_iter < max_iter:\n",
|
||||||
@@ -367,7 +367,7 @@
|
|||||||
" continue;\n",
|
" continue;\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # Rule #2 If point b is less than point c then\n",
|
" # Rule #2 If point b is less than point c then\n",
|
||||||
" # then point d becomes point c, and\n",
|
" # point d becomes point c, and\n",
|
||||||
" # point b becomes 1/3 between a and new d\n",
|
" # point b becomes 1/3 between a and new d\n",
|
||||||
" # point c becomes 2/3 between a and new d\n",
|
" # point c becomes 2/3 between a and new d\n",
|
||||||
" if lossb < lossc:\n",
|
" if lossb < lossc:\n",
|
||||||
@@ -377,7 +377,7 @@
|
|||||||
" continue\n",
|
" continue\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # Rule #2 If point c is less than point b then\n",
|
" # Rule #2 If point c is less than point b then\n",
|
||||||
" # then point a becomes point b, and\n",
|
" # point a becomes point b, and\n",
|
||||||
" # point b becomes 1/3 between new a and d\n",
|
" # point b becomes 1/3 between new a and d\n",
|
||||||
" # point c becomes 2/3 between new a and d\n",
|
" # point c becomes 2/3 between new a and d\n",
|
||||||
" a = b\n",
|
" a = b\n",
|
||||||
|
|||||||
@@ -61,7 +61,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# Let's create our training data 30 pairs {x_i, y_i}\n",
|
"# Let's create our training data of 30 pairs {x_i, y_i}\n",
|
||||||
"# We'll try to fit the Gabor model to these data\n",
|
"# We'll try to fit the Gabor model to these data\n",
|
||||||
"data = np.array([[-1.920e+00,-1.422e+01,1.490e+00,-1.940e+00,-2.389e+00,-5.090e+00,\n",
|
"data = np.array([[-1.920e+00,-1.422e+01,1.490e+00,-1.940e+00,-2.389e+00,-5.090e+00,\n",
|
||||||
" -8.861e+00,3.578e+00,-6.010e+00,-6.995e+00,3.634e+00,8.743e-01,\n",
|
" -8.861e+00,3.578e+00,-6.010e+00,-6.995e+00,3.634e+00,8.743e-01,\n",
|
||||||
@@ -137,7 +137,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
"source": [
|
||||||
"Now lets compute the sum of squares loss for the training data and plot the loss function"
|
"Now let's compute the sum of squares loss for the training data and plot the loss function"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "QU5mdGvpTtEG"
|
"id": "QU5mdGvpTtEG"
|
||||||
@@ -160,7 +160,7 @@
|
|||||||
" b = np.floor(my_colormap_vals_dec - r * 256 *256 - g * 256)\n",
|
" b = np.floor(my_colormap_vals_dec - r * 256 *256 - g * 256)\n",
|
||||||
" my_colormap = ListedColormap(np.vstack((r,g,b)).transpose()/255.0)\n",
|
" my_colormap = ListedColormap(np.vstack((r,g,b)).transpose()/255.0)\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # Make grid of intercept/slope values to plot\n",
|
" # Make grid of offset/frequency values to plot\n",
|
||||||
" offsets_mesh, freqs_mesh = np.meshgrid(np.arange(-10,10.0,0.1), np.arange(2.5,22.5,0.1))\n",
|
" offsets_mesh, freqs_mesh = np.meshgrid(np.arange(-10,10.0,0.1), np.arange(2.5,22.5,0.1))\n",
|
||||||
" loss_mesh = np.zeros_like(freqs_mesh)\n",
|
" loss_mesh = np.zeros_like(freqs_mesh)\n",
|
||||||
" # Compute loss for every set of parameters\n",
|
" # Compute loss for every set of parameters\n",
|
||||||
@@ -365,7 +365,6 @@
|
|||||||
"\n",
|
"\n",
|
||||||
" # Update the parameters\n",
|
" # Update the parameters\n",
|
||||||
" phi_all[:,c_step+1:c_step+2] = phi_all[:,c_step:c_step+1] - alpha * momentum\n",
|
" phi_all[:,c_step+1:c_step+2] = phi_all[:,c_step:c_step+1] - alpha * momentum\n",
|
||||||
" # Measure loss and draw model every 8th step\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
"loss = compute_loss(data[0,:], data[1,:], model, phi_all[:,c_step+1:c_step+2])\n",
|
"loss = compute_loss(data[0,:], data[1,:], model, phi_all[:,c_step+1:c_step+2])\n",
|
||||||
"draw_model(data,model,phi_all[:,c_step+1], \"Iteration %d, loss = %f\"%(c_step+1,loss))\n",
|
"draw_model(data,model,phi_all[:,c_step+1], \"Iteration %d, loss = %f\"%(c_step+1,loss))\n",
|
||||||
|
|||||||
@@ -4,7 +4,6 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"authorship_tag": "ABX9TyNFsCOnucz1nQt7PBEnKeTV",
|
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -109,8 +108,8 @@
|
|||||||
" ax.contour(phi0mesh, phi1mesh, loss_function, 20, colors=['#80808080'])\n",
|
" ax.contour(phi0mesh, phi1mesh, loss_function, 20, colors=['#80808080'])\n",
|
||||||
" ax.plot(opt_path[0,:], opt_path[1,:],'-', color='#a0d9d3ff')\n",
|
" ax.plot(opt_path[0,:], opt_path[1,:],'-', color='#a0d9d3ff')\n",
|
||||||
" ax.plot(opt_path[0,:], opt_path[1,:],'.', color='#a0d9d3ff',markersize=10)\n",
|
" ax.plot(opt_path[0,:], opt_path[1,:],'.', color='#a0d9d3ff',markersize=10)\n",
|
||||||
" ax.set_xlabel(\"$\\phi_{0}$\")\n",
|
" ax.set_xlabel(r\"$\\phi_{0}$\")\n",
|
||||||
" ax.set_ylabel(\"$\\phi_1}$\")\n",
|
" ax.set_ylabel(r\"$\\phi_{1}$\")\n",
|
||||||
" plt.show()"
|
" plt.show()"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
@@ -169,7 +168,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
"source": [
|
||||||
"Because the function changes much faster in $\\phi_1$ than in $\\phi_0$, there is no great step size to choose. If we set the step size so that it makes sensible progress in the $\\phi_1$, then it takes many iterations to converge. If we set the step size tso that we make sensible progress in the $\\phi_{0}$ direction, then the path oscillates in the $\\phi_1$ direction. \n",
|
"Because the function changes much faster in $\\phi_1$ than in $\\phi_0$, there is no great step size to choose. If we set the step size so that it makes sensible progress in the $\\phi_1$ direction, then it takes many iterations to converge. If we set the step size so that we make sensible progress in the $\\phi_0$ direction, then the path oscillates in the $\\phi_1$ direction. \n",
|
||||||
"\n",
|
"\n",
|
||||||
"This motivates Adam. At the core of Adam is the idea that we should just determine which way is downhill along each axis (i.e. left/right for $\\phi_0$ or up/down for $\\phi_1$) and move a fixed distance in that direction."
|
"This motivates Adam. At the core of Adam is the idea that we should just determine which way is downhill along each axis (i.e. left/right for $\\phi_0$ or up/down for $\\phi_1$) and move a fixed distance in that direction."
|
||||||
],
|
],
|
||||||
@@ -222,7 +221,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
"source": [
|
||||||
"This moves towards the minimum at a sensible speed, but we never actually converge -- the solution just bounces back and forth between the last two points. To make it converge, we add momentum to both the estimates of the gradient and the pointwise squared gradient. We also modify the statistics by a factor that depends on the time to make sure the progress is now slow to start with."
|
"This moves towards the minimum at a sensible speed, but we never actually converge -- the solution just bounces back and forth between the last two points. To make it converge, we add momentum to both the estimates of the gradient and the pointwise squared gradient. We also modify the statistics by a factor that depends on the time to make sure the progress is not slow to start with."
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "_6KoKBJdGGI4"
|
"id": "_6KoKBJdGGI4"
|
||||||
|
|||||||
@@ -131,7 +131,7 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"beta0 = 1.0; beta1 = 2.0; beta2 = -3.0; beta3 = 0.4\n",
|
"beta0 = 1.0; beta1 = 2.0; beta2 = -3.0; beta3 = 0.4\n",
|
||||||
"omega0 = 0.1; omega1 = -0.4; omega2 = 2.0; omega3 = 3.0\n",
|
"omega0 = 0.1; omega1 = -0.4; omega2 = 2.0; omega3 = 3.0\n",
|
||||||
"x = 2.3; y =2.0\n",
|
"x = 2.3; y = 2.0\n",
|
||||||
"l_i_func = loss(x,y,beta0,beta1,beta2,beta3,omega0,omega1,omega2,omega3)\n",
|
"l_i_func = loss(x,y,beta0,beta1,beta2,beta3,omega0,omega1,omega2,omega3)\n",
|
||||||
"print('l_i=%3.3f'%l_i_func)"
|
"print('l_i=%3.3f'%l_i_func)"
|
||||||
]
|
]
|
||||||
@@ -279,7 +279,7 @@
|
|||||||
"f2: true value = 7.137, your value = 0.000\n",
|
"f2: true value = 7.137, your value = 0.000\n",
|
||||||
"h3: true value = 0.657, your value = 0.000\n",
|
"h3: true value = 0.657, your value = 0.000\n",
|
||||||
"f3: true value = 2.372, your value = 0.000\n",
|
"f3: true value = 2.372, your value = 0.000\n",
|
||||||
"like original = 0.139, like from forward pass = 0.000\n"
|
"l_i original = 0.139, l_i from forward pass = 0.000\n"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
@@ -292,7 +292,7 @@
|
|||||||
"print(\"f2: true value = %3.3f, your value = %3.3f\"%(7.137, f2))\n",
|
"print(\"f2: true value = %3.3f, your value = %3.3f\"%(7.137, f2))\n",
|
||||||
"print(\"h3: true value = %3.3f, your value = %3.3f\"%(0.657, h3))\n",
|
"print(\"h3: true value = %3.3f, your value = %3.3f\"%(0.657, h3))\n",
|
||||||
"print(\"f3: true value = %3.3f, your value = %3.3f\"%(2.372, f3))\n",
|
"print(\"f3: true value = %3.3f, your value = %3.3f\"%(2.372, f3))\n",
|
||||||
"print(\"like original = %3.3f, like from forward pass = %3.3f\"%(l_i_func, l_i))\n"
|
"print(\"l_i original = %3.3f, l_i from forward pass = %3.3f\"%(l_i_func, l_i))\n"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -115,9 +115,9 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
"source": [
|
||||||
"Now let's run our random network. The weight matrices $\\boldsymbol\\Omega_{1\\ldots K}$ are the entries of the list \"all_weights\" and the biases $\\boldsymbol\\beta_{1\\ldots k}$ are the entries of the list \"all_biases\"\n",
|
"Now let's run our random network. The weight matrices $\\boldsymbol\\Omega_{1\\ldots K}$ are the entries of the list \"all_weights\" and the biases $\\boldsymbol\\beta_{1\\ldots K}$ are the entries of the list \"all_biases\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"We know that we will need the activations $\\mathbf{f}_{0\\ldots K}$ and the activations $\\mathbf{h}_{1\\ldots K}$ for the forward pass of backpropagation, so we'll store and return these as well.\n"
|
"We know that we will need the preactivations $\\mathbf{f}_{0\\ldots K}$ and the activations $\\mathbf{h}_{1\\ldots K}$ for the forward pass of backpropagation, so we'll store and return these as well.\n"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "5irtyxnLJSGX"
|
"id": "5irtyxnLJSGX"
|
||||||
@@ -132,7 +132,7 @@
|
|||||||
" K = len(all_weights) -1\n",
|
" K = len(all_weights) -1\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # We'll store the pre-activations at each layer in a list \"all_f\"\n",
|
" # We'll store the pre-activations at each layer in a list \"all_f\"\n",
|
||||||
" # and the activations in a second list[all_h].\n",
|
" # and the activations in a second list \"all_h\".\n",
|
||||||
" all_f = [None] * (K+1)\n",
|
" all_f = [None] * (K+1)\n",
|
||||||
" all_h = [None] * (K+1)\n",
|
" all_h = [None] * (K+1)\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -143,7 +143,7 @@
|
|||||||
" # Run through the layers, calculating all_f[0...K-1] and all_h[1...K]\n",
|
" # Run through the layers, calculating all_f[0...K-1] and all_h[1...K]\n",
|
||||||
" for layer in range(K):\n",
|
" for layer in range(K):\n",
|
||||||
" # Update preactivations and activations at this layer according to eqn 7.16\n",
|
" # Update preactivations and activations at this layer according to eqn 7.16\n",
|
||||||
" # Remmember to use np.matmul for matrrix multiplications\n",
|
" # Remember to use np.matmul for matrix multiplications\n",
|
||||||
" # TODO -- Replace the lines below\n",
|
" # TODO -- Replace the lines below\n",
|
||||||
" all_f[layer] = all_h[layer]\n",
|
" all_f[layer] = all_h[layer]\n",
|
||||||
" all_h[layer+1] = all_f[layer]\n",
|
" all_h[layer+1] = all_f[layer]\n",
|
||||||
@@ -166,7 +166,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# Define in input\n",
|
"# Define input\n",
|
||||||
"net_input = np.ones((D_i,1)) * 1.2\n",
|
"net_input = np.ones((D_i,1)) * 1.2\n",
|
||||||
"# Compute network output\n",
|
"# Compute network output\n",
|
||||||
"net_output, all_f, all_h = compute_network_output(net_input,all_weights, all_biases)\n",
|
"net_output, all_f, all_h = compute_network_output(net_input,all_weights, all_biases)\n",
|
||||||
@@ -249,7 +249,7 @@
|
|||||||
"\n",
|
"\n",
|
||||||
" # Now work backwards through the network\n",
|
" # Now work backwards through the network\n",
|
||||||
" for layer in range(K,-1,-1):\n",
|
" for layer in range(K,-1,-1):\n",
|
||||||
" # TODO Calculate the derivatives of the loss with respect to the biases at layer this from all_dl_df[layer]. (eq 7.21)\n",
|
" # TODO Calculate the derivatives of the loss with respect to the biases at layer from all_dl_df[layer]. (eq 7.21)\n",
|
||||||
" # NOTE! To take a copy of matrix X, use Z=np.array(X)\n",
|
" # NOTE! To take a copy of matrix X, use Z=np.array(X)\n",
|
||||||
" # REPLACE THIS LINE\n",
|
" # REPLACE THIS LINE\n",
|
||||||
" all_dl_dbiases[layer] = np.zeros_like(all_biases[layer])\n",
|
" all_dl_dbiases[layer] = np.zeros_like(all_biases[layer])\n",
|
||||||
@@ -265,7 +265,7 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
" if layer > 0:\n",
|
" if layer > 0:\n",
|
||||||
" # TODO Calculate the derivatives of the loss with respect to the pre-activation f (use deriv of ReLu function, first part of last line of eq. 7.24)\n",
|
" # TODO Calculate the derivatives of the loss with respect to the pre-activation f (use derivative of ReLu function, first part of last line of eq. 7.24)\n",
|
||||||
" # REPLACE THIS LINE\n",
|
" # REPLACE THIS LINE\n",
|
||||||
" all_dl_df[layer-1] = np.zeros_like(all_f[layer-1])\n",
|
" all_dl_df[layer-1] = np.zeros_like(all_f[layer-1])\n",
|
||||||
"\n",
|
"\n",
|
||||||
|
|||||||
@@ -4,7 +4,6 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"authorship_tag": "ABX9TyNHLXFpiSnUzAbzhtOk+bxu",
|
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -117,10 +116,10 @@
|
|||||||
"def compute_network_output(net_input, all_weights, all_biases):\n",
|
"def compute_network_output(net_input, all_weights, all_biases):\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # Retrieve number of layers\n",
|
" # Retrieve number of layers\n",
|
||||||
" K = len(all_weights) -1\n",
|
" K = len(all_weights)-1\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # We'll store the pre-activations at each layer in a list \"all_f\"\n",
|
" # We'll store the pre-activations at each layer in a list \"all_f\"\n",
|
||||||
" # and the activations in a second list[all_h].\n",
|
" # and the activations in a second list \"all_h\".\n",
|
||||||
" all_f = [None] * (K+1)\n",
|
" all_f = [None] * (K+1)\n",
|
||||||
" all_h = [None] * (K+1)\n",
|
" all_h = [None] * (K+1)\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -151,7 +150,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
"source": [
|
||||||
"Now let's investigate how this the size of the outputs vary as we change the initialization variance:\n"
|
"Now let's investigate how the size of the outputs vary as we change the initialization variance:\n"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "bIUrcXnOqChl"
|
"id": "bIUrcXnOqChl"
|
||||||
@@ -164,7 +163,7 @@
|
|||||||
"K = 5\n",
|
"K = 5\n",
|
||||||
"# Number of neurons per layer\n",
|
"# Number of neurons per layer\n",
|
||||||
"D = 8\n",
|
"D = 8\n",
|
||||||
" # Input layer\n",
|
"# Input layer\n",
|
||||||
"D_i = 1\n",
|
"D_i = 1\n",
|
||||||
"# Output layer\n",
|
"# Output layer\n",
|
||||||
"D_o = 1\n",
|
"D_o = 1\n",
|
||||||
@@ -177,7 +176,7 @@
|
|||||||
"data_in = np.random.normal(size=(1,n_data))\n",
|
"data_in = np.random.normal(size=(1,n_data))\n",
|
||||||
"net_output, all_f, all_h = compute_network_output(data_in, all_weights, all_biases)\n",
|
"net_output, all_f, all_h = compute_network_output(data_in, all_weights, all_biases)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"for layer in range(K):\n",
|
"for layer in range(1,K+1):\n",
|
||||||
" print(\"Layer %d, std of hidden units = %3.3f\"%(layer, np.std(all_h[layer])))"
|
" print(\"Layer %d, std of hidden units = %3.3f\"%(layer, np.std(all_h[layer])))"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
@@ -196,7 +195,7 @@
|
|||||||
"# Change this to 50 layers with 80 hidden units per layer\n",
|
"# Change this to 50 layers with 80 hidden units per layer\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# TO DO\n",
|
"# TO DO\n",
|
||||||
"# Now experiment with sigma_sq_omega to try to stop the variance of the forward computation explode"
|
"# Now experiment with sigma_sq_omega to try to stop the variance of the forward computation exploding"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "VL_SO4tar3DC"
|
"id": "VL_SO4tar3DC"
|
||||||
@@ -249,6 +248,9 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"# Main backward pass routine\n",
|
"# Main backward pass routine\n",
|
||||||
"def backward_pass(all_weights, all_biases, all_f, all_h, y):\n",
|
"def backward_pass(all_weights, all_biases, all_f, all_h, y):\n",
|
||||||
|
" # Retrieve number of layers\n",
|
||||||
|
" K = len(all_weights) - 1\n",
|
||||||
|
"\n",
|
||||||
" # We'll store the derivatives dl_dweights and dl_dbiases in lists as well\n",
|
" # We'll store the derivatives dl_dweights and dl_dbiases in lists as well\n",
|
||||||
" all_dl_dweights = [None] * (K+1)\n",
|
" all_dl_dweights = [None] * (K+1)\n",
|
||||||
" all_dl_dbiases = [None] * (K+1)\n",
|
" all_dl_dbiases = [None] * (K+1)\n",
|
||||||
@@ -297,7 +299,7 @@
|
|||||||
"K = 5\n",
|
"K = 5\n",
|
||||||
"# Number of neurons per layer\n",
|
"# Number of neurons per layer\n",
|
||||||
"D = 8\n",
|
"D = 8\n",
|
||||||
" # Input layer\n",
|
"# Input layer\n",
|
||||||
"D_i = 1\n",
|
"D_i = 1\n",
|
||||||
"# Output layer\n",
|
"# Output layer\n",
|
||||||
"D_o = 1\n",
|
"D_o = 1\n",
|
||||||
@@ -335,8 +337,8 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# You can see that the values of the hidden units are increasing on average (the variance is across all hidden units at the layer\n",
|
"# You can see that the gradients of the hidden units are increasing on average (the standard deviation is across all hidden units at the layer\n",
|
||||||
"# and the 1000 training examples\n",
|
"# and the 100 training examples\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# TO DO\n",
|
"# TO DO\n",
|
||||||
"# Change this to 50 layers with 80 hidden units per layer\n",
|
"# Change this to 50 layers with 80 hidden units per layer\n",
|
||||||
|
|||||||
@@ -46,8 +46,8 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# Run this if you're in a Colab to make a local copy of the MNIST 1D repository\n",
|
"# Run this if you're in a Colab to install MNIST 1D repository\n",
|
||||||
"!git clone https://github.com/greydanus/mnist1d"
|
"%pip install git+https://github.com/greydanus/mnist1d"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "ifVjS4cTOqKz"
|
"id": "ifVjS4cTOqKz"
|
||||||
@@ -83,6 +83,8 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
|
"!mkdir ./sample_data\n",
|
||||||
|
"\n",
|
||||||
"args = mnist1d.data.get_dataset_args()\n",
|
"args = mnist1d.data.get_dataset_args()\n",
|
||||||
"data = mnist1d.data.get_dataset(args, path='./sample_data/mnist1d_data.pkl', download=False, regenerate=False)\n",
|
"data = mnist1d.data.get_dataset(args, path='./sample_data/mnist1d_data.pkl', download=False, regenerate=False)\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -136,7 +138,6 @@
|
|||||||
"optimizer = torch.optim.SGD(model.parameters(), lr = 0.05, momentum=0.9)\n",
|
"optimizer = torch.optim.SGD(model.parameters(), lr = 0.05, momentum=0.9)\n",
|
||||||
"# object that decreases learning rate by half every 10 epochs\n",
|
"# object that decreases learning rate by half every 10 epochs\n",
|
||||||
"scheduler = StepLR(optimizer, step_size=10, gamma=0.5)\n",
|
"scheduler = StepLR(optimizer, step_size=10, gamma=0.5)\n",
|
||||||
"# create 100 dummy data points and store in data loader class\n",
|
|
||||||
"x_train = torch.tensor(data['x'].astype('float32'))\n",
|
"x_train = torch.tensor(data['x'].astype('float32'))\n",
|
||||||
"y_train = torch.tensor(data['y'].transpose().astype('long'))\n",
|
"y_train = torch.tensor(data['y'].transpose().astype('long'))\n",
|
||||||
"x_test= torch.tensor(data['x_test'].astype('float32'))\n",
|
"x_test= torch.tensor(data['x_test'].astype('float32'))\n",
|
||||||
|
|||||||
@@ -92,7 +92,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# Draw the fitted function, together win uncertainty used to generate points\n",
|
"# Draw the fitted function, together with uncertainty used to generate points\n",
|
||||||
"def plot_function(x_func, y_func, x_data=None,y_data=None, x_model = None, y_model =None, sigma_func = None, sigma_model=None):\n",
|
"def plot_function(x_func, y_func, x_data=None,y_data=None, x_model = None, y_model =None, sigma_func = None, sigma_model=None):\n",
|
||||||
"\n",
|
"\n",
|
||||||
" fig,ax = plt.subplots()\n",
|
" fig,ax = plt.subplots()\n",
|
||||||
@@ -203,7 +203,7 @@
|
|||||||
"# Closed form solution\n",
|
"# Closed form solution\n",
|
||||||
"beta, omega = fit_model_closed_form(x_data,y_data,n_hidden=3)\n",
|
"beta, omega = fit_model_closed_form(x_data,y_data,n_hidden=3)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Get prediction for model across graph grange\n",
|
"# Get prediction for model across graph range\n",
|
||||||
"x_model = np.linspace(0,1,100);\n",
|
"x_model = np.linspace(0,1,100);\n",
|
||||||
"y_model = network(x_model, beta, omega)\n",
|
"y_model = network(x_model, beta, omega)\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -268,7 +268,7 @@
|
|||||||
"mean_model, std_model = get_model_mean_variance(n_data, n_datasets, n_hidden, sigma_func) ;\n",
|
"mean_model, std_model = get_model_mean_variance(n_data, n_datasets, n_hidden, sigma_func) ;\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Plot the results\n",
|
"# Plot the results\n",
|
||||||
"plot_function(x_func, y_func, x_data,y_data, x_model, mean_model, sigma_model=std_model)"
|
"plot_function(x_func, y_func, x_model=x_model, y_model=mean_model, sigma_model=std_model)"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "Wxk64t2SoX9c"
|
"id": "Wxk64t2SoX9c"
|
||||||
@@ -302,7 +302,7 @@
|
|||||||
"sigma_func = 0.3\n",
|
"sigma_func = 0.3\n",
|
||||||
"n_hidden = 5\n",
|
"n_hidden = 5\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Set random seed so that get same result every time\n",
|
"# Set random seed so that we get the same result every time\n",
|
||||||
"np.random.seed(1)\n",
|
"np.random.seed(1)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"for c_hidden in range(len(hidden_variables)):\n",
|
"for c_hidden in range(len(hidden_variables)):\n",
|
||||||
|
|||||||
@@ -5,7 +5,6 @@
|
|||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"gpuType": "T4",
|
"gpuType": "T4",
|
||||||
"authorship_tag": "ABX9TyN/KUpEObCKnHZ/4Onp5sHG",
|
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -48,8 +47,8 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# Run this if you're in a Colab to make a local copy of the MNIST 1D repository\n",
|
"# Run this if you're in a Colab to install MNIST 1D repository\n",
|
||||||
"!git clone https://github.com/greydanus/mnist1d"
|
"!pip install git+https://github.com/greydanus/mnist1d"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "fn9BP5N5TguP"
|
"id": "fn9BP5N5TguP"
|
||||||
@@ -124,7 +123,7 @@
|
|||||||
" D_k = n_hidden # Hidden dimensions\n",
|
" D_k = n_hidden # Hidden dimensions\n",
|
||||||
" D_o = 10 # Output dimensions\n",
|
" D_o = 10 # Output dimensions\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # Define a model with two hidden layers of size 100\n",
|
" # Define a model with two hidden layers\n",
|
||||||
" # And ReLU activations between them\n",
|
" # And ReLU activations between them\n",
|
||||||
" model = nn.Sequential(\n",
|
" model = nn.Sequential(\n",
|
||||||
" nn.Linear(D_i, D_k),\n",
|
" nn.Linear(D_i, D_k),\n",
|
||||||
@@ -157,7 +156,6 @@
|
|||||||
" optimizer = torch.optim.SGD(model.parameters(), lr = 0.01, momentum=0.9)\n",
|
" optimizer = torch.optim.SGD(model.parameters(), lr = 0.01, momentum=0.9)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # create 100 dummy data points and store in data loader class\n",
|
|
||||||
" x_train = torch.tensor(data['x'].astype('float32'))\n",
|
" x_train = torch.tensor(data['x'].astype('float32'))\n",
|
||||||
" y_train = torch.tensor(data['y'].transpose().astype('long'))\n",
|
" y_train = torch.tensor(data['y'].transpose().astype('long'))\n",
|
||||||
" x_test= torch.tensor(data['x_test'].astype('float32'))\n",
|
" x_test= torch.tensor(data['x_test'].astype('float32'))\n",
|
||||||
|
|||||||
@@ -224,7 +224,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
"source": [
|
||||||
"You should see see that by the time we get to 300 dimensions most of the volume is in the outer 1 percent. <br><br>\n",
|
"You should see that by the time we get to 300 dimensions most of the volume is in the outer 1 percent. <br><br>\n",
|
||||||
"\n",
|
"\n",
|
||||||
"The conclusion of all of this is that in high dimensions you should be sceptical of your intuitions about how things work. I have tried to visualize many things in one or two dimensions in the book, but you should also be sceptical about these visualizations!"
|
"The conclusion of all of this is that in high dimensions you should be sceptical of your intuitions about how things work. I have tried to visualize many things in one or two dimensions in the book, but you should also be sceptical about these visualizations!"
|
||||||
],
|
],
|
||||||
|
|||||||
@@ -178,7 +178,7 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"def draw_loss_function(compute_loss, data, model, my_colormap, phi_iters = None):\n",
|
"def draw_loss_function(compute_loss, data, model, my_colormap, phi_iters = None):\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # Make grid of intercept/slope values to plot\n",
|
" # Make grid of offset/frequency values to plot\n",
|
||||||
" offsets_mesh, freqs_mesh = np.meshgrid(np.arange(-10,10.0,0.1), np.arange(2.5,22.5,0.1))\n",
|
" offsets_mesh, freqs_mesh = np.meshgrid(np.arange(-10,10.0,0.1), np.arange(2.5,22.5,0.1))\n",
|
||||||
" loss_mesh = np.zeros_like(freqs_mesh)\n",
|
" loss_mesh = np.zeros_like(freqs_mesh)\n",
|
||||||
" # Compute loss for every set of parameters\n",
|
" # Compute loss for every set of parameters\n",
|
||||||
@@ -304,7 +304,7 @@
|
|||||||
"for c_step in range (n_steps):\n",
|
"for c_step in range (n_steps):\n",
|
||||||
" # Do gradient descent step\n",
|
" # Do gradient descent step\n",
|
||||||
" phi_all[:,c_step+1:c_step+2] = gradient_descent_step(phi_all[:,c_step:c_step+1],data, model)\n",
|
" phi_all[:,c_step+1:c_step+2] = gradient_descent_step(phi_all[:,c_step:c_step+1],data, model)\n",
|
||||||
" # Measure loss and draw model every 4th step\n",
|
" # Measure loss and draw model every 8th step\n",
|
||||||
" if c_step % 8 == 0:\n",
|
" if c_step % 8 == 0:\n",
|
||||||
" loss = compute_loss(data[0,:], data[1,:], model, phi_all[:,c_step+1:c_step+2])\n",
|
" loss = compute_loss(data[0,:], data[1,:], model, phi_all[:,c_step+1:c_step+2])\n",
|
||||||
" draw_model(data,model,phi_all[:,c_step+1], \"Iteration %d, loss = %f\"%(c_step+1,loss))\n",
|
" draw_model(data,model,phi_all[:,c_step+1], \"Iteration %d, loss = %f\"%(c_step+1,loss))\n",
|
||||||
@@ -369,7 +369,7 @@
|
|||||||
"# Code to draw the regularization function\n",
|
"# Code to draw the regularization function\n",
|
||||||
"def draw_reg_function():\n",
|
"def draw_reg_function():\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # Make grid of intercept/slope values to plot\n",
|
" # Make grid of offset/frequency values to plot\n",
|
||||||
" offsets_mesh, freqs_mesh = np.meshgrid(np.arange(-10,10.0,0.1), np.arange(2.5,22.5,0.1))\n",
|
" offsets_mesh, freqs_mesh = np.meshgrid(np.arange(-10,10.0,0.1), np.arange(2.5,22.5,0.1))\n",
|
||||||
" loss_mesh = np.zeros_like(freqs_mesh)\n",
|
" loss_mesh = np.zeros_like(freqs_mesh)\n",
|
||||||
" # Compute loss for every set of parameters\n",
|
" # Compute loss for every set of parameters\n",
|
||||||
@@ -399,7 +399,7 @@
|
|||||||
"# Code to draw loss function with regularization\n",
|
"# Code to draw loss function with regularization\n",
|
||||||
"def draw_loss_function_reg(data, model, lambda_, my_colormap, phi_iters = None):\n",
|
"def draw_loss_function_reg(data, model, lambda_, my_colormap, phi_iters = None):\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # Make grid of intercept/slope values to plot\n",
|
" # Make grid of offset/frequency values to plot\n",
|
||||||
" offsets_mesh, freqs_mesh = np.meshgrid(np.arange(-10,10.0,0.1), np.arange(2.5,22.5,0.1))\n",
|
" offsets_mesh, freqs_mesh = np.meshgrid(np.arange(-10,10.0,0.1), np.arange(2.5,22.5,0.1))\n",
|
||||||
" loss_mesh = np.zeros_like(freqs_mesh)\n",
|
" loss_mesh = np.zeros_like(freqs_mesh)\n",
|
||||||
" # Compute loss for every set of parameters\n",
|
" # Compute loss for every set of parameters\n",
|
||||||
@@ -512,7 +512,7 @@
|
|||||||
"for c_step in range (n_steps):\n",
|
"for c_step in range (n_steps):\n",
|
||||||
" # Do gradient descent step\n",
|
" # Do gradient descent step\n",
|
||||||
" phi_all[:,c_step+1:c_step+2] = gradient_descent_step2(phi_all[:,c_step:c_step+1],lambda_, data, model)\n",
|
" phi_all[:,c_step+1:c_step+2] = gradient_descent_step2(phi_all[:,c_step:c_step+1],lambda_, data, model)\n",
|
||||||
" # Measure loss and draw model every 4th step\n",
|
" # Measure loss and draw model every 8th step\n",
|
||||||
" if c_step % 8 == 0:\n",
|
" if c_step % 8 == 0:\n",
|
||||||
" loss = compute_loss2(data[0,:], data[1,:], model, phi_all[:,c_step+1:c_step+2], lambda_)\n",
|
" loss = compute_loss2(data[0,:], data[1,:], model, phi_all[:,c_step+1:c_step+2], lambda_)\n",
|
||||||
" draw_model(data,model,phi_all[:,c_step+1], \"Iteration %d, loss = %f\"%(c_step+1,loss))\n",
|
" draw_model(data,model,phi_all[:,c_step+1], \"Iteration %d, loss = %f\"%(c_step+1,loss))\n",
|
||||||
@@ -528,7 +528,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
"source": [
|
||||||
"You should see that the gradient descent algorithm now finds the correct minimum. By applying a tiny bit of domain knowledge (the parameter phi0 tends to be near zero and the parameters phi1 tends to be near 12.5), we get a better solution. However, the cost is that this solution is slightly biased towards this prior knowledge."
|
"You should see that the gradient descent algorithm now finds the correct minimum. By applying a tiny bit of domain knowledge (the parameter phi0 tends to be near zero and the parameter phi1 tends to be near 12.5), we get a better solution. However, the cost is that this solution is slightly biased towards this prior knowledge."
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "wrszSLrqZG4k"
|
"id": "wrszSLrqZG4k"
|
||||||
|
|||||||
@@ -4,7 +4,6 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"authorship_tag": "ABX9TyOR3WOJwfTlMD8eOLsPfPrz",
|
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -140,7 +139,7 @@
|
|||||||
" fig.set_size_inches(7,7)\n",
|
" fig.set_size_inches(7,7)\n",
|
||||||
" ax.contourf(phi0mesh, phi1mesh, loss_function, 256, cmap=my_colormap);\n",
|
" ax.contourf(phi0mesh, phi1mesh, loss_function, 256, cmap=my_colormap);\n",
|
||||||
" ax.contour(phi0mesh, phi1mesh, loss_function, 20, colors=['#80808080'])\n",
|
" ax.contour(phi0mesh, phi1mesh, loss_function, 20, colors=['#80808080'])\n",
|
||||||
" ax.set_xlabel('$\\phi_{0}$'); ax.set_ylabel('$\\phi_{1}$')\n",
|
" ax.set_xlabel(r'$\\phi_{0}$'); ax.set_ylabel(r'$\\phi_{1}$')\n",
|
||||||
"\n",
|
"\n",
|
||||||
" if grad_path_typical_lr is not None:\n",
|
" if grad_path_typical_lr is not None:\n",
|
||||||
" ax.plot(grad_path_typical_lr[0,:], grad_path_typical_lr[1,:],'ro-')\n",
|
" ax.plot(grad_path_typical_lr[0,:], grad_path_typical_lr[1,:],'ro-')\n",
|
||||||
@@ -310,7 +309,7 @@
|
|||||||
"grad_path_tiny_lr = None ;\n",
|
"grad_path_tiny_lr = None ;\n",
|
||||||
"\n",
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# TODO: Run the gradient descent on the modified loss\n",
|
"# TODO: Run the gradient descent on the unmodified loss\n",
|
||||||
"# function with 100 steps and a very small learning rate alpha of 0.05\n",
|
"# function with 100 steps and a very small learning rate alpha of 0.05\n",
|
||||||
"# Replace this line:\n",
|
"# Replace this line:\n",
|
||||||
"grad_path_typical_lr = None ;\n",
|
"grad_path_typical_lr = None ;\n",
|
||||||
|
|||||||
@@ -52,7 +52,7 @@
|
|||||||
"# import libraries\n",
|
"# import libraries\n",
|
||||||
"import numpy as np\n",
|
"import numpy as np\n",
|
||||||
"import matplotlib.pyplot as plt\n",
|
"import matplotlib.pyplot as plt\n",
|
||||||
"# Define seed so get same results each time\n",
|
"# Define seed to get same results each time\n",
|
||||||
"np.random.seed(1)"
|
"np.random.seed(1)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -80,7 +80,7 @@
|
|||||||
" for i in range(n_data):\n",
|
" for i in range(n_data):\n",
|
||||||
" x[i] = np.random.uniform(i/n_data, (i+1)/n_data, 1)\n",
|
" x[i] = np.random.uniform(i/n_data, (i+1)/n_data, 1)\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # y value from running through functoin and adding noise\n",
|
" # y value from running through function and adding noise\n",
|
||||||
" y = np.ones(n_data)\n",
|
" y = np.ones(n_data)\n",
|
||||||
" for i in range(n_data):\n",
|
" for i in range(n_data):\n",
|
||||||
" y[i] = true_function(x[i])\n",
|
" y[i] = true_function(x[i])\n",
|
||||||
@@ -96,7 +96,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# Draw the fitted function, together win uncertainty used to generate points\n",
|
"# Draw the fitted function, together with uncertainty used to generate points\n",
|
||||||
"def plot_function(x_func, y_func, x_data=None,y_data=None, x_model = None, y_model =None, sigma_func = None, sigma_model=None):\n",
|
"def plot_function(x_func, y_func, x_data=None,y_data=None, x_model = None, y_model =None, sigma_func = None, sigma_model=None):\n",
|
||||||
"\n",
|
"\n",
|
||||||
" fig,ax = plt.subplots()\n",
|
" fig,ax = plt.subplots()\n",
|
||||||
@@ -137,7 +137,7 @@
|
|||||||
"n_data = 15\n",
|
"n_data = 15\n",
|
||||||
"x_data,y_data = generate_data(n_data, sigma_func)\n",
|
"x_data,y_data = generate_data(n_data, sigma_func)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Plot the functinon, data and uncertainty\n",
|
"# Plot the function, data and uncertainty\n",
|
||||||
"plot_function(x_func, y_func, x_data, y_data, sigma_func=sigma_func)"
|
"plot_function(x_func, y_func, x_data, y_data, sigma_func=sigma_func)"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
@@ -216,7 +216,7 @@
|
|||||||
"# Closed form solution\n",
|
"# Closed form solution\n",
|
||||||
"beta, omega = fit_model_closed_form(x_data,y_data,n_hidden=14)\n",
|
"beta, omega = fit_model_closed_form(x_data,y_data,n_hidden=14)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Get prediction for model across graph grange\n",
|
"# Get prediction for model across graph range\n",
|
||||||
"x_model = np.linspace(0,1,100);\n",
|
"x_model = np.linspace(0,1,100);\n",
|
||||||
"y_model = network(x_model, beta, omega)\n",
|
"y_model = network(x_model, beta, omega)\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -297,7 +297,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# Plot the median of the results\n",
|
"# Plot the mean of the results\n",
|
||||||
"# TODO -- find the mean prediction\n",
|
"# TODO -- find the mean prediction\n",
|
||||||
"# Replace this line\n",
|
"# Replace this line\n",
|
||||||
"y_model_mean = all_y_model[0,:]\n",
|
"y_model_mean = all_y_model[0,:]\n",
|
||||||
|
|||||||
@@ -1,18 +1,16 @@
|
|||||||
{
|
{
|
||||||
"cells": [
|
"cells": [
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"colab_type": "text",
|
"id": "view-in-github",
|
||||||
"id": "view-in-github"
|
"colab_type": "text"
|
||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap09/9_4_Bayesian_Approach.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap09/9_4_Bayesian_Approach.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "el8l05WQEO46"
|
"id": "el8l05WQEO46"
|
||||||
@@ -38,7 +36,7 @@
|
|||||||
"# import libraries\n",
|
"# import libraries\n",
|
||||||
"import numpy as np\n",
|
"import numpy as np\n",
|
||||||
"import matplotlib.pyplot as plt\n",
|
"import matplotlib.pyplot as plt\n",
|
||||||
"# Define seed so get same results each time\n",
|
"# Define seed to get same results each time\n",
|
||||||
"np.random.seed(1)"
|
"np.random.seed(1)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -87,7 +85,7 @@
|
|||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Draw the fitted function, together win uncertainty used to generate points\n",
|
"# Draw the fitted function, together with uncertainty used to generate points\n",
|
||||||
"def plot_function(x_func, y_func, x_data=None,y_data=None, x_model = None, y_model =None, sigma_func = None, sigma_model=None):\n",
|
"def plot_function(x_func, y_func, x_data=None,y_data=None, x_model = None, y_model =None, sigma_func = None, sigma_model=None):\n",
|
||||||
"\n",
|
"\n",
|
||||||
" fig,ax = plt.subplots()\n",
|
" fig,ax = plt.subplots()\n",
|
||||||
@@ -159,7 +157,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "i8T_QduzeBmM"
|
"id": "i8T_QduzeBmM"
|
||||||
@@ -195,7 +192,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "JojV6ueRk49G"
|
"id": "JojV6ueRk49G"
|
||||||
@@ -211,7 +207,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "YX0O_Ciwp4W1"
|
"id": "YX0O_Ciwp4W1"
|
||||||
@@ -225,7 +220,7 @@
|
|||||||
" &\\propto&\\text{Norm}_{\\boldsymbol\\phi}\\biggl[\\frac{1}{\\sigma^2}\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\mathbf{H}\\mathbf{y},\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\biggr].\n",
|
" &\\propto&\\text{Norm}_{\\boldsymbol\\phi}\\biggl[\\frac{1}{\\sigma^2}\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\mathbf{H}\\mathbf{y},\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\biggr].\n",
|
||||||
"\\end{align}\n",
|
"\\end{align}\n",
|
||||||
"\n",
|
"\n",
|
||||||
"In fact, since this already a normal distribution, the constant of proportionality must be one and we can write\n",
|
"In fact, since this is already a normal distribution, the constant of proportionality must be one and we can write\n",
|
||||||
"\n",
|
"\n",
|
||||||
"\\begin{align}\n",
|
"\\begin{align}\n",
|
||||||
" Pr(\\boldsymbol\\phi|\\{\\mathbf{x}_{i},\\mathbf{y}_{i}\\}) &=& \\text{Norm}_{\\boldsymbol\\phi}\\biggl[\\frac{1}{\\sigma^2}\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\mathbf{H}\\mathbf{y},\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\biggr].\n",
|
" Pr(\\boldsymbol\\phi|\\{\\mathbf{x}_{i},\\mathbf{y}_{i}\\}) &=& \\text{Norm}_{\\boldsymbol\\phi}\\biggl[\\frac{1}{\\sigma^2}\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\mathbf{H}\\mathbf{y},\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\biggr].\n",
|
||||||
@@ -277,7 +272,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "GjPnlG4q0UFK"
|
"id": "GjPnlG4q0UFK"
|
||||||
@@ -334,7 +328,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "GiNg5EroUiUb"
|
"id": "GiNg5EroUiUb"
|
||||||
@@ -343,17 +336,16 @@
|
|||||||
"Now we need to perform inference for a new data points $\\mathbf{x}^*$ with corresponding hidden values $\\mathbf{h}^*$. Instead of having a single estimate of the parameters, we have a distribution over the possible parameters. So we marginalize (integrate) over this distribution to account for all possible values:\n",
|
"Now we need to perform inference for a new data points $\\mathbf{x}^*$ with corresponding hidden values $\\mathbf{h}^*$. Instead of having a single estimate of the parameters, we have a distribution over the possible parameters. So we marginalize (integrate) over this distribution to account for all possible values:\n",
|
||||||
"\n",
|
"\n",
|
||||||
"\\begin{align}\n",
|
"\\begin{align}\n",
|
||||||
"Pr(y^*|\\mathbf{x}^*) &=& \\int Pr(y^{*}|\\mathbf{x}^*,\\boldsymbol\\phi)Pr(\\boldsymbol\\phi|\\{\\mathbf{x}_{i},\\mathbf{y}_{i}\\}) d\\boldsymbol\\phi\\\\\n",
|
"Pr(y^*|\\mathbf{x}^*) &= \\int Pr(y^{*}|\\mathbf{x}^*,\\boldsymbol\\phi)Pr(\\boldsymbol\\phi|\\{\\mathbf{x}_{i},\\mathbf{y}_{i}\\}) d\\boldsymbol\\phi\\\\\n",
|
||||||
"&=& \\int \\text{Norm}_{y^*}\\bigl[[\\mathbf{h}^{*T},1]\\boldsymbol\\phi,\\sigma^2\\bigr]\\cdot\\text{Norm}_{\\boldsymbol\\phi}\\biggl[\\frac{1}{\\sigma^2}\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\mathbf{H}\\mathbf{y},\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\biggr]d\\boldsymbol\\phi\\\\\n",
|
"&= \\int \\text{Norm}_{y^*}\\bigl[[\\mathbf{h}^{*T},1]\\boldsymbol\\phi,\\sigma^2\\bigr]\\cdot\\text{Norm}_{\\boldsymbol\\phi}\\biggl[\\frac{1}{\\sigma^2}\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\mathbf{H}\\mathbf{y},\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\biggr]d\\boldsymbol\\phi\\\\\n",
|
||||||
"&=& \\text{Norm}_{y^*}\\biggl[\\frac{1}{\\sigma^2} [\\mathbf{h}^{*T},1]\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\mathbf{H}\\mathbf{y}, [\\mathbf{h}^{*T},1]\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\n",
|
"&= \\text{Norm}_{y^*}\\biggl[\\frac{1}{\\sigma^2} [\\mathbf{h}^{*T},1]\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\\mathbf{H}\\mathbf{y}, [\\mathbf{h}^{*T},1]\\left(\\frac{1}{\\sigma^2}\\mathbf{H}\\mathbf{H}^T+\\frac{1}{\\sigma_p^2}\\mathbf{I}\\right)^{-1}\n",
|
||||||
"[\\mathbf{h}^*;1]\\biggr]\n",
|
"[\\mathbf{h}^*;1]\\biggr],\n",
|
||||||
"\\end{align}\n",
|
"\\end{align}\n",
|
||||||
"\n",
|
"\n",
|
||||||
|
"where the notation $[\\mathbf{h}^{*T},1]$ is a row vector containing $\\mathbf{h}^{T}$ with a one appended to the end and $[\\mathbf{h};1 ]$ is a column vector containing $\\mathbf{h}$ with a one appended to the end.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"\n",
|
"To compute this, we reformulated the integrand using the relations from appendices C.3.3 and C.3.4 as the product of a normal distribution in $\\boldsymbol\\phi$ and a constant with respect\n",
|
||||||
"To compute this, we reformulated the integrand using the relations from appendices\n",
|
|
||||||
"C.3.3 and C.3.4 as the product of a normal distribution in $\\boldsymbol\\phi$ and a constant with respect\n",
|
|
||||||
"to $\\boldsymbol\\phi$. The integral of the normal distribution must be one, and so the final result is just the constant. This constant is itself a normal distribution in $y^*$. <br>\n",
|
"to $\\boldsymbol\\phi$. The integral of the normal distribution must be one, and so the final result is just the constant. This constant is itself a normal distribution in $y^*$. <br>\n",
|
||||||
"\n",
|
"\n",
|
||||||
"If you feel so inclined you can work through the math of this yourself.\n",
|
"If you feel so inclined you can work through the math of this yourself.\n",
|
||||||
@@ -404,7 +396,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "8Hcbe_16sK0F"
|
"id": "8Hcbe_16sK0F"
|
||||||
@@ -419,9 +410,8 @@
|
|||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"authorship_tag": "ABX9TyMB8B4269DVmrcLoCWrhzKF",
|
"provenance": [],
|
||||||
"include_colab_link": true,
|
"include_colab_link": true
|
||||||
"provenance": []
|
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3",
|
"display_name": "Python 3",
|
||||||
|
|||||||
@@ -4,7 +4,6 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"authorship_tag": "ABX9TyM38ZVBK4/xaHk5Ys5lF6dN",
|
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -44,8 +43,8 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# Run this if you're in a Colab to make a local copy of the MNIST 1D repository\n",
|
"# Run this if you're in a Colab to install MNIST 1D repository\n",
|
||||||
"!git clone https://github.com/greydanus/mnist1d"
|
"!pip install git+https://github.com/greydanus/mnist1d"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "syvgxgRr3myY"
|
"id": "syvgxgRr3myY"
|
||||||
@@ -95,7 +94,7 @@
|
|||||||
"D_k = 200 # Hidden dimensions\n",
|
"D_k = 200 # Hidden dimensions\n",
|
||||||
"D_o = 10 # Output dimensions\n",
|
"D_o = 10 # Output dimensions\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Define a model with two hidden layers of size 100\n",
|
"# Define a model with two hidden layers of size 200\n",
|
||||||
"# And ReLU activations between them\n",
|
"# And ReLU activations between them\n",
|
||||||
"model = nn.Sequential(\n",
|
"model = nn.Sequential(\n",
|
||||||
"nn.Linear(D_i, D_k),\n",
|
"nn.Linear(D_i, D_k),\n",
|
||||||
@@ -186,7 +185,7 @@
|
|||||||
"ax.plot(errors_test,'b-',label='test')\n",
|
"ax.plot(errors_test,'b-',label='test')\n",
|
||||||
"ax.set_ylim(0,100); ax.set_xlim(0,n_epoch)\n",
|
"ax.set_ylim(0,100); ax.set_xlim(0,n_epoch)\n",
|
||||||
"ax.set_xlabel('Epoch'); ax.set_ylabel('Error')\n",
|
"ax.set_xlabel('Epoch'); ax.set_ylabel('Error')\n",
|
||||||
"ax.set_title('TrainError %3.2f, Test Error %3.2f'%(errors_train[-1],errors_test[-1]))\n",
|
"ax.set_title('Train Error %3.2f, Test Error %3.2f'%(errors_train[-1],errors_test[-1]))\n",
|
||||||
"ax.legend()\n",
|
"ax.legend()\n",
|
||||||
"plt.show()"
|
"plt.show()"
|
||||||
],
|
],
|
||||||
@@ -233,7 +232,7 @@
|
|||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"n_data_orig = data['x'].shape[0]\n",
|
"n_data_orig = data['x'].shape[0]\n",
|
||||||
"# We'll double the amount o fdata\n",
|
"# We'll double the amount of data\n",
|
||||||
"n_data_augment = n_data_orig+4000\n",
|
"n_data_augment = n_data_orig+4000\n",
|
||||||
"augmented_x = np.zeros((n_data_augment, D_i))\n",
|
"augmented_x = np.zeros((n_data_augment, D_i))\n",
|
||||||
"augmented_y = np.zeros(n_data_augment)\n",
|
"augmented_y = np.zeros(n_data_augment)\n",
|
||||||
|
|||||||
@@ -4,7 +4,7 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"authorship_tag": "ABX9TyNJodaaCLMRWL9vTl8B/iLI",
|
"authorship_tag": "ABX9TyNb46PJB/CC1pcHGfjpUUZg",
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -45,8 +45,8 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# Run this if you're in a Colab to make a local copy of the MNIST 1D repository\n",
|
"# Run this if you're in a Colab to install MNIST 1D repository\n",
|
||||||
"!git clone https://github.com/greydanus/mnist1d"
|
"!pip install git+https://github.com/greydanus/mnist1d"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "D5yLObtZCi9J"
|
"id": "D5yLObtZCi9J"
|
||||||
|
|||||||
@@ -301,7 +301,7 @@
|
|||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# Define 2 by 2 original patch\n",
|
"# Define 2 by 2 original patch\n",
|
||||||
"orig_2_2 = np.array([[2, 4], [4,8]])\n",
|
"orig_2_2 = np.array([[6, 8], [8,4]])\n",
|
||||||
"print(orig_2_2)"
|
"print(orig_2_2)"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
|
|||||||
@@ -4,7 +4,7 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"authorship_tag": "ABX9TyMrF4rB2hTKq7XzLuYsURdL",
|
"authorship_tag": "ABX9TyP3VmRg51U+7NCfSYjRRrgv",
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -235,7 +235,7 @@
|
|||||||
"# Finite difference calculation\n",
|
"# Finite difference calculation\n",
|
||||||
"dydx_fd = (y2-y1)/delta\n",
|
"dydx_fd = (y2-y1)/delta\n",
|
||||||
"\n",
|
"\n",
|
||||||
"print(\"Gradient calculation=%f, Finite difference gradient=%f\"%(dydx,dydx_fd))\n"
|
"print(\"Gradient calculation=%f, Finite difference gradient=%f\"%(dydx.squeeze(),dydx_fd.squeeze()))\n"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "KJpQPVd36Haq"
|
"id": "KJpQPVd36Haq"
|
||||||
@@ -267,8 +267,8 @@
|
|||||||
" fig,ax = plt.subplots()\n",
|
" fig,ax = plt.subplots()\n",
|
||||||
" ax.plot(np.squeeze(x_in), np.squeeze(dydx), 'b-')\n",
|
" ax.plot(np.squeeze(x_in), np.squeeze(dydx), 'b-')\n",
|
||||||
" ax.set_xlim(-2,2)\n",
|
" ax.set_xlim(-2,2)\n",
|
||||||
" ax.set_xlabel('Input, $x$')\n",
|
" ax.set_xlabel(r'Input, $x$')\n",
|
||||||
" ax.set_ylabel('Gradient, $dy/dx$')\n",
|
" ax.set_ylabel(r'Gradient, $dy/dx$')\n",
|
||||||
" ax.set_title('No layers = %d'%(K))\n",
|
" ax.set_title('No layers = %d'%(K))\n",
|
||||||
" plt.show()"
|
" plt.show()"
|
||||||
],
|
],
|
||||||
|
|||||||
@@ -4,7 +4,7 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"authorship_tag": "ABX9TyMXS3SPB4cS/4qxix0lH/Hq",
|
"authorship_tag": "ABX9TyNIY8tswL9e48d5D53aSmHO",
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -45,8 +45,8 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# Run this if you're in a Colab to make a local copy of the MNIST 1D repository\n",
|
"# Run this if you're in a Colab to install MNIST 1D repository\n",
|
||||||
"!git clone https://github.com/greydanus/mnist1d"
|
"!pip install git+https://github.com/greydanus/mnist1d"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "D5yLObtZCi9J"
|
"id": "D5yLObtZCi9J"
|
||||||
|
|||||||
@@ -4,7 +4,7 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"authorship_tag": "ABX9TyPVeAd3eDpEOCFh8CVyr1zz",
|
"authorship_tag": "ABX9TyPx2mM2zTHmDJeKeiE1RymT",
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -45,8 +45,8 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# Run this if you're in a Colab to make a local copy of the MNIST 1D repository\n",
|
"# Run this if you're in a Colab to install MNIST 1D repository\n",
|
||||||
"!git clone https://github.com/greydanus/mnist1d"
|
"!pip install git+https://github.com/greydanus/mnist1d"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "D5yLObtZCi9J"
|
"id": "D5yLObtZCi9J"
|
||||||
|
|||||||
@@ -4,7 +4,6 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"authorship_tag": "ABX9TyMSk8qTqDYqFnRJVZKlsue0",
|
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -29,7 +28,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
"source": [
|
||||||
"# **Notebook 12.1: Multhead Self-Attention**\n",
|
"# **Notebook 12.1: Multihead Self-Attention**\n",
|
||||||
"\n",
|
"\n",
|
||||||
"This notebook builds a multihead self-attention mechanism as in figure 12.6\n",
|
"This notebook builds a multihead self-attention mechanism as in figure 12.6\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -147,9 +146,7 @@
|
|||||||
" exp_values = np.exp(data_in) ;\n",
|
" exp_values = np.exp(data_in) ;\n",
|
||||||
" # Sum over columns\n",
|
" # Sum over columns\n",
|
||||||
" denom = np.sum(exp_values, axis = 0);\n",
|
" denom = np.sum(exp_values, axis = 0);\n",
|
||||||
" # Replicate denominator to N rows\n",
|
" # Compute softmax (numpy broadcasts denominator to all rows automatically)\n",
|
||||||
" denom = np.matmul(np.ones((data_in.shape[0],1)), denom[np.newaxis,:])\n",
|
|
||||||
" # Compute softmax\n",
|
|
||||||
" softmax = exp_values / denom\n",
|
" softmax = exp_values / denom\n",
|
||||||
" # return the answer\n",
|
" # return the answer\n",
|
||||||
" return softmax"
|
" return softmax"
|
||||||
|
|||||||
@@ -4,7 +4,6 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"authorship_tag": "ABX9TyOMSGUFWT+YN0fwYHpMmHJM",
|
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -99,7 +98,7 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"# TODO -- Define node matrix\n",
|
"# TODO -- Define node matrix\n",
|
||||||
"# There will be 9 nodes and 118 possible chemical elements\n",
|
"# There will be 9 nodes and 118 possible chemical elements\n",
|
||||||
"# so we'll define a 9x118 matrix. Each column represents one\n",
|
"# so we'll define a 118x9 matrix. Each column represents one\n",
|
||||||
"# node and is a one-hot vector (i.e. all zeros, except a single one at the\n",
|
"# node and is a one-hot vector (i.e. all zeros, except a single one at the\n",
|
||||||
"# chemical number of the element).\n",
|
"# chemical number of the element).\n",
|
||||||
"# Chemical numbers: Hydrogen-->1, Carbon-->6, Oxygen-->8\n",
|
"# Chemical numbers: Hydrogen-->1, Carbon-->6, Oxygen-->8\n",
|
||||||
|
|||||||
@@ -4,7 +4,7 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"authorship_tag": "ABX9TyOdSkjfQnSZXnffGsZVM7r5",
|
"authorship_tag": "ABX9TyO/wJ4N9w01f04mmrs/ZSHY",
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -185,10 +185,10 @@
|
|||||||
"np.set_printoptions(precision=3)\n",
|
"np.set_printoptions(precision=3)\n",
|
||||||
"output = graph_attention(X, omega, beta, phi, A);\n",
|
"output = graph_attention(X, omega, beta, phi, A);\n",
|
||||||
"print(\"Correct answer is:\")\n",
|
"print(\"Correct answer is:\")\n",
|
||||||
"print(\"[[1.796 1.346 0.569 1.703 1.298 1.224 1.24 1.234]\")\n",
|
"print(\"[[0. 0.028 0.37 0. 0.97 0. 0. 0.698]\")\n",
|
||||||
"print(\" [0.768 0.672 0. 0.529 3.841 4.749 5.376 4.761]\")\n",
|
"print(\" [0. 0. 0. 0. 1.184 0. 2.654 0. ]\")\n",
|
||||||
"print(\" [0.305 0.129 0. 0.341 0.785 1.014 1.113 1.024]\")\n",
|
"print(\" [1.13 0.564 0. 1.298 0.268 0. 0. 0.779]\")\n",
|
||||||
"print(\" [0. 0. 0. 0. 0.35 0.864 1.098 0.871]]]\")\n",
|
"print(\" [0.825 0. 0. 1.175 0. 0. 0. 0. ]]]\")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"print(\"Your answer is:\")\n",
|
"print(\"Your answer is:\")\n",
|
||||||
|
|||||||
@@ -4,7 +4,6 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"authorship_tag": "ABX9TyM0StKV3FIZ3MZqfflqC0Rv",
|
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -339,7 +338,7 @@
|
|||||||
" print(\"Initial generator loss = \", compute_generator_loss(z, theta, phi0, phi1))\n",
|
" print(\"Initial generator loss = \", compute_generator_loss(z, theta, phi0, phi1))\n",
|
||||||
" for iter in range(n_iter):\n",
|
" for iter in range(n_iter):\n",
|
||||||
" # Get gradient\n",
|
" # Get gradient\n",
|
||||||
" dl_dtheta = compute_generator_gradient(x_real, x_syn, phi0, phi1)\n",
|
" dl_dtheta = compute_generator_gradient(z, theta, phi0, phi1)\n",
|
||||||
" # Take a gradient step (uphill, since we are trying to make synthesized data less well classified by discriminator)\n",
|
" # Take a gradient step (uphill, since we are trying to make synthesized data less well classified by discriminator)\n",
|
||||||
" theta = theta + alpha * dl_dtheta ;\n",
|
" theta = theta + alpha * dl_dtheta ;\n",
|
||||||
"\n",
|
"\n",
|
||||||
|
|||||||
@@ -4,7 +4,6 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"authorship_tag": "ABX9TyNyLnpoXgKN+RGCuTUszCAZ",
|
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -129,7 +128,7 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"draw_2D_heatmap(dist_mat,'Distance $|i-j|$', my_colormap)"
|
"draw_2D_heatmap(dist_mat,r'Distance $|i-j|$', my_colormap)"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "G0HFPBXyHT6V"
|
"id": "G0HFPBXyHT6V"
|
||||||
@@ -153,9 +152,9 @@
|
|||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# TODO: Now construct the matrix A that has the initial distribution constraints\n",
|
"# TODO: Now construct the matrix A that has the initial distribution constraints\n",
|
||||||
"# so that Ap=b where p is the transport plan P vectorized rows first so p = np.flatten(P)\n",
|
"# so that A @ TPFlat=b where TPFlat is the transport plan TP vectorized rows first so TPFlat = np.flatten(TP)\n",
|
||||||
"# Replace this line:\n",
|
"# Replace this line:\n",
|
||||||
"A = np.zeros((20,100))\n"
|
"A = np.zeros((20,100))"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "7KrybL96IuNW"
|
"id": "7KrybL96IuNW"
|
||||||
@@ -197,8 +196,8 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"P = np.array(opt.x).reshape(10,10)\n",
|
"TP = np.array(opt.x).reshape(10,10)\n",
|
||||||
"draw_2D_heatmap(P,'Transport plan $\\mathbf{P}$', my_colormap)"
|
"draw_2D_heatmap(TP,r'Transport plan $\\mathbf{P}$', my_colormap)"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "nZGfkrbRV_D0"
|
"id": "nZGfkrbRV_D0"
|
||||||
@@ -218,8 +217,9 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"was = np.sum(P * dist_mat)\n",
|
"was = np.sum(TP * dist_mat)\n",
|
||||||
"print(\"Wasserstein distance = \", was)"
|
"print(\"Your Wasserstein distance = \", was)\n",
|
||||||
|
"print(\"Correct answer = 0.15148578811369506\")"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "yiQ_8j-Raq3c"
|
"id": "yiQ_8j-Raq3c"
|
||||||
|
|||||||
@@ -1,18 +1,16 @@
|
|||||||
{
|
{
|
||||||
"cells": [
|
"cells": [
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"colab_type": "text",
|
"id": "view-in-github",
|
||||||
"id": "view-in-github"
|
"colab_type": "text"
|
||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap17/17_2_Reparameterization_Trick.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap17/17_2_Reparameterization_Trick.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "t9vk9Elugvmi"
|
"id": "t9vk9Elugvmi"
|
||||||
@@ -40,7 +38,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "paLz5RukZP1J"
|
"id": "paLz5RukZP1J"
|
||||||
@@ -114,7 +111,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "r5Hl2QkimWx9"
|
"id": "r5Hl2QkimWx9"
|
||||||
@@ -139,13 +135,12 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"fig,ax = plt.subplots()\n",
|
"fig,ax = plt.subplots()\n",
|
||||||
"ax.plot(phi_vals, expected_vals,'r-')\n",
|
"ax.plot(phi_vals, expected_vals,'r-')\n",
|
||||||
"ax.set_xlabel('Parameter $\\phi$')\n",
|
"ax.set_xlabel(r'Parameter $\\phi$')\n",
|
||||||
"ax.set_ylabel('$\\mathbb{E}_{Pr(x|\\phi)}[f[x]]$')\n",
|
"ax.set_ylabel(r'$\\mathbb{E}_{Pr(x|\\phi)}[f[x]]$')\n",
|
||||||
"plt.show()"
|
"plt.show()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "zTCykVeWqj_O"
|
"id": "zTCykVeWqj_O"
|
||||||
@@ -253,13 +248,12 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"fig,ax = plt.subplots()\n",
|
"fig,ax = plt.subplots()\n",
|
||||||
"ax.plot(phi_vals, deriv_vals,'r-')\n",
|
"ax.plot(phi_vals, deriv_vals,'r-')\n",
|
||||||
"ax.set_xlabel('Parameter $\\phi$')\n",
|
"ax.set_xlabel(r'Parameter $\\phi$')\n",
|
||||||
"ax.set_ylabel('$\\partial/\\partial\\phi\\mathbb{E}_{Pr(x|\\phi)}[f[x]]$')\n",
|
"ax.set_ylabel(r'$\\partial/\\partial\\phi\\mathbb{E}_{Pr(x|\\phi)}[f[x]]$')\n",
|
||||||
"plt.show()"
|
"plt.show()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "ASu4yKSwAEYI"
|
"id": "ASu4yKSwAEYI"
|
||||||
@@ -269,7 +263,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "xoFR1wifc8-b"
|
"id": "xoFR1wifc8-b"
|
||||||
@@ -366,13 +359,12 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"fig,ax = plt.subplots()\n",
|
"fig,ax = plt.subplots()\n",
|
||||||
"ax.plot(phi_vals, deriv_vals,'r-')\n",
|
"ax.plot(phi_vals, deriv_vals,'r-')\n",
|
||||||
"ax.set_xlabel('Parameter $\\phi$')\n",
|
"ax.set_xlabel(r'Parameter $\\phi$')\n",
|
||||||
"ax.set_ylabel('$\\partial/\\partial\\phi\\mathbb{E}_{Pr(x|\\phi)}[f[x]]$')\n",
|
"ax.set_ylabel(r'$\\partial/\\partial\\phi\\mathbb{E}_{Pr(x|\\phi)}[f[x]]$')\n",
|
||||||
"plt.show()"
|
"plt.show()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "1TWBiUC7bQSw"
|
"id": "1TWBiUC7bQSw"
|
||||||
@@ -403,7 +395,6 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"attachments": {},
|
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "d-0tntSYdKPR"
|
"id": "d-0tntSYdKPR"
|
||||||
@@ -415,9 +406,8 @@
|
|||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"authorship_tag": "ABX9TyOxO2/0DTH4n4zhC97qbagY",
|
"provenance": [],
|
||||||
"include_colab_link": true,
|
"include_colab_link": true
|
||||||
"provenance": []
|
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3",
|
"display_name": "Python 3",
|
||||||
|
|||||||
@@ -61,7 +61,7 @@
|
|||||||
"by drawing $I$ samples $y_i$ and using the formula:\n",
|
"by drawing $I$ samples $y_i$ and using the formula:\n",
|
||||||
"\n",
|
"\n",
|
||||||
"\\begin{equation}\n",
|
"\\begin{equation}\n",
|
||||||
"\\mathbb{E}_{y}\\Bigl[\\exp\\bigl[- (y-1)^4\\bigr]\\Bigr] \\approx \\frac{1}{I} \\sum_{i=1}^I \\exp\\bigl[-(y-1)^4 \\bigr]\n",
|
"\\mathbb{E}_{y}\\Bigl[\\exp\\bigl[- (y-1)^4\\bigr]\\Bigr] \\approx \\frac{1}{I} \\sum_{i=1}^I \\exp\\bigl[-(y_i-1)^4 \\bigr]\n",
|
||||||
"\\end{equation}"
|
"\\end{equation}"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -387,7 +387,7 @@
|
|||||||
"def compute_expectation2b(n_samples):\n",
|
"def compute_expectation2b(n_samples):\n",
|
||||||
" # TODO -- complete this function\n",
|
" # TODO -- complete this function\n",
|
||||||
" # 1. Draw n_samples from auxiliary distribution\n",
|
" # 1. Draw n_samples from auxiliary distribution\n",
|
||||||
" # 2. Compute f[y] for those samples\n",
|
" # 2. Compute f2[y] for those samples\n",
|
||||||
" # 3. Scale the results by pr_y / q_y\n",
|
" # 3. Scale the results by pr_y / q_y\n",
|
||||||
" # 4. Compute the mean of these weighted samples\n",
|
" # 4. Compute the mean of these weighted samples\n",
|
||||||
" # Replace this line\n",
|
" # Replace this line\n",
|
||||||
|
|||||||
@@ -3,8 +3,8 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"colab_type": "text",
|
"id": "view-in-github",
|
||||||
"id": "view-in-github"
|
"colab_type": "text"
|
||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap18/18_1_Diffusion_Encoder.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
"<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap18/18_1_Diffusion_Encoder.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||||
@@ -409,7 +409,7 @@
|
|||||||
" # 3. Compute pdf of this Gaussian at every x_plot_val\n",
|
" # 3. Compute pdf of this Gaussian at every x_plot_val\n",
|
||||||
" # 4. Weight Gaussian by probability at position x and by 0.01 to componensate for bin size\n",
|
" # 4. Weight Gaussian by probability at position x and by 0.01 to componensate for bin size\n",
|
||||||
" # 5. Accumulate weighted Gaussian in marginal at time t.\n",
|
" # 5. Accumulate weighted Gaussian in marginal at time t.\n",
|
||||||
" # 6. Multiply result by 0.01 to compensate for bin size\n",
|
"\n",
|
||||||
" # Replace this line:\n",
|
" # Replace this line:\n",
|
||||||
" marginal_at_time_t = marginal_at_time_t\n",
|
" marginal_at_time_t = marginal_at_time_t\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -454,9 +454,8 @@
|
|||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"authorship_tag": "ABX9TyMpC8kgLnXx0XQBtwNAQ4jJ",
|
"provenance": [],
|
||||||
"include_colab_link": true,
|
"include_colab_link": true
|
||||||
"provenance": []
|
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3",
|
"display_name": "Python 3",
|
||||||
|
|||||||
@@ -1,20 +1,4 @@
|
|||||||
{
|
{
|
||||||
"nbformat": 4,
|
|
||||||
"nbformat_minor": 0,
|
|
||||||
"metadata": {
|
|
||||||
"colab": {
|
|
||||||
"provenance": [],
|
|
||||||
"authorship_tag": "ABX9TyMWjsdr5SDwyzcDftnehlNo",
|
|
||||||
"include_colab_link": true
|
|
||||||
},
|
|
||||||
"kernelspec": {
|
|
||||||
"name": "python3",
|
|
||||||
"display_name": "Python 3"
|
|
||||||
},
|
|
||||||
"language_info": {
|
|
||||||
"name": "python"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"cells": [
|
"cells": [
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
@@ -28,6 +12,9 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
|
"metadata": {
|
||||||
|
"id": "t9vk9Elugvmi"
|
||||||
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"# **Notebook 19.3: Monte-Carlo methods**\n",
|
"# **Notebook 19.3: Monte-Carlo methods**\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -37,42 +24,49 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
|
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n",
|
||||||
],
|
"\n",
|
||||||
"metadata": {
|
"Thanks to [Akshil Patel](https://www.akshilpatel.com) and [Jessica Nicholson](https://jessicanicholson1.github.io) for their help in preparing this notebook."
|
||||||
"id": "t9vk9Elugvmi"
|
]
|
||||||
}
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"execution_count": null,
|
||||||
"import numpy as np\n",
|
|
||||||
"import matplotlib.pyplot as plt\n",
|
|
||||||
"from PIL import Image"
|
|
||||||
],
|
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "OLComQyvCIJ7"
|
"id": "OLComQyvCIJ7"
|
||||||
},
|
},
|
||||||
"execution_count": null,
|
"outputs": [],
|
||||||
"outputs": []
|
"source": [
|
||||||
|
"import numpy as np\n",
|
||||||
|
"import matplotlib.pyplot as plt\n",
|
||||||
|
"from PIL import Image\n",
|
||||||
|
"\n",
|
||||||
|
"from IPython.display import clear_output\n",
|
||||||
|
"from time import sleep"
|
||||||
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "ZsvrUszPLyEG"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Get local copies of components of images\n",
|
"# Get local copies of components of images\n",
|
||||||
"!wget https://raw.githubusercontent.com/udlbook/udlbook/main/Notebooks/Chap19/Empty.png\n",
|
"!wget https://raw.githubusercontent.com/udlbook/udlbook/main/Notebooks/Chap19/Empty.png\n",
|
||||||
"!wget https://raw.githubusercontent.com/udlbook/udlbook/main/Notebooks/Chap19/Hole.png\n",
|
"!wget https://raw.githubusercontent.com/udlbook/udlbook/main/Notebooks/Chap19/Hole.png\n",
|
||||||
"!wget https://raw.githubusercontent.com/udlbook/udlbook/main/Notebooks/Chap19/Fish.png\n",
|
"!wget https://raw.githubusercontent.com/udlbook/udlbook/main/Notebooks/Chap19/Fish.png\n",
|
||||||
"!wget https://raw.githubusercontent.com/udlbook/udlbook/main/Notebooks/Chap19/Penguin.png"
|
"!wget https://raw.githubusercontent.com/udlbook/udlbook/main/Notebooks/Chap19/Penguin.png"
|
||||||
],
|
]
|
||||||
"metadata": {
|
|
||||||
"id": "ZsvrUszPLyEG"
|
|
||||||
},
|
|
||||||
"execution_count": null,
|
|
||||||
"outputs": []
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "Gq1HfJsHN3SB"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Ugly class that takes care of drawing pictures like in the book.\n",
|
"# Ugly class that takes care of drawing pictures like in the book.\n",
|
||||||
"# You can totally ignore this code!\n",
|
"# You can totally ignore this code!\n",
|
||||||
@@ -257,205 +251,281 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
" plt.show()"
|
" plt.show()"
|
||||||
],
|
]
|
||||||
"metadata": {
|
|
||||||
"id": "Gq1HfJsHN3SB"
|
|
||||||
},
|
|
||||||
"execution_count": null,
|
|
||||||
"outputs": []
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "eBQ7lTpJQBSe"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# We're going to work on the problem depicted in figure 19.10a\n",
|
"# We're going to work on the problem depicted in figure 19.10a\n",
|
||||||
"n_rows = 4; n_cols = 4\n",
|
"n_rows = 4; n_cols = 4\n",
|
||||||
"layout = np.zeros(n_rows * n_cols)\n",
|
"layout = np.zeros(n_rows * n_cols)\n",
|
||||||
"reward_structure = np.zeros(n_rows * n_cols)\n",
|
"reward_structure = np.zeros(n_rows * n_cols)\n",
|
||||||
"layout[9] = 1 ; reward_structure[9] = -2\n",
|
"layout[9] = 1 ; reward_structure[9] = -2 # Hole\n",
|
||||||
"layout[10] = 1; reward_structure[10] = -2\n",
|
"layout[10] = 1; reward_structure[10] = -2 # Hole\n",
|
||||||
"layout[14] = 1; reward_structure[14] = -2\n",
|
"layout[14] = 1; reward_structure[14] = -2 # Hole\n",
|
||||||
"layout[15] = 2; reward_structure[15] = 3\n",
|
"layout[15] = 2; reward_structure[15] = 3 # Fish\n",
|
||||||
"initial_state = 0\n",
|
"initial_state = 0\n",
|
||||||
"mdp_drawer = DrawMDP(n_rows, n_cols)\n",
|
"mdp_drawer = DrawMDP(n_rows, n_cols)\n",
|
||||||
"mdp_drawer.draw(layout, state = initial_state, rewards=reward_structure, draw_state_index = True)"
|
"mdp_drawer.draw(layout, state = initial_state, rewards=reward_structure, draw_state_index = True)"
|
||||||
],
|
]
|
||||||
"metadata": {
|
|
||||||
"id": "eBQ7lTpJQBSe"
|
|
||||||
},
|
|
||||||
"execution_count": null,
|
|
||||||
"outputs": []
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
|
||||||
"For clarity, the black numbers are the state number and the red numbers are the reward for being in that state. Note that the states are indexed from 0 rather than 1 as in the book to make the code neater."
|
|
||||||
],
|
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "6Vku6v_se2IG"
|
"id": "6Vku6v_se2IG"
|
||||||
}
|
},
|
||||||
|
"source": [
|
||||||
|
"For clarity, the black numbers are the state number and the red numbers are the reward for being in that state. Note that the states are indexed from 0 rather than 1 as in the book to make the code neater."
|
||||||
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
|
"metadata": {
|
||||||
|
"id": "Fhc6DzZNOjiC"
|
||||||
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"Now let's define the state transition function $Pr(s_{t+1}|s_{t},a)$ in full where $a$ is the actions. Here $a=0$ means try to go upward, $a=1$, right, $a=2$ down and $a=3$ right. However, the ice is slippery, so we don't always go the direction we want to.\n",
|
"Now let's define the state transition function $Pr(s_{t+1}|s_{t},a)$ in full where $a$ is the actions. Here $a=0$ means try to go upward, $a=1$, right, $a=2$ down and $a=3$ right. However, the ice is slippery, so we don't always go the direction we want to.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Note that as for the states, we've indexed the actions from zero (unlike in the book) so they map to the indices of arrays better"
|
"Note that as for the states, we've indexed the actions from zero (unlike in the book) so they map to the indices of arrays better"
|
||||||
],
|
]
|
||||||
"metadata": {
|
|
||||||
"id": "Fhc6DzZNOjiC"
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "l7rT78BbOgTi"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"transition_probabilities_given_action0 = np.array(\\\n",
|
"transition_probabilities_given_action0 = np.array(\\\n",
|
||||||
"[[0.00 , 0.33, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
"[[0.90, 0.05, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.50 , 0.00, 0.33, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.05, 0.85, 0.05, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.33, 0.00, 0.50, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.05, 0.85, 0.05, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.33, 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.05, 0.90, 0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.50 , 0.00, 0.00, 0.00, 0.00, 0.17, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.05, 0.00, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.34, 0.00, 0.00, 0.25, 0.00, 0.17, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.05, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.34, 0.00, 0.00, 0.17, 0.00, 0.25, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.05, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.50, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.17, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.25, 0.00, 0.17, 0.00, 0.00, 0.50, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.05, 0.00, 0.00, 0.85, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.17, 0.00, 0.25, 0.00, 0.00, 0.50, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.05, 0.00, 0.00, 0.85, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.75 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.10, 0.05, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.25, 0.00, 0.25, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.05, 0.05, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.25, 0.00, 0.25 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.05, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.25, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00]])\n",
|
||||||
"])\n",
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"transition_probabilities_given_action1 = np.array(\\\n",
|
"transition_probabilities_given_action1 = np.array(\\\n",
|
||||||
"[[0.00 , 0.25, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
"[[0.10, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.75 , 0.00, 0.25, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.85, 0.05, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.50, 0.00, 0.50, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.85, 0.05, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.33, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.85, 0.90, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.25 , 0.00, 0.00, 0.00, 0.00, 0.17, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.05, 0.00, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.25, 0.00, 0.00, 0.50, 0.00, 0.17, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.05, 0.00, 0.00, 0.85, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.25, 0.00, 0.00, 0.50, 0.00, 0.33, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.05, 0.00, 0.00, 0.85, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.50, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.33, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.85, 0.85, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.17, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.50, 0.00, 0.17, 0.00, 0.00, 0.25, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.85, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.50, 0.00, 0.33, 0.00, 0.00, 0.25, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.85, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.34, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.50 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.85, 0.85, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.10, 0.05, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.75, 0.00, 0.25, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.85, 0.05, 0.05, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.50, 0.00, 0.50 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.85, 0.05, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.34, 0.00, 0.00, 0.50, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.85, 0.00]])\n",
|
||||||
"])\n",
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"transition_probabilities_given_action2 = np.array(\\\n",
|
"transition_probabilities_given_action2 = np.array(\\\n",
|
||||||
"[[0.00 , 0.25, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
"[[0.10, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.25 , 0.00, 0.25, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.05, 0.05, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.25, 0.00, 0.25, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.05, 0.05, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.05, 0.10, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.75 , 0.00, 0.00, 0.00, 0.00, 0.17, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.85, 0.00, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.50, 0.00, 0.00, 0.25, 0.00, 0.17, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.85, 0.00, 0.00, 0.05, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.50, 0.00, 0.00, 0.16, 0.00, 0.25, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.85, 0.00, 0.00, 0.05, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.75, 0.00, 0.00, 0.16, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.17, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.25, 0.00, 0.17, 0.00, 0.00, 0.33, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.05, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.16, 0.00, 0.25, 0.00, 0.00, 0.33, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.05, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.16, 0.00, 0.00, 0.00, 0.00, 0.50 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.33, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00, 0.90, 0.05, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.50, 0.00, 0.33, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.05, 0.85, 0.05, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.34, 0.00, 0.50 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.05, 0.85, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.34, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.05, 0.00]])\n",
|
||||||
"])\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
"transition_probabilities_given_action3 = np.array(\\\n",
|
"transition_probabilities_given_action3 = np.array(\\\n",
|
||||||
"[[0.00 , 0.25, 0.00, 0.00, 0.33, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
"[[0.90, 0.85, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.50 , 0.00, 0.25, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.05, 0.05, 0.85, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.50, 0.00, 0.75, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.05, 0.05, 0.85, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.05, 0.10, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.50 , 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.33, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.05, 0.00, 0.00, 0.00, 0.85, 0.85, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.25, 0.00, 0.00, 0.33, 0.00, 0.50, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.85, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.25, 0.00, 0.00, 0.17, 0.00, 0.50, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.85, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.25, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.34, 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.85, 0.85, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.33, 0.00, 0.50, 0.00, 0.00, 0.25, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.85, 0.00, 0.00, 0.05, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.17, 0.00, 0.50, 0.00, 0.00, 0.25, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.85, 0.00, 0.00, 0.05, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.25 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.34, 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.90, 0.85, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.50, 0.00, 0.50, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.05, 0.85, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.25, 0.00, 0.75 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.05, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.25, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00]])\n",
|
||||||
"])\n",
|
"\n",
|
||||||
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Store all of these in a three dimension array\n",
|
"# Store all of these in a three dimension array\n",
|
||||||
"# Pr(s_{t+1}=2|s_{t}=1, a_{t}=3] is stored at position [2,1,3]\n",
|
"# Pr(s_{t+1}=2|s_{t}=1, a_{t}=3] is stored at position [2,1,3]\n",
|
||||||
"transition_probabilities_given_action = np.concatenate((np.expand_dims(transition_probabilities_given_action0,2),\n",
|
"transition_probabilities_given_action = np.concatenate((np.expand_dims(transition_probabilities_given_action0,2),\n",
|
||||||
" np.expand_dims(transition_probabilities_given_action1,2),\n",
|
" np.expand_dims(transition_probabilities_given_action1,2),\n",
|
||||||
" np.expand_dims(transition_probabilities_given_action2,2),\n",
|
" np.expand_dims(transition_probabilities_given_action2,2),\n",
|
||||||
" np.expand_dims(transition_probabilities_given_action3,2)),axis=2)"
|
" np.expand_dims(transition_probabilities_given_action3,2)),axis=2)\n",
|
||||||
],
|
"\n",
|
||||||
"metadata": {
|
"print('Grid Size:', len(transition_probabilities_given_action[0]))\n",
|
||||||
"id": "l7rT78BbOgTi"
|
"print()\n",
|
||||||
|
"print('Transition Probabilities for when next state = 2:')\n",
|
||||||
|
"print(transition_probabilities_given_action[2])\n",
|
||||||
|
"print()\n",
|
||||||
|
"print('Transitions Probabilities for when next state = 2 and current state = 1')\n",
|
||||||
|
"print(transition_probabilities_given_action[2][1])\n",
|
||||||
|
"print()\n",
|
||||||
|
"print('Transitions Probabilities for when next state = 2 and current state = 1 and action = 3 (Left):')\n",
|
||||||
|
"print(transition_probabilities_given_action[2][1][3])"
|
||||||
|
]
|
||||||
},
|
},
|
||||||
"execution_count": null,
|
{
|
||||||
"outputs": []
|
"cell_type": "markdown",
|
||||||
|
"metadata": {
|
||||||
|
"id": "BHWjp6Qq4tBF"
|
||||||
|
},
|
||||||
|
"source": [
|
||||||
|
"## Implementation Details\n",
|
||||||
|
"\n",
|
||||||
|
"We provide the following methods:\n",
|
||||||
|
"\n",
|
||||||
|
"- **`markov_decision_process_step_stochastic`** - this function selects an action based on the stochastic policy for the current state, updates the state based on the transition probabilities associated with the chosen action, and returns the new state, the reward obtained for the new state, the chosen action, and whether the episode terminates.\n",
|
||||||
|
"\n",
|
||||||
|
"- **`get_one_episode`** - this function simulates an episode of agent-environment interaction. It returns the states, rewards, and actions seen in that episode, which we can then use to update the agent.\n",
|
||||||
|
"\n",
|
||||||
|
"- **`calculate_returns`** - this function calls on the **`calculate_return`** function that computes the discounted sum of rewards from a specific step, in a sequence of rewards.\n",
|
||||||
|
"\n",
|
||||||
|
"You have to implement the following methods:\n",
|
||||||
|
"\n",
|
||||||
|
"- **`deterministic_policy_to_epsilon_greedy`** - given a deterministic policy, where one action is chosen per state, this function computes the $\\epsilon$-greedy version of that policy, where each of the four actions has some nonzero probability of being selected per state. In each state, the probability of selecting each of the actions should sum to 1.\n",
|
||||||
|
"\n",
|
||||||
|
"- **`update_policy_mc`** - this function updates the action-value function using the Monte Carlo method. We use the rollout trajectories collected using `get_one_episode` to calculate the returns. Then update the action values towards the Monte Carlo estimate of the return for each state."
|
||||||
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "akjrncMF-FkU"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# This takes a single step from an MDP\n",
|
"# This takes a single step from an MDP\n",
|
||||||
"def markov_decision_process_step_stochastic(state, transition_probabilities_given_action, reward_structure, stochastic_policy):\n",
|
"def markov_decision_process_step_stochastic(state, transition_probabilities_given_action, reward_structure, terminal_states, stochastic_policy):\n",
|
||||||
" # Pick action\n",
|
" # Pick action\n",
|
||||||
" action = np.random.choice(a=np.arange(0,4,1),p=stochastic_policy[:,state])\n",
|
" action = np.random.choice(a=np.arange(0,4,1),p=stochastic_policy[:,state])\n",
|
||||||
|
"\n",
|
||||||
" # Update the state\n",
|
" # Update the state\n",
|
||||||
" new_state = np.random.choice(a=np.arange(0,transition_probabilities_given_action.shape[0]),p = transition_probabilities_given_action[:,state,action])\n",
|
" new_state = np.random.choice(a=np.arange(0,transition_probabilities_given_action.shape[0]),p = transition_probabilities_given_action[:,state,action])\n",
|
||||||
" # Return the reward\n",
|
" # Return the reward\n",
|
||||||
" reward = reward_structure[new_state]\n",
|
" reward = reward_structure[new_state]\n",
|
||||||
|
" is_terminal = new_state in [terminal_states]\n",
|
||||||
"\n",
|
"\n",
|
||||||
" return new_state, reward, action"
|
" return new_state, reward, action, is_terminal"
|
||||||
],
|
]
|
||||||
"metadata": {
|
|
||||||
"id": "akjrncMF-FkU"
|
|
||||||
},
|
|
||||||
"execution_count": null,
|
|
||||||
"outputs": []
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"execution_count": null,
|
||||||
"# Run one episode and return actions, rewards, returns\n",
|
|
||||||
"def get_one_episode(initial_state, transition_probabilities_given_action, reward_structure, stochastic_policy):\n",
|
|
||||||
"\n",
|
|
||||||
" max_steps = 1000\n",
|
|
||||||
" states = np.zeros(max_steps, dtype='uint8') ;\n",
|
|
||||||
" rewards = np.zeros(max_steps) ;\n",
|
|
||||||
" actions = np.zeros(max_steps, dtype='uint8') ;\n",
|
|
||||||
"\n",
|
|
||||||
" t = 0\n",
|
|
||||||
" states[t] = initial_state\n",
|
|
||||||
" # While haven't reached maximum number of steps\n",
|
|
||||||
" while t< max_steps:\n",
|
|
||||||
" # Keep stepping through MDP\n",
|
|
||||||
" states[t+1],rewards[t+1],actions[t] = markov_decision_process_step_stochastic(states[t], transition_probabilities_given_action, reward_structure, stochastic_policy)\n",
|
|
||||||
" # If we reach te:rminal state then quit\n",
|
|
||||||
" if states[t]==15:\n",
|
|
||||||
" break;\n",
|
|
||||||
" t+=1\n",
|
|
||||||
"\n",
|
|
||||||
" states = states[:t+1]\n",
|
|
||||||
" rewards = rewards[:t+1]\n",
|
|
||||||
" actions = actions[:t+1]\n",
|
|
||||||
"\n",
|
|
||||||
" return states, rewards, actions"
|
|
||||||
],
|
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "bFYvF9nAloIA"
|
"id": "bFYvF9nAloIA"
|
||||||
},
|
},
|
||||||
"execution_count": null,
|
"outputs": [],
|
||||||
"outputs": []
|
"source": [
|
||||||
|
"# Run one episode and return actions, rewards, returns\n",
|
||||||
|
"def get_one_episode(initial_state, transition_probabilities_given_action, reward_structure, terminal_states, stochastic_policy):\n",
|
||||||
|
"\n",
|
||||||
|
" states = []\n",
|
||||||
|
" rewards = []\n",
|
||||||
|
" actions = []\n",
|
||||||
|
"\n",
|
||||||
|
" states.append(initial_state)\n",
|
||||||
|
" state = initial_state\n",
|
||||||
|
"\n",
|
||||||
|
" is_terminal = False\n",
|
||||||
|
" # While we haven't reached a terminal state\n",
|
||||||
|
" while not is_terminal:\n",
|
||||||
|
" # Keep stepping through MDP\n",
|
||||||
|
" state, reward, action, is_terminal = markov_decision_process_step_stochastic(state,\n",
|
||||||
|
" transition_probabilities_given_action,\n",
|
||||||
|
" reward_structure,\n",
|
||||||
|
" terminal_states,\n",
|
||||||
|
" stochastic_policy)\n",
|
||||||
|
" states.append(state)\n",
|
||||||
|
" rewards.append(reward)\n",
|
||||||
|
" actions.append(action)\n",
|
||||||
|
"\n",
|
||||||
|
" states = np.array(states, dtype=\"uint8\")\n",
|
||||||
|
" rewards = np.array(rewards)\n",
|
||||||
|
" actions = np.array(actions, dtype=\"uint8\")\n",
|
||||||
|
"\n",
|
||||||
|
" # If the episode was terminated early, we need to compute the return differently using r_{t+1} + gamma*V(s_{t+1})\n",
|
||||||
|
" return states, rewards, actions"
|
||||||
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "qJhOrIId4tBF"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def visualize_one_episode(states, actions):\n",
|
||||||
|
" # Define actions for visualization\n",
|
||||||
|
" acts = ['up', 'right', 'down', 'left']\n",
|
||||||
|
"\n",
|
||||||
|
" # Iterate over the states and actions\n",
|
||||||
|
" for i in range(len(states)):\n",
|
||||||
|
"\n",
|
||||||
|
" if i == 0:\n",
|
||||||
|
" print('Starting State:', states[i])\n",
|
||||||
|
"\n",
|
||||||
|
" elif i == len(states)-1:\n",
|
||||||
|
" print('Episode Done:', states[i])\n",
|
||||||
|
"\n",
|
||||||
|
" else:\n",
|
||||||
|
" print('State', states[i-1])\n",
|
||||||
|
" a = actions[i]\n",
|
||||||
|
" print('Action:', acts[a])\n",
|
||||||
|
" print('Next State:', states[i])\n",
|
||||||
|
"\n",
|
||||||
|
" # Visualize the current state using the MDP drawer\n",
|
||||||
|
" mdp_drawer.draw(layout, state=states[i], rewards=reward_structure, draw_state_index=True)\n",
|
||||||
|
" clear_output(True)\n",
|
||||||
|
"\n",
|
||||||
|
" # Pause for a short duration to allow observation\n",
|
||||||
|
" sleep(1.5)\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "_AKwdtQQHzIK"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Convert deterministic policy (1x16) to an epsilon greedy stochastic policy (4x16)\n",
|
"# Convert deterministic policy (1x16) to an epsilon greedy stochastic policy (4x16)\n",
|
||||||
"def deterministic_policy_to_epsilon_greedy(policy, epsilon=0.1):\n",
|
"def deterministic_policy_to_epsilon_greedy(policy, epsilon=0.2):\n",
|
||||||
" # TODO -- write this function\n",
|
" # TODO -- write this function\n",
|
||||||
" # You should wind up with a 4x16 matrix, with epsilon/3 in every position except the real policy\n",
|
" # You should wind up with a 4x16 matrix, with epsilon/3 in every position except the real policy\n",
|
||||||
" # The columns should sum to one\n",
|
" # The columns should sum to one\n",
|
||||||
@@ -464,27 +534,27 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
" return stochastic_policy"
|
" return stochastic_policy"
|
||||||
],
|
]
|
||||||
"metadata": {
|
|
||||||
"id": "_AKwdtQQHzIK"
|
|
||||||
},
|
|
||||||
"execution_count": null,
|
|
||||||
"outputs": []
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
|
||||||
"Let's try generating an episode"
|
|
||||||
],
|
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "OhVXw2Favo-w"
|
"id": "OhVXw2Favo-w"
|
||||||
}
|
},
|
||||||
|
"source": [
|
||||||
|
"Let's try generating an episode"
|
||||||
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "5zQ1Oh9Zvnwt"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Set seed so random numbers always the same\n",
|
"# Set seed so random numbers always the same\n",
|
||||||
"np.random.seed(0)\n",
|
"np.random.seed(6)\n",
|
||||||
"# Print in compact form\n",
|
"# Print in compact form\n",
|
||||||
"np.set_printoptions(precision=3)\n",
|
"np.set_printoptions(precision=3)\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -494,32 +564,55 @@
|
|||||||
"# Convert deterministic policy to stochastic\n",
|
"# Convert deterministic policy to stochastic\n",
|
||||||
"stochastic_policy = deterministic_policy_to_epsilon_greedy(policy)\n",
|
"stochastic_policy = deterministic_policy_to_epsilon_greedy(policy)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"print(\"Initial policy:\")\n",
|
"print(\"Initial Penguin Policy:\")\n",
|
||||||
"print(policy)\n",
|
"print(policy)\n",
|
||||||
|
"print()\n",
|
||||||
|
"print('Stochastic Penguin Policy:')\n",
|
||||||
|
"print(stochastic_policy)\n",
|
||||||
|
"print()\n",
|
||||||
"\n",
|
"\n",
|
||||||
"initial_state = 5\n",
|
"initial_state = 5\n",
|
||||||
"states, rewards, actions = get_one_episode(initial_state,transition_probabilities_given_action, reward_structure, stochastic_policy)"
|
"terminal_states=[15]\n",
|
||||||
],
|
"states, rewards, actions = get_one_episode(initial_state,transition_probabilities_given_action, reward_structure, terminal_states, stochastic_policy)\n",
|
||||||
"metadata": {
|
"\n",
|
||||||
"id": "5zQ1Oh9Zvnwt"
|
"print('Initial Penguin Position:')\n",
|
||||||
},
|
"mdp_drawer.draw(layout, state = initial_state, rewards=reward_structure, draw_state_index = True)\n",
|
||||||
"execution_count": null,
|
"\n",
|
||||||
"outputs": []
|
"print('Total steps to termination:', len(states))\n",
|
||||||
},
|
"print('Final Reward:', np.sum(rewards))"
|
||||||
{
|
]
|
||||||
"cell_type": "markdown",
|
|
||||||
"source": [
|
|
||||||
"We'll need to calculate the returns (discounted cumulative reward) for each state action pair"
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"id": "nl6rtNffwhcU"
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "KJH-UGKk4tBF"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"#this visualizes the complete episode\n",
|
||||||
|
"visualize_one_episode(states, actions)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {
|
||||||
|
"id": "nl6rtNffwhcU"
|
||||||
|
},
|
||||||
|
"source": [
|
||||||
|
"We'll need to calculate the returns (discounted cumulative reward) for each state action pair"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "FxrItqGPLTq7"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"def calculate_returns(rewards, gamma):\n",
|
"def calculate_returns(rewards, gamma):\n",
|
||||||
" returns = np.zeros_like(rewards)\n",
|
" returns = np.zeros(len(rewards))\n",
|
||||||
" for c_return in range(len(returns)):\n",
|
" for c_return in range(len(returns)):\n",
|
||||||
" returns[c_return] = calculate_return(rewards[c_return:], gamma)\n",
|
" returns[c_return] = calculate_return(rewards[c_return:], gamma)\n",
|
||||||
" return returns\n",
|
" return returns\n",
|
||||||
@@ -529,26 +622,26 @@
|
|||||||
" for i in range(len(rewards)):\n",
|
" for i in range(len(rewards)):\n",
|
||||||
" return_val += rewards[i] * np.power(gamma, i)\n",
|
" return_val += rewards[i] * np.power(gamma, i)\n",
|
||||||
" return return_val"
|
" return return_val"
|
||||||
],
|
]
|
||||||
"metadata": {
|
|
||||||
"id": "FxrItqGPLTq7"
|
|
||||||
},
|
|
||||||
"execution_count": null,
|
|
||||||
"outputs": []
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
|
||||||
"This routine does the main work of the Monte Carlo method. We repeatedly rollout episods, calculate the returns. Then we figure out the average return for each state action pair, and choose the next policy as the action that has greatest state action value at each state."
|
|
||||||
],
|
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "DX1KfHRhzUOU"
|
"id": "DX1KfHRhzUOU"
|
||||||
}
|
},
|
||||||
|
"source": [
|
||||||
|
"This routine does the main work of the on-policy Monte Carlo method. We repeatedly rollout episods, calculate the returns. Then we figure out the average return for each state action pair, and choose the next policy as the action that has greatest state action value at each state."
|
||||||
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "hCghcKlOJXSM"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"def update_policy_mc(initial_state, transition_probabilities_given_action, reward_structure, stochastic_policy, gamma, n_rollouts=1):\n",
|
"def update_policy_mc(initial_state, transition_probabilities_given_action, reward_structure, terminal_states, stochastic_policy, gamma, n_rollouts=1):\n",
|
||||||
" # Create two matrices to store total returns for each action/state pair and the\n",
|
" # Create two matrices to store total returns for each action/state pair and the\n",
|
||||||
" # number of times we observed that action/state pair\n",
|
" # number of times we observed that action/state pair\n",
|
||||||
" n_state = transition_probabilities_given_action.shape[0]\n",
|
" n_state = transition_probabilities_given_action.shape[0]\n",
|
||||||
@@ -574,18 +667,18 @@
|
|||||||
" state_action_values = state_action_returns_total/( state_action_count+0.00001)\n",
|
" state_action_values = state_action_returns_total/( state_action_count+0.00001)\n",
|
||||||
" policy = np.argmax(state_action_values, axis=0).astype(int)\n",
|
" policy = np.argmax(state_action_values, axis=0).astype(int)\n",
|
||||||
" return policy, state_action_values\n"
|
" return policy, state_action_values\n"
|
||||||
],
|
]
|
||||||
"metadata": {
|
|
||||||
"id": "hCghcKlOJXSM"
|
|
||||||
},
|
|
||||||
"execution_count": null,
|
|
||||||
"outputs": []
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "8jWhDlkaKj7Q"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Set seed so random numbers always the same\n",
|
"# Set seed so random numbers always the same\n",
|
||||||
"np.random.seed(3)\n",
|
"np.random.seed(0)\n",
|
||||||
"# Print in compact form\n",
|
"# Print in compact form\n",
|
||||||
"np.set_printoptions(precision=3)\n",
|
"np.set_printoptions(precision=3)\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -597,32 +690,60 @@
|
|||||||
"mdp_drawer = DrawMDP(n_rows, n_cols)\n",
|
"mdp_drawer = DrawMDP(n_rows, n_cols)\n",
|
||||||
"mdp_drawer.draw(layout, policy = policy, rewards = reward_structure)\n",
|
"mdp_drawer.draw(layout, policy = policy, rewards = reward_structure)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"\n",
|
"terminal_states = [15]\n",
|
||||||
"n_policy_update = 5\n",
|
"# Track all the policies so we can visualize them later\n",
|
||||||
|
"all_policies = []\n",
|
||||||
|
"n_policy_update = 2000\n",
|
||||||
"for c_policy_update in range(n_policy_update):\n",
|
"for c_policy_update in range(n_policy_update):\n",
|
||||||
" # Convert policy to stochastic\n",
|
" # Convert policy to stochastic\n",
|
||||||
" stochastic_policy = deterministic_policy_to_epsilon_greedy(policy)\n",
|
" stochastic_policy = deterministic_policy_to_epsilon_greedy(policy)\n",
|
||||||
" # Update policy by Monte Carlo method\n",
|
" # Update policy by Monte Carlo method\n",
|
||||||
" policy, state_action_values = update_policy_mc(initial_state, transition_probabilities_given_action, reward_structure, stochastic_policy, gamma, n_rollouts=1000)\n",
|
" policy, state_action_values = update_policy_mc(initial_state, transition_probabilities_given_action, reward_structure, terminal_states, stochastic_policy, gamma, n_rollouts=100)\n",
|
||||||
|
" all_policies.append(policy)\n",
|
||||||
|
"\n",
|
||||||
|
" # Print out 10 snapshots of progress\n",
|
||||||
|
" if (c_policy_update % (n_policy_update//10) == 0) or c_policy_update == n_policy_update - 1:\n",
|
||||||
" print(\"Updated policy\")\n",
|
" print(\"Updated policy\")\n",
|
||||||
" print(policy)\n",
|
" print(policy)\n",
|
||||||
" mdp_drawer = DrawMDP(n_rows, n_cols)\n",
|
" mdp_drawer = DrawMDP(n_rows, n_cols)\n",
|
||||||
" mdp_drawer.draw(layout, policy = policy, rewards = reward_structure, state_action_values=state_action_values)\n"
|
" mdp_drawer.draw(layout, policy = policy, rewards = reward_structure, state_action_values=state_action_values)\n",
|
||||||
],
|
"\n",
|
||||||
"metadata": {
|
"\n"
|
||||||
"id": "8jWhDlkaKj7Q"
|
]
|
||||||
},
|
|
||||||
"execution_count": null,
|
|
||||||
"outputs": []
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
|
||||||
"You can see that the results are quite noisy, but there is a definite improvement from the initial policy."
|
|
||||||
],
|
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "j7Ny47kTEMzH"
|
"id": "j7Ny47kTEMzH"
|
||||||
}
|
},
|
||||||
}
|
"source": [
|
||||||
|
"You can see a definite improvement to the policy"
|
||||||
]
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"colab": {
|
||||||
|
"provenance": [],
|
||||||
|
"include_colab_link": true
|
||||||
|
},
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.10.12"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 0
|
||||||
}
|
}
|
||||||
@@ -1,20 +1,4 @@
|
|||||||
{
|
{
|
||||||
"nbformat": 4,
|
|
||||||
"nbformat_minor": 0,
|
|
||||||
"metadata": {
|
|
||||||
"colab": {
|
|
||||||
"provenance": [],
|
|
||||||
"authorship_tag": "ABX9TyNEAhORON7DFN1dZMhDK/PO",
|
|
||||||
"include_colab_link": true
|
|
||||||
},
|
|
||||||
"kernelspec": {
|
|
||||||
"name": "python3",
|
|
||||||
"display_name": "Python 3"
|
|
||||||
},
|
|
||||||
"language_info": {
|
|
||||||
"name": "python"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"cells": [
|
"cells": [
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
@@ -28,6 +12,9 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
|
"metadata": {
|
||||||
|
"id": "t9vk9Elugvmi"
|
||||||
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"# **Notebook 19.4: Temporal difference methods**\n",
|
"# **Notebook 19.4: Temporal difference methods**\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -35,42 +22,49 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions."
|
"Contact me at udlbookmail@gmail.com if you find any mistakes or have any suggestions.\n",
|
||||||
],
|
"\n",
|
||||||
"metadata": {
|
"Thanks to [Akshil Patel](https://www.akshilpatel.com) and [Jessica Nicholson](https://jessicanicholson1.github.io) for their help in preparing this notebook."
|
||||||
"id": "t9vk9Elugvmi"
|
]
|
||||||
}
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"execution_count": null,
|
||||||
"import numpy as np\n",
|
|
||||||
"import matplotlib.pyplot as plt\n",
|
|
||||||
"from PIL import Image"
|
|
||||||
],
|
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "OLComQyvCIJ7"
|
"id": "OLComQyvCIJ7"
|
||||||
},
|
},
|
||||||
"execution_count": null,
|
"outputs": [],
|
||||||
"outputs": []
|
"source": [
|
||||||
|
"import numpy as np\n",
|
||||||
|
"import matplotlib.pyplot as plt\n",
|
||||||
|
"from PIL import Image\n",
|
||||||
|
"from IPython.display import clear_output\n",
|
||||||
|
"from time import sleep\n",
|
||||||
|
"from copy import deepcopy"
|
||||||
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "ZsvrUszPLyEG"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Get local copies of components of images\n",
|
"# Get local copies of components of images\n",
|
||||||
"!wget https://raw.githubusercontent.com/udlbook/udlbook/main/Notebooks/Chap19/Empty.png\n",
|
"!wget https://raw.githubusercontent.com/udlbook/udlbook/main/Notebooks/Chap19/Empty.png\n",
|
||||||
"!wget https://raw.githubusercontent.com/udlbook/udlbook/main/Notebooks/Chap19/Hole.png\n",
|
"!wget https://raw.githubusercontent.com/udlbook/udlbook/main/Notebooks/Chap19/Hole.png\n",
|
||||||
"!wget https://raw.githubusercontent.com/udlbook/udlbook/main/Notebooks/Chap19/Fish.png\n",
|
"!wget https://raw.githubusercontent.com/udlbook/udlbook/main/Notebooks/Chap19/Fish.png\n",
|
||||||
"!wget https://raw.githubusercontent.com/udlbook/udlbook/main/Notebooks/Chap19/Penguin.png"
|
"!wget https://raw.githubusercontent.com/udlbook/udlbook/main/Notebooks/Chap19/Penguin.png"
|
||||||
],
|
]
|
||||||
"metadata": {
|
|
||||||
"id": "ZsvrUszPLyEG"
|
|
||||||
},
|
|
||||||
"execution_count": null,
|
|
||||||
"outputs": []
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "Gq1HfJsHN3SB"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Ugly class that takes care of drawing pictures like in the book.\n",
|
"# Ugly class that takes care of drawing pictures like in the book.\n",
|
||||||
"# You can totally ignore this code!\n",
|
"# You can totally ignore this code!\n",
|
||||||
@@ -253,269 +247,516 @@
|
|||||||
" self.draw_text(\"%2.2f\"%(state_action_values[3, c_cell]), np.floor(c_cell/self.n_col), c_cell-np.floor(c_cell/self.n_col)*self.n_col,'lc','black')\n",
|
" self.draw_text(\"%2.2f\"%(state_action_values[3, c_cell]), np.floor(c_cell/self.n_col), c_cell-np.floor(c_cell/self.n_col)*self.n_col,'lc','black')\n",
|
||||||
"\n",
|
"\n",
|
||||||
" plt.show()"
|
" plt.show()"
|
||||||
],
|
]
|
||||||
"metadata": {
|
|
||||||
"id": "Gq1HfJsHN3SB"
|
|
||||||
},
|
},
|
||||||
"execution_count": null,
|
{
|
||||||
"outputs": []
|
"cell_type": "markdown",
|
||||||
|
"metadata": {
|
||||||
|
"id": "JU8gX59o76xM"
|
||||||
|
},
|
||||||
|
"source": [
|
||||||
|
"# Penguin Ice Environment\n",
|
||||||
|
"\n",
|
||||||
|
"In this implementation we have designed an icy gridworld that a penguin has to traverse to reach the fish found in the bottom right corner.\n",
|
||||||
|
"\n",
|
||||||
|
"## Environment Description\n",
|
||||||
|
"\n",
|
||||||
|
"Consider having to cross an icy surface to reach the yummy fish. In order to achieve this task as quickly as possible, the penguin needs to waddle along as fast as it can whilst simultaneously avoiding falling into the holes.\n",
|
||||||
|
"\n",
|
||||||
|
"In this icy environment the penguin is at one of the discrete cells in the gridworld. The agent starts each episode on a randomly chosen cell. The environment state dynamics are captured by the transition probabilities $Pr(s_{t+1} |s_t, a_t)$ where $s_t$ is the current state, $a_t$ is the action chosen, and $s_{t+1}$ is the next state at decision stage t. At each decision stage, the penguin can move in one of four directions: $a=0$ means try to go upward, $a=1$, right, $a=2$ down and $a=3$ left.\n",
|
||||||
|
"\n",
|
||||||
|
"However, the ice is slippery, so we don't always go the direction we want to: every time the agent chooses an action, with 0.25 probability, the environment changes the action taken to a differenct action, which is uniformly sampled from the other available actions.\n",
|
||||||
|
"\n",
|
||||||
|
"The rewards are deterministic; the penguin will receive a reward of +3 if it reaches the fish, -2 if it slips into a hole and 0 otherwise.\n",
|
||||||
|
"\n",
|
||||||
|
"Note that as for the states, we've indexed the actions from zero (unlike in the book) so they map to the indices of arrays better"
|
||||||
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "eBQ7lTpJQBSe"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# We're going to work on the problem depicted in figure 19.10a\n",
|
"# We're going to work on the problem depicted in figure 19.10a\n",
|
||||||
"n_rows = 4; n_cols = 4\n",
|
"n_rows = 4; n_cols = 4\n",
|
||||||
"layout = np.zeros(n_rows * n_cols)\n",
|
"layout = np.zeros(n_rows * n_cols)\n",
|
||||||
"reward_structure = np.zeros(n_rows * n_cols)\n",
|
"reward_structure = np.zeros(n_rows * n_cols)\n",
|
||||||
"layout[9] = 1 ; reward_structure[9] = -2\n",
|
"layout[9] = 1 ; reward_structure[9] = -2 # Hole\n",
|
||||||
"layout[10] = 1; reward_structure[10] = -2\n",
|
"layout[10] = 1; reward_structure[10] = -2 # Hole\n",
|
||||||
"layout[14] = 1; reward_structure[14] = -2\n",
|
"layout[14] = 1; reward_structure[14] = -2 # Hole\n",
|
||||||
"layout[15] = 2; reward_structure[15] = 3\n",
|
"layout[15] = 2; reward_structure[15] = 3 # Fish\n",
|
||||||
"initial_state = 0\n",
|
"initial_state = 0\n",
|
||||||
"mdp_drawer = DrawMDP(n_rows, n_cols)\n",
|
"mdp_drawer = DrawMDP(n_rows, n_cols)\n",
|
||||||
"mdp_drawer.draw(layout, state = initial_state, rewards=reward_structure, draw_state_index = True)"
|
"mdp_drawer.draw(layout, state = initial_state, rewards=reward_structure, draw_state_index = True)"
|
||||||
],
|
]
|
||||||
"metadata": {
|
|
||||||
"id": "eBQ7lTpJQBSe"
|
|
||||||
},
|
|
||||||
"execution_count": null,
|
|
||||||
"outputs": []
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
|
||||||
"For clarity, the black numbers are the state number and the red numbers are the reward for being in that state. Note that the states are indexed from 0 rather than 1 as in the book to make the code neater."
|
|
||||||
],
|
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "6Vku6v_se2IG"
|
"id": "6Vku6v_se2IG"
|
||||||
}
|
},
|
||||||
|
"source": [
|
||||||
|
"For clarity, the black numbers are the state number and the red numbers are the reward for being in that state. Note that the states are indexed from 0 rather than 1 as in the book to make the code neater."
|
||||||
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
|
"metadata": {
|
||||||
|
"id": "Fhc6DzZNOjiC"
|
||||||
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"Now let's define the state transition function $Pr(s_{t+1}|s_{t},a)$ in full where $a$ is the actions. Here $a=0$ means try to go upward, $a=1$, right, $a=2$ down and $a=3$ right. However, the ice is slippery, so we don't always go the direction we want to.\n",
|
"Now let's define the state transition function $Pr(s_{t+1}|s_{t},a)$ in full where $a$ is the actions. Here $a=0$ means try to go upward, $a=1$, right, $a=2$ down and $a=3$ right. However, the ice is slippery, so we don't always go the direction we want to.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Note that as for the states, we've indexed the actions from zero (unlike in the book) so they map to the indices of arrays better"
|
"Note that as for the states, we've indexed the actions from zero (unlike in the book) so they map to the indices of arrays better"
|
||||||
],
|
]
|
||||||
"metadata": {
|
|
||||||
"id": "Fhc6DzZNOjiC"
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "wROjgnqh76xN"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"transition_probabilities_given_action0 = np.array(\\\n",
|
"transition_probabilities_given_action0 = np.array(\\\n",
|
||||||
"[[0.00 , 0.33, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
"[[0.90, 0.05, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.50 , 0.00, 0.33, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.05, 0.85, 0.05, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.33, 0.00, 0.50, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.05, 0.85, 0.05, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.33, 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.05, 0.90, 0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.50 , 0.00, 0.00, 0.00, 0.00, 0.17, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.05, 0.00, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.34, 0.00, 0.00, 0.25, 0.00, 0.17, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.05, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.34, 0.00, 0.00, 0.17, 0.00, 0.25, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.05, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.50, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.17, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.25, 0.00, 0.17, 0.00, 0.00, 0.50, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.05, 0.00, 0.00, 0.85, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.17, 0.00, 0.25, 0.00, 0.00, 0.50, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.05, 0.00, 0.00, 0.85, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.75 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.10, 0.05, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.25, 0.00, 0.25, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.05, 0.05, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.25, 0.00, 0.25 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.05, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.25, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00]])\n",
|
||||||
"])\n",
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"transition_probabilities_given_action1 = np.array(\\\n",
|
"transition_probabilities_given_action1 = np.array(\\\n",
|
||||||
"[[0.00 , 0.25, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
"[[0.10, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.75 , 0.00, 0.25, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.85, 0.05, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.50, 0.00, 0.50, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.85, 0.05, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.33, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.85, 0.90, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.25 , 0.00, 0.00, 0.00, 0.00, 0.17, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.05, 0.00, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.25, 0.00, 0.00, 0.50, 0.00, 0.17, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.05, 0.00, 0.00, 0.85, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.25, 0.00, 0.00, 0.50, 0.00, 0.33, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.05, 0.00, 0.00, 0.85, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.50, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.33, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.85, 0.85, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.17, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.50, 0.00, 0.17, 0.00, 0.00, 0.25, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.85, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.50, 0.00, 0.33, 0.00, 0.00, 0.25, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.85, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.34, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.50 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.85, 0.85, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.10, 0.05, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.75, 0.00, 0.25, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.85, 0.05, 0.05, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.50, 0.00, 0.50 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.85, 0.05, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.34, 0.00, 0.00, 0.50, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.85, 0.00]])\n",
|
||||||
"])\n",
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"transition_probabilities_given_action2 = np.array(\\\n",
|
"transition_probabilities_given_action2 = np.array(\\\n",
|
||||||
"[[0.00 , 0.25, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
"[[0.10, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.25 , 0.00, 0.25, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.05, 0.05, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.25, 0.00, 0.25, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.05, 0.05, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.05, 0.10, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.75 , 0.00, 0.00, 0.00, 0.00, 0.17, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.85, 0.00, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.50, 0.00, 0.00, 0.25, 0.00, 0.17, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.85, 0.00, 0.00, 0.05, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.50, 0.00, 0.00, 0.16, 0.00, 0.25, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.85, 0.00, 0.00, 0.05, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.75, 0.00, 0.00, 0.16, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.17, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.25, 0.00, 0.17, 0.00, 0.00, 0.33, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.05, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.16, 0.00, 0.25, 0.00, 0.00, 0.33, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.05, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.16, 0.00, 0.00, 0.00, 0.00, 0.50 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.33, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.00, 0.90, 0.05, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.50, 0.00, 0.33, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.05, 0.85, 0.05, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.34, 0.00, 0.50 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.05, 0.85, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.34, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.85, 0.00, 0.00, 0.05, 0.00]])\n",
|
||||||
"])\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
"transition_probabilities_given_action3 = np.array(\\\n",
|
"transition_probabilities_given_action3 = np.array(\\\n",
|
||||||
"[[0.00 , 0.25, 0.00, 0.00, 0.33, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
"[[0.90, 0.85, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.50 , 0.00, 0.25, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.05, 0.05, 0.85, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.50, 0.00, 0.75, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.05, 0.05, 0.85, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.05, 0.10, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.50 , 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.33, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.05, 0.00, 0.00, 0.00, 0.85, 0.85, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.25, 0.00, 0.00, 0.33, 0.00, 0.50, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.85, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.25, 0.00, 0.00, 0.17, 0.00, 0.50, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.85, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.25, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.34, 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00, 0.50, 0.00, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.85, 0.85, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.33, 0.00, 0.50, 0.00, 0.00, 0.25, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.85, 0.00, 0.00, 0.05, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.17, 0.00, 0.50, 0.00, 0.00, 0.25, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00, 0.85, 0.00, 0.00, 0.05, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.17, 0.00, 0.00, 0.00, 0.00, 0.25 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.05, 0.00, 0.00, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.34, 0.00, 0.00, 0.00, 0.00, 0.50, 0.00, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.00, 0.90, 0.85, 0.00, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.50, 0.00, 0.50, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.05, 0.85, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.16, 0.00, 0.00, 0.25, 0.00, 0.75 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.05, 0.00],\n",
|
||||||
" [0.00 , 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.25, 0.00 ],\n",
|
" [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.05, 0.00, 0.00, 0.05, 0.00]])\n",
|
||||||
"])\n",
|
"\n",
|
||||||
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Store all of these in a three dimension array\n",
|
"# Store all of these in a three dimension array\n",
|
||||||
"# Pr(s_{t+1}=2|s_{t}=1, a_{t}=3] is stored at position [2,1,3]\n",
|
"# Pr(s_{t+1}=2|s_{t}=1, a_{t}=3] is stored at position [2,1,3]\n",
|
||||||
"transition_probabilities_given_action = np.concatenate((np.expand_dims(transition_probabilities_given_action0,2),\n",
|
"transition_probabilities_given_action = np.concatenate((np.expand_dims(transition_probabilities_given_action0,2),\n",
|
||||||
" np.expand_dims(transition_probabilities_given_action1,2),\n",
|
" np.expand_dims(transition_probabilities_given_action1,2),\n",
|
||||||
" np.expand_dims(transition_probabilities_given_action2,2),\n",
|
" np.expand_dims(transition_probabilities_given_action2,2),\n",
|
||||||
" np.expand_dims(transition_probabilities_given_action3,2)),axis=2)"
|
" np.expand_dims(transition_probabilities_given_action3,2)),axis=2)\n",
|
||||||
],
|
"\n",
|
||||||
"metadata": {
|
"print('Grid Size:', len(transition_probabilities_given_action[0]))\n",
|
||||||
"id": "l7rT78BbOgTi"
|
"print()\n",
|
||||||
|
"print('Transition Probabilities for when next state = 2:')\n",
|
||||||
|
"print(transition_probabilities_given_action[2])\n",
|
||||||
|
"print()\n",
|
||||||
|
"print('Transitions Probabilities for when next state = 2 and current state = 1')\n",
|
||||||
|
"print(transition_probabilities_given_action[2][1])\n",
|
||||||
|
"print()\n",
|
||||||
|
"print('Transitions Probabilities for when next state = 2 and current state = 1 and action = 3 (Left):')\n",
|
||||||
|
"print(transition_probabilities_given_action[2][1][3])"
|
||||||
|
]
|
||||||
},
|
},
|
||||||
"execution_count": null,
|
{
|
||||||
"outputs": []
|
"cell_type": "markdown",
|
||||||
|
"metadata": {
|
||||||
|
"id": "eblSQ6xZ76xN"
|
||||||
|
},
|
||||||
|
"source": [
|
||||||
|
"## Implementation Details\n",
|
||||||
|
"\n",
|
||||||
|
"We provide the following methods:\n",
|
||||||
|
"- **`markov_decision_process_step`** - this function simulates $Pr(s_{t+1} | s_{t}, a_{t})$. It randomly selects an action, updates the state based on the transition probabilities associated with the chosen action, and returns the new state, the reward obtained for leaving the current state, and the chosen action. The randomness in action selection and state transitions reflects a random exploration process and the stochastic nature of the MDP, respectively.\n",
|
||||||
|
"\n",
|
||||||
|
"- **`get_policy`** - this function computes a policy that acts greedily with respect to the state-action values. The policy is computed for all states and the action that maximizes the state-action value is chosen for each state. When there are multiple optimal actions, one is chosen at random.\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"You have to implement the following method:\n",
|
||||||
|
"\n",
|
||||||
|
"- **`q_learning_step`** - this function implements a single step of the Q-learning algorithm for reinforcement learning as shown below. The update follows the Q-learning formula and is controlled by parameters such as the learning rate (alpha) and the discount factor $(\\gamma)$. The function returns the updated state-action values matrix.\n",
|
||||||
|
"\n",
|
||||||
|
"$Q(s, a) \\leftarrow (1 - \\alpha) \\cdot Q(s, a) + \\alpha \\cdot \\left(r + \\gamma \\cdot \\max_{a'} Q(s', a')\\right)$"
|
||||||
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "cKLn4Iam76xN"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"def q_learning_step(state_action_values, reward, state, new_state, action, gamma, alpha = 0.1):\n",
|
"def get_policy(state_action_values):\n",
|
||||||
|
" policy = np.zeros(state_action_values.shape[1]) # One action for each state\n",
|
||||||
|
" for state in range(state_action_values.shape[1]):\n",
|
||||||
|
" # Break ties for maximising actions randomly\n",
|
||||||
|
" policy[state] = np.random.choice(np.flatnonzero(state_action_values[:, state] == max(state_action_values[:, state])))\n",
|
||||||
|
" return policy"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "akjrncMF-FkU"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def markov_decision_process_step(state, transition_probabilities_given_action, reward_structure, terminal_states, action=None):\n",
|
||||||
|
" # Pick action\n",
|
||||||
|
" if action is None:\n",
|
||||||
|
" action = np.random.randint(4)\n",
|
||||||
|
" # Update the state\n",
|
||||||
|
" new_state = np.random.choice(a=range(transition_probabilities_given_action.shape[0]), p = transition_probabilities_given_action[:, state,action])\n",
|
||||||
|
"\n",
|
||||||
|
" # Return the reward -- here the reward is for arriving at the state\n",
|
||||||
|
" reward = reward_structure[new_state]\n",
|
||||||
|
" is_terminal = new_state in [terminal_states]\n",
|
||||||
|
"\n",
|
||||||
|
" return new_state, reward, action, is_terminal"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "5pO6-9ACWhiV"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def q_learning_step(state_action_values, reward, state, new_state, action, is_terminal, gamma, alpha = 0.1):\n",
|
||||||
" # TODO -- write this function\n",
|
" # TODO -- write this function\n",
|
||||||
" # Replace this line\n",
|
" # Replace this line\n",
|
||||||
" state_action_values_after = np.copy(state_action_values)\n",
|
" state_action_values_after = np.copy(state_action_values)\n",
|
||||||
"\n",
|
"\n",
|
||||||
" return state_action_values_after"
|
" return state_action_values_after"
|
||||||
],
|
]
|
||||||
"metadata": {
|
|
||||||
"id": "5pO6-9ACWhiV"
|
|
||||||
},
|
|
||||||
"execution_count": null,
|
|
||||||
"outputs": []
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "markdown",
|
||||||
|
"metadata": {
|
||||||
|
"id": "u4OHTTk176xO"
|
||||||
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"# This takes a single step from an MDP which just has a completely random policy\n",
|
"Lets run this for a single Q-learning step"
|
||||||
"def markov_decision_process_step(state, transition_probabilities_given_action, reward_structure):\n",
|
]
|
||||||
" # Pick action\n",
|
|
||||||
" action = np.random.randint(4)\n",
|
|
||||||
" # Update the state\n",
|
|
||||||
" new_state = np.random.choice(a=np.arange(0,transition_probabilities_given_action.shape[0]),p = transition_probabilities_given_action[:,state,action])\n",
|
|
||||||
" # Return the reward -- here the reward is for leaving the state\n",
|
|
||||||
" reward = reward_structure[state]\n",
|
|
||||||
"\n",
|
|
||||||
" return new_state, reward, action"
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"id": "akjrncMF-FkU"
|
|
||||||
},
|
|
||||||
"execution_count": null,
|
|
||||||
"outputs": []
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "Fu5_VjvbSwfJ"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Initialize the state-action values to random numbers\n",
|
"# Initialize the state-action values to random numbers\n",
|
||||||
"np.random.seed(0)\n",
|
"np.random.seed(0)\n",
|
||||||
"n_state = transition_probabilities_given_action.shape[0]\n",
|
"n_state = transition_probabilities_given_action.shape[0]\n",
|
||||||
"n_action = transition_probabilities_given_action.shape[2]\n",
|
"n_action = transition_probabilities_given_action.shape[2]\n",
|
||||||
|
"terminal_states=[15]\n",
|
||||||
"state_action_values = np.random.normal(size=(n_action, n_state))\n",
|
"state_action_values = np.random.normal(size=(n_action, n_state))\n",
|
||||||
|
"# Hard code value of termination state of finding fish to 0\n",
|
||||||
|
"state_action_values[:, terminal_states] = 0\n",
|
||||||
"gamma = 0.9\n",
|
"gamma = 0.9\n",
|
||||||
"\n",
|
"\n",
|
||||||
"policy = np.argmax(state_action_values, axis=0).astype(int)\n",
|
"policy = get_policy(state_action_values)\n",
|
||||||
"mdp_drawer = DrawMDP(n_rows, n_cols)\n",
|
"mdp_drawer = DrawMDP(n_rows, n_cols)\n",
|
||||||
"mdp_drawer.draw(layout, policy = policy, state_action_values = state_action_values, rewards = reward_structure)\n",
|
"mdp_drawer.draw(layout, policy = policy, state_action_values = state_action_values, rewards = reward_structure)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Now let's simulate a single Q-learning step\n",
|
"# Now let's simulate a single Q-learning step\n",
|
||||||
"initial_state = 9\n",
|
"initial_state = 9\n",
|
||||||
"print(\"Initial state = \", initial_state)\n",
|
"print(\"Initial state =\",initial_state)\n",
|
||||||
"new_state, reward, action = markov_decision_process_step(initial_state, transition_probabilities_given_action, reward_structure)\n",
|
"new_state, reward, action, is_terminal = markov_decision_process_step(initial_state, transition_probabilities_given_action, reward_structure, terminal_states)\n",
|
||||||
"print(\"Action = \", action)\n",
|
"print(\"Action =\",action)\n",
|
||||||
"print(\"New state = \", new_state)\n",
|
"print(\"New state =\",new_state)\n",
|
||||||
"print(\"Reward = \", reward)\n",
|
"print(\"Reward =\", reward)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"state_action_values_after = q_learning_step(state_action_values, reward, initial_state, new_state, action, gamma)\n",
|
"state_action_values_after = q_learning_step(state_action_values, reward, initial_state, new_state, action, is_terminal, gamma)\n",
|
||||||
"print(\"Your value:\",state_action_values_after[action, initial_state])\n",
|
"print(\"Your value:\",state_action_values_after[action, initial_state])\n",
|
||||||
"print(\"True value: 0.27650262412468796\")\n",
|
"print(\"True value: 0.3024718977397814\")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"policy = np.argmax(state_action_values, axis=0).astype(int)\n",
|
"policy = get_policy(state_action_values)\n",
|
||||||
"mdp_drawer = DrawMDP(n_rows, n_cols)\n",
|
"mdp_drawer = DrawMDP(n_rows, n_cols)\n",
|
||||||
"mdp_drawer.draw(layout, policy = policy, state_action_values = state_action_values_after, rewards = reward_structure)\n"
|
"mdp_drawer.draw(layout, policy = policy, state_action_values = state_action_values_after, rewards = reward_structure)\n"
|
||||||
],
|
]
|
||||||
"metadata": {
|
|
||||||
"id": "Fu5_VjvbSwfJ"
|
|
||||||
},
|
|
||||||
"execution_count": null,
|
|
||||||
"outputs": []
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"source": [
|
|
||||||
"Now let's run this for a while and watch the policy improve"
|
|
||||||
],
|
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "Ogh0qucmb68J"
|
"id": "Ogh0qucmb68J"
|
||||||
}
|
},
|
||||||
|
"source": [
|
||||||
|
"Now let's run this for a while (20000) steps and watch the policy improve"
|
||||||
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "N6gFYifh76xO"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Initialize the state-action values to random numbers\n",
|
"# Initialize the state-action values to random numbers\n",
|
||||||
"np.random.seed(0)\n",
|
"np.random.seed(0)\n",
|
||||||
"n_state = transition_probabilities_given_action.shape[0]\n",
|
"n_state = transition_probabilities_given_action.shape[0]\n",
|
||||||
"n_action = transition_probabilities_given_action.shape[2]\n",
|
"n_action = transition_probabilities_given_action.shape[2]\n",
|
||||||
"state_action_values = np.random.normal(size=(n_action, n_state))\n",
|
"state_action_values = np.random.normal(size=(n_action, n_state))\n",
|
||||||
"# Hard code termination state of finding fish\n",
|
"\n",
|
||||||
"state_action_values[:,n_state-1] = 3.0\n",
|
"# Hard code value of termination state of finding fish to 0\n",
|
||||||
|
"terminal_states = [15]\n",
|
||||||
|
"state_action_values[:, terminal_states] = 0\n",
|
||||||
"gamma = 0.9\n",
|
"gamma = 0.9\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Draw the initial setup\n",
|
"# Draw the initial setup\n",
|
||||||
"policy = np.argmax(state_action_values, axis=0).astype(int)\n",
|
"print('Initial Policy:')\n",
|
||||||
|
"policy = get_policy(state_action_values)\n",
|
||||||
"mdp_drawer = DrawMDP(n_rows, n_cols)\n",
|
"mdp_drawer = DrawMDP(n_rows, n_cols)\n",
|
||||||
"mdp_drawer.draw(layout, policy = policy, state_action_values = state_action_values, rewards = reward_structure)\n",
|
"mdp_drawer.draw(layout, policy = policy, state_action_values = state_action_values, rewards = reward_structure)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"\n",
|
"state = np.random.randint(n_state-1)\n",
|
||||||
"state= np.random.randint(n_state-1)\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
"# Run for a number of iterations\n",
|
"# Run for a number of iterations\n",
|
||||||
"for c_iter in range(10000):\n",
|
"for c_iter in range(20000):\n",
|
||||||
" new_state, reward, action = markov_decision_process_step(state, transition_probabilities_given_action, reward_structure)\n",
|
" new_state, reward, action, is_terminal = markov_decision_process_step(state, transition_probabilities_given_action, reward_structure, terminal_states)\n",
|
||||||
" state_action_values_after = q_learning_step(state_action_values, reward, state, new_state, action, gamma)\n",
|
" state_action_values_after = q_learning_step(state_action_values, reward, state, new_state, action, is_terminal, gamma)\n",
|
||||||
|
"\n",
|
||||||
" # If in termination state, reset state randomly\n",
|
" # If in termination state, reset state randomly\n",
|
||||||
" if new_state==15:\n",
|
" if is_terminal:\n",
|
||||||
" state= np.random.randint(n_state-1)\n",
|
" state = np.random.randint(n_state-1)\n",
|
||||||
" else:\n",
|
" else:\n",
|
||||||
" state = new_state\n",
|
" state = new_state\n",
|
||||||
" # Update the policy\n",
|
|
||||||
" state_action_values = np.copy(state_action_values_after)\n",
|
|
||||||
" policy = np.argmax(state_action_values, axis=0).astype(int)\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
|
" # Update the policy\n",
|
||||||
|
" state_action_values = deepcopy(state_action_values_after)\n",
|
||||||
|
" policy = get_policy(state_action_values_after)\n",
|
||||||
|
"\n",
|
||||||
|
"print('Final Optimal Policy:')\n",
|
||||||
"# Draw the final situation\n",
|
"# Draw the final situation\n",
|
||||||
"mdp_drawer = DrawMDP(n_rows, n_cols)\n",
|
"mdp_drawer = DrawMDP(n_rows, n_cols)\n",
|
||||||
"mdp_drawer.draw(layout, policy = policy, state_action_values = state_action_values, rewards = reward_structure)"
|
"mdp_drawer.draw(layout, policy = policy, state_action_values = state_action_values, rewards = reward_structure)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {
|
||||||
|
"id": "djPTKuDk76xO"
|
||||||
|
},
|
||||||
|
"source": [
|
||||||
|
"Finally, lets run this for a **single** episode and visualize the penguin's actions"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "pWObQf2h76xO"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def get_one_episode(n_state, state_action_values, terminal_states, gamma):\n",
|
||||||
|
"\n",
|
||||||
|
" state = np.random.randint(n_state-1)\n",
|
||||||
|
"\n",
|
||||||
|
" # Create lists to store all the states seen and actions taken throughout the single episode\n",
|
||||||
|
" all_states = []\n",
|
||||||
|
" all_actions = []\n",
|
||||||
|
"\n",
|
||||||
|
" # Initalize episode termination flag\n",
|
||||||
|
" done = False\n",
|
||||||
|
" # Initialize counter for steps in the episode\n",
|
||||||
|
" steps = 0\n",
|
||||||
|
"\n",
|
||||||
|
" all_states.append(state)\n",
|
||||||
|
"\n",
|
||||||
|
" while not done:\n",
|
||||||
|
" steps += 1\n",
|
||||||
|
"\n",
|
||||||
|
" new_state, reward, action, is_terminal = markov_decision_process_step(state, transition_probabilities_given_action, reward_structure, terminal_states)\n",
|
||||||
|
" all_states.append(new_state)\n",
|
||||||
|
" all_actions.append(action)\n",
|
||||||
|
"\n",
|
||||||
|
" state_action_values_after = q_learning_step(state_action_values, reward, state, new_state, action, is_terminal, gamma)\n",
|
||||||
|
"\n",
|
||||||
|
" # If in termination state, reset state randomly\n",
|
||||||
|
" if is_terminal:\n",
|
||||||
|
" state = np.random.randint(n_state-1)\n",
|
||||||
|
" print(f'Episode Terminated at {steps} Steps')\n",
|
||||||
|
" # Set episode termination flag\n",
|
||||||
|
" done = True\n",
|
||||||
|
" else:\n",
|
||||||
|
" state = new_state\n",
|
||||||
|
"\n",
|
||||||
|
" # Update the policy\n",
|
||||||
|
" state_action_values = deepcopy(state_action_values_after)\n",
|
||||||
|
" policy = get_policy(state_action_values_after)\n",
|
||||||
|
"\n",
|
||||||
|
" return all_states, all_actions, policy, state_action_values\n",
|
||||||
|
""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "P7cbCGT176xO"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def visualize_one_episode(states, actions):\n",
|
||||||
|
" # Define actions for visualization\n",
|
||||||
|
" acts = ['up', 'right', 'down', 'left']\n",
|
||||||
|
"\n",
|
||||||
|
" # Iterate over the states and actions\n",
|
||||||
|
" for i in range(len(states)):\n",
|
||||||
|
"\n",
|
||||||
|
" if i == 0:\n",
|
||||||
|
" print('Starting State:', states[i])\n",
|
||||||
|
"\n",
|
||||||
|
" elif i == len(states)-1:\n",
|
||||||
|
" print('Episode Done:', states[i])\n",
|
||||||
|
"\n",
|
||||||
|
" else:\n",
|
||||||
|
" print('State', states[i-1])\n",
|
||||||
|
" a = actions[i]\n",
|
||||||
|
" print('Action:', acts[a])\n",
|
||||||
|
" print('Next State:', states[i])\n",
|
||||||
|
"\n",
|
||||||
|
" # Visualize the current state using the MDP drawer\n",
|
||||||
|
" mdp_drawer.draw(layout, state=states[i], rewards=reward_structure, draw_state_index=True)\n",
|
||||||
|
" clear_output(True)\n",
|
||||||
|
"\n",
|
||||||
|
" # Pause for a short duration to allow observation\n",
|
||||||
|
" sleep(1.5)\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "cr98F8PT76xP"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Initialize the state-action values to random numbers\n",
|
||||||
|
"np.random.seed(2)\n",
|
||||||
|
"n_state = transition_probabilities_given_action.shape[0]\n",
|
||||||
|
"n_action = transition_probabilities_given_action.shape[2]\n",
|
||||||
|
"state_action_values = np.random.normal(size=(n_action, n_state))\n",
|
||||||
|
"\n",
|
||||||
|
"# Hard code value of termination state of finding fish to 0\n",
|
||||||
|
"terminal_states = [15]\n",
|
||||||
|
"state_action_values[:, terminal_states] = 0\n",
|
||||||
|
"gamma = 0.9\n",
|
||||||
|
"\n",
|
||||||
|
"# Draw the initial setup\n",
|
||||||
|
"print('Initial Policy:')\n",
|
||||||
|
"policy = get_policy(state_action_values)\n",
|
||||||
|
"mdp_drawer = DrawMDP(n_rows, n_cols)\n",
|
||||||
|
"mdp_drawer.draw(layout, policy = policy, state_action_values = state_action_values, rewards = reward_structure)\n",
|
||||||
|
"\n",
|
||||||
|
"states, actions, policy, state_action_values = get_one_episode(n_state, state_action_values, terminal_states, gamma)\n",
|
||||||
|
"\n",
|
||||||
|
"print()\n",
|
||||||
|
"print('Final Optimal Policy:')\n",
|
||||||
|
"mdp_drawer = DrawMDP(n_rows, n_cols)\n",
|
||||||
|
"mdp_drawer.draw(layout, policy = policy, state_action_values = state_action_values, rewards = reward_structure)\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"id": "5zBu1g3776xP"
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"visualize_one_episode(states, actions)"
|
||||||
|
]
|
||||||
|
}
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "qQFhwVqPcCFH"
|
"colab": {
|
||||||
|
"provenance": [],
|
||||||
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"execution_count": null,
|
"kernelspec": {
|
||||||
"outputs": []
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.10.12"
|
||||||
}
|
}
|
||||||
]
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 0
|
||||||
}
|
}
|
||||||
@@ -4,7 +4,7 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"authorship_tag": "ABX9TyPkSYbEjOcEmLt8tU6HxNuR",
|
"authorship_tag": "ABX9TyNgBRvfIlngVobKuLE6leM+",
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -45,8 +45,8 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# Run this if you're in a Colab to make a local copy of the MNIST 1D repository\n",
|
"# Run this if you're in a Colab to install MNIST 1D repository\n",
|
||||||
"!git clone https://github.com/greydanus/mnist1d"
|
"!pip install git+https://github.com/greydanus/mnist1d"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "D5yLObtZCi9J"
|
"id": "D5yLObtZCi9J"
|
||||||
|
|||||||
@@ -4,7 +4,7 @@
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"authorship_tag": "ABX9TyOo4vm4MXcIvAzVlMCaLikH",
|
"authorship_tag": "ABX9TyO6xuszaG4nNAcWy/3juLkn",
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -44,8 +44,8 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# Run this if you're in a Colab to make a local copy of the MNIST 1D repository\n",
|
"# Run this if you're in a Colab to install MNIST 1D repository\n",
|
||||||
"!git clone https://github.com/greydanus/mnist1d"
|
"!pip install git+https://github.com/greydanus/mnist1d"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "D5yLObtZCi9J"
|
"id": "D5yLObtZCi9J"
|
||||||
|
|||||||
@@ -5,7 +5,7 @@
|
|||||||
"colab": {
|
"colab": {
|
||||||
"provenance": [],
|
"provenance": [],
|
||||||
"gpuType": "T4",
|
"gpuType": "T4",
|
||||||
"authorship_tag": "ABX9TyMjPBfDONmjqTSyEQDP2gjY",
|
"authorship_tag": "ABX9TyOG/5A+P053/x1IfFg52z4V",
|
||||||
"include_colab_link": true
|
"include_colab_link": true
|
||||||
},
|
},
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
@@ -47,8 +47,8 @@
|
|||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"source": [
|
"source": [
|
||||||
"# Run this if you're in a Colab to make a local copy of the MNIST 1D repository\n",
|
"# Run this if you're in a Colab to install MNIST 1D repository\n",
|
||||||
"!git clone https://github.com/greydanus/mnist1d"
|
"!pip install git+https://github.com/greydanus/mnist1d"
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "D5yLObtZCi9J"
|
"id": "D5yLObtZCi9J"
|
||||||
|
|||||||
@@ -43,8 +43,8 @@
|
|||||||
"id": "Sg2i1QmhKW5d"
|
"id": "Sg2i1QmhKW5d"
|
||||||
},
|
},
|
||||||
"source": [
|
"source": [
|
||||||
"# Run this if you're in a Colab\n",
|
"# Run this if you're in a Colab to install MNIST 1D repository\n",
|
||||||
"!git clone https://github.com/greydanus/mnist1d"
|
"!pip install git+https://github.com/greydanus/mnist1d"
|
||||||
],
|
],
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"outputs": []
|
"outputs": []
|
||||||
|
|||||||
7
Notebooks/LICENSE (MIT)
Normal file
7
Notebooks/LICENSE (MIT)
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
Copyright 2023 Simon Prince
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
34
README.md
Normal file
34
README.md
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
# Understanding Deep Learning
|
||||||
|
|
||||||
|
Understanding Deep Learning - Simon J.D. Prince
|
||||||
|
|
||||||
|
## Website
|
||||||
|
|
||||||
|
```shell
|
||||||
|
# Install dependencies
|
||||||
|
npm install
|
||||||
|
|
||||||
|
# Run the website in development mode
|
||||||
|
npm dev
|
||||||
|
|
||||||
|
# Build the website
|
||||||
|
npm build
|
||||||
|
|
||||||
|
# Preview the built website
|
||||||
|
npm preview
|
||||||
|
|
||||||
|
# Format the code
|
||||||
|
npm run format
|
||||||
|
|
||||||
|
# Lint the code
|
||||||
|
npm run lint
|
||||||
|
|
||||||
|
# Clean the repository
|
||||||
|
npm run clean
|
||||||
|
|
||||||
|
# Prepare to deploy the website
|
||||||
|
npm run predeploy
|
||||||
|
|
||||||
|
# Deploy the website
|
||||||
|
npm run deploy
|
||||||
|
```
|
||||||
Binary file not shown.
BIN
UDL_Errata.pdf
BIN
UDL_Errata.pdf
Binary file not shown.
423
index.html
423
index.html
@@ -1,406 +1,19 @@
|
|||||||
<!DOCTYPE html>
|
<!doctype html>
|
||||||
<html lang="en">
|
<html lang="en">
|
||||||
<head>
|
<head>
|
||||||
<meta charset="UTF-8">
|
<meta charset="utf-8" />
|
||||||
<title>udlbook</title>
|
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||||
<link rel="stylesheet" href="style.css">
|
<link rel="icon" type="image/x-icon" href="/favicon.ico" />
|
||||||
</head>
|
<link rel="preconnect" href="https://fonts.googleapis.com" />
|
||||||
|
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin />
|
||||||
<body>
|
<link
|
||||||
<div id="head">
|
href="https://fonts.googleapis.com/css2?family=Encode+Sans+Expanded:wght@400;700&display=swap"
|
||||||
<div>
|
rel="stylesheet"
|
||||||
<h1 style="margin: 0; font-size: 36px">Understanding Deep Learning</h1>
|
/>
|
||||||
by Simon J.D. Prince
|
<title>Understanding Deep Learning</title>
|
||||||
<br>Published by MIT Press Dec 5th 2023.<br>
|
</head>
|
||||||
<ul>
|
<body>
|
||||||
<li>
|
<div id="root"></div>
|
||||||
<p style="font-size: larger; margin-bottom: 0">Download draft PDF Chapters 1-21 <a
|
<script type="module" src="/src/index.jsx"></script>
|
||||||
href="https://github.com/udlbook/udlbook/releases/download/v1.19/UnderstandingDeepLearning_16_12_23_C.pdf">here</a>
|
</body>
|
||||||
</p>2024-01-16. CC-BY-NC-ND license<br>
|
</html>
|
||||||
<img src="https://img.shields.io/github/downloads/udlbook/udlbook/total" alt="download stats shield">
|
|
||||||
</li>
|
|
||||||
<li> Order your copy from <a href="https://mitpress.mit.edu/9780262048644/understanding-deep-learning/">here </a></li>
|
|
||||||
<li> Known errata can be found here: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/UDL_Errata.pdf">PDF</a></li>
|
|
||||||
<li> Report new errata via <a href="https://github.com/udlbook/udlbook/issues">github</a>
|
|
||||||
or contact me directly at udlbookmail@gmail.com
|
|
||||||
<li> Follow me on <a href="https://twitter.com/SimonPrinceAI">Twitter</a> or <a
|
|
||||||
href="https://www.linkedin.com/in/simon-prince-615bb9165/">LinkedIn</a> for updates.
|
|
||||||
</ul>
|
|
||||||
<h2>Table of contents</h2>
|
|
||||||
<ul>
|
|
||||||
<li> Chapter 1 - Introduction
|
|
||||||
<li> Chapter 2 - Supervised learning
|
|
||||||
<li> Chapter 3 - Shallow neural networks
|
|
||||||
<li> Chapter 4 - Deep neural networks
|
|
||||||
<li> Chapter 5 - Loss functions
|
|
||||||
<li> Chapter 6 - Training models
|
|
||||||
<li> Chapter 7 - Gradients and initialization
|
|
||||||
<li> Chapter 8 - Measuring performance
|
|
||||||
<li> Chapter 9 - Regularization
|
|
||||||
<li> Chapter 10 - Convolutional networks
|
|
||||||
<li> Chapter 11 - Residual networks
|
|
||||||
<li> Chapter 12 - Transformers
|
|
||||||
<li> Chapter 13 - Graph neural networks
|
|
||||||
<li> Chapter 14 - Unsupervised learning
|
|
||||||
<li> Chapter 15 - Generative adversarial networks
|
|
||||||
<li> Chapter 16 - Normalizing flows
|
|
||||||
<li> Chapter 17 - Variational autoencoders
|
|
||||||
<li> Chapter 18 - Diffusion models
|
|
||||||
<li> Chapter 19 - Deep reinforcement learning
|
|
||||||
<li> Chapter 20 - Why does deep learning work?
|
|
||||||
<li> Chapter 21 - Deep learning and ethics
|
|
||||||
</ul>
|
|
||||||
</div>
|
|
||||||
<div id="cover">
|
|
||||||
<img src="https://raw.githubusercontent.com/udlbook/udlbook/main/UDLCoverSmall.jpg"
|
|
||||||
alt="front cover">
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<div id="body">
|
|
||||||
<h2>Resources for instructors </h2>
|
|
||||||
<p>Instructor answer booklet available with proof of credentials via <a
|
|
||||||
href="https://mitpress.mit.edu/9780262048644/understanding-deep-learning"> MIT Press</a>.</p>
|
|
||||||
<p>Request an exam/desk copy via <a href="https://mitpress.ublish.com/request?cri=15055">MIT Press</a>.</p>
|
|
||||||
<p>Figures in PDF (vector) / SVG (vector) / Powerpoint (images):
|
|
||||||
<ul>
|
|
||||||
<li> Chapter 1 - Introduction: <a href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap1PDF.zip">PDF
|
|
||||||
Figures</a> / <a href="https://drive.google.com/uc?export=download&id=1udnl5pUOAc8DcAQ7HQwyzP9pwL95ynnv">
|
|
||||||
SVG
|
|
||||||
Figures</a> / <a
|
|
||||||
href="https://docs.google.com/presentation/d/1IjTqIUvWCJc71b5vEJYte-Dwujcp7rvG/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">PowerPoint
|
|
||||||
Figures</a>
|
|
||||||
<li> Chapter 2 - Supervised learning: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap2PDF.zip">PDF Figures</a> / <a
|
|
||||||
href="https://drive.google.com/uc?export=download&id=1VSxcU5y1qNFlmd3Lb3uOWyzILuOj1Dla"> SVG Figures</a>
|
|
||||||
/
|
|
||||||
<a href="https://docs.google.com/presentation/d/1Br7R01ROtRWPlNhC_KOommeHAWMBpWtz/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">PowerPoint
|
|
||||||
Figures</a>
|
|
||||||
<li> Chapter 3 - Shallow neural networks: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap3PDF.zip">PDF Figures</a> / <a
|
|
||||||
href="https://drive.google.com/uc?export=download&id=19kZFWlXhzN82Zx02ByMmSZOO4T41fmqI"> SVG Figures</a>
|
|
||||||
/
|
|
||||||
<a href="https://docs.google.com/presentation/d/1e9M3jB5I9qZ4dCBY90Q3Hwft_i068QVQ/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">PowerPoint
|
|
||||||
Figures</a>
|
|
||||||
<li> Chapter 4 - Deep neural networks: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap4PDF.zip">PDF Figures</a> / <a
|
|
||||||
href="https://drive.google.com/uc?export=download&id=1ojr0ebsOhzvS04ItAflX2cVmYqHQHZUa"> SVG Figures</a>
|
|
||||||
/
|
|
||||||
<a href="https://docs.google.com/presentation/d/1LTSsmY4mMrJbqXVvoTOCkQwHrRKoYnJj/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">PowerPoint
|
|
||||||
Figures</a>
|
|
||||||
<li> Chapter 5 - Loss functions: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap5PDF.zip">PDF
|
|
||||||
Figures</a> / <a href="https://drive.google.com/uc?export=download&id=17MJO7fiMpFZVqKeqXTbQ36AMpmR4GizZ">
|
|
||||||
SVG
|
|
||||||
Figures</a> / <a
|
|
||||||
href="https://docs.google.com/presentation/d/1gcpC_3z9oRp87eMkoco-kdLD-MM54Puk/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">PowerPoint
|
|
||||||
Figures</a>
|
|
||||||
<li> Chapter 6 - Training models: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap6PDF.zip">PDF
|
|
||||||
Figures</a> / <a href="https://drive.google.com/uc?export=download&id=1VPdhFRnCr9_idTrX0UdHKGAw2shUuwhK">
|
|
||||||
SVG
|
|
||||||
Figures</a> / <a
|
|
||||||
href="https://docs.google.com/presentation/d/1AKoeggAFBl9yLC7X5tushAGzCCxmB7EY/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">PowerPoint
|
|
||||||
Figures</a>
|
|
||||||
<li> Chapter 7 - Gradients and initialization: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap7PDF.zip">PDF Figures</a> / <a
|
|
||||||
href="https://drive.google.com/uc?export=download&id=1TTl4gvrTvNbegnml4CoGoKOOd6O8-PGs"> SVG Figures</a>
|
|
||||||
/
|
|
||||||
<a href="https://docs.google.com/presentation/d/11zhB6PI-Dp6Ogmr4IcI6fbvbqNqLyYcz/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">PowerPoint
|
|
||||||
Figures</a>
|
|
||||||
<li> Chapter 8 - Measuring performance: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap8PDF.zip">PDF Figures</a> / <a
|
|
||||||
href="https://drive.google.com/uc?export=download&id=19eQOnygd_l0DzgtJxXuYnWa4z7QKJrJx"> SVG Figures</a>
|
|
||||||
/
|
|
||||||
<a href="https://docs.google.com/presentation/d/1SHRmJscDLUuQrG7tmysnScb3ZUAqVMZo/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">PowerPoint
|
|
||||||
Figures</a>
|
|
||||||
<li> Chapter 9 - Regularization: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap9PDF.zip">PDF
|
|
||||||
Figures</a> / <a href="https://drive.google.com/uc?export=download&id=1LprgnUGL7xAM9-jlGZC9LhMPeefjY0r0">
|
|
||||||
SVG
|
|
||||||
Figures</a> / <a
|
|
||||||
href="https://docs.google.com/presentation/d/1VwIfvjpdfTny6sEfu4ZETwCnw6m8Eg-5/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">PowerPoint
|
|
||||||
Figures</a>
|
|
||||||
<li> Chapter 10 - Convolutional networks: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap10PDF.zip">PDF Figures</a> / <a
|
|
||||||
href="https://drive.google.com/uc?export=download&id=1-Wb3VzaSvVeRzoUzJbI2JjZE0uwqupM9"> SVG Figures</a>
|
|
||||||
/
|
|
||||||
<a href="https://docs.google.com/presentation/d/1MtfKBC4Y9hWwGqeP6DVwUNbi1j5ncQCg/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">PowerPoint
|
|
||||||
Figures</a>
|
|
||||||
<li> Chapter 11 - Residual networks: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap11PDF.zip">PDF Figures</a> / <a
|
|
||||||
href="https://drive.google.com/uc?export=download&id=1Mr58jzEVseUAfNYbGWCQyDtEDwvfHRi1"> SVG Figures</a>
|
|
||||||
/
|
|
||||||
<a href="https://docs.google.com/presentation/d/1saY8Faz0KTKAAifUrbkQdLA2qkyEjOPI/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">PowerPoint
|
|
||||||
Figures</a>
|
|
||||||
<li> Chapter 12 - Transformers: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap12PDF.zip">PDF
|
|
||||||
Figures</a> / <a href="https://drive.google.com/uc?export=download&id=1txzOVNf8-jH4UfJ6SLnrtOfPd1Q3ebzd">
|
|
||||||
SVG
|
|
||||||
Figures</a> / <a
|
|
||||||
href="https://docs.google.com/presentation/d/1GVNvYWa0WJA6oKg89qZre-UZEhABfm0l/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">PowerPoint
|
|
||||||
Figures</a>
|
|
||||||
<li> Chapter 13 - Graph neural networks: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap13PDF.zip">PDF Figures</a> / <a
|
|
||||||
href="https://drive.google.com/uc?export=download&id=1lQIV6nRp6LVfaMgpGFhuwEXG-lTEaAwe"> SVG Figures</a>
|
|
||||||
/
|
|
||||||
<a href="https://docs.google.com/presentation/d/1YwF3U82c1mQ74c1WqHVTzLZ0j7GgKaWP/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">PowerPoint
|
|
||||||
Figures</a>
|
|
||||||
<li> Chapter 14 - Unsupervised learning: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap14PDF.zip">PDF Figures</a> / <a
|
|
||||||
href="https://drive.google.com/uc?export=download&id=1aMbI6iCuUvOywqk5pBOmppJu1L1anqsM"> SVG Figures</a>
|
|
||||||
/
|
|
||||||
<a href="https://docs.google.com/presentation/d/1A-lBGv3NHl4L32NvfFgy1EKeSwY-0UeB/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">
|
|
||||||
PowerPoint Figures</a>
|
|
||||||
<li> Chapter 15 - Generative adversarial networks: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap15PDF.zip">PDF Figures</a> / <a
|
|
||||||
href="https://drive.google.com/uc?export=download&id=1EErnlZCOlXc3HK7m83T2Jh_0NzIUHvtL"> SVG Figures</a>
|
|
||||||
/
|
|
||||||
<a href="https://docs.google.com/presentation/d/10Ernk41ShOTf4IYkMD-l4dJfKATkXH4w/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">PowerPoint
|
|
||||||
Figures</a>
|
|
||||||
<li> Chapter 16 - Normalizing flows: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap16PDF.zip">PDF Figures</a> / <a
|
|
||||||
href="https://drive.google.com/uc?export=download&id=1B9bxtmdugwtg-b7Y4AdQKAIEVWxjx8l3"> SVG Figures</a>
|
|
||||||
/
|
|
||||||
<a href="https://docs.google.com/presentation/d/1nLLzqb9pdfF_h6i1HUDSyp7kSMIkSUUA/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">PowerPoint
|
|
||||||
Figures</a>
|
|
||||||
<li> Chapter 17 - Variational autoencoders: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap17PDF.zip">PDF Figures</a> / <a
|
|
||||||
href="https://drive.google.com/uc?export=download&id=1SNtNIY7khlHQYMtaOH-FosSH3kWwL4b7"> SVG Figures</a>
|
|
||||||
/
|
|
||||||
<a href="https://docs.google.com/presentation/d/1lQE4Bu7-LgvV2VlJOt_4dQT-kusYl7Vo/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">PowerPoint
|
|
||||||
Figures</a>
|
|
||||||
<li> Chapter 18 - Diffusion models: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap18PDF.zip">PDF Figures</a> / <a
|
|
||||||
href="https://drive.google.com/uc?export=download&id=1A-pIGl4PxjVMYOKAUG3aT4a8wD3G-q_r"> SVG Figures</a>
|
|
||||||
/
|
|
||||||
<a href="https://docs.google.com/presentation/d/1x_ufIBtVPzWUvRieKMkpw5SdRjXWwdfR/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">
|
|
||||||
PowerPoint Figures</a>
|
|
||||||
<li> Chapter 19 - Deep reinforcement learning: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap19PDF.zip">PDF Figures</a> / <a
|
|
||||||
href="https://drive.google.com/uc?export=download&id=1a5WUoF7jeSgwC_PVdckJi1Gny46fCqh0"> SVG Figures</a>
|
|
||||||
/
|
|
||||||
<a href="https://docs.google.com/presentation/d/1TnYmVbFNhmMFetbjyfXGmkxp1EHauMqr/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">
|
|
||||||
PowerPoint Figures </a>
|
|
||||||
<li> Chapter 20 - Why does deep learning work?: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap20PDF.zip">PDF Figures</a> / <a
|
|
||||||
href="https://drive.google.com/uc?export=download&id=1M2d0DHEgddAQoIedKSDTTt7m1ZdmBLQ3"> SVG Figures</a>
|
|
||||||
/
|
|
||||||
<a href="https://docs.google.com/presentation/d/1coxF4IsrCzDTLrNjRagHvqB_FBy10miA/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">
|
|
||||||
PowerPoint Figures</a>
|
|
||||||
<li> Chapter 21 - Deep learning and ethics: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLChap21PDF.zip">PDF Figures</a> / <a
|
|
||||||
href="https://drive.google.com/uc?export=download&id=1jixmFfwmZkW_UVYzcxmDcMsdFFtnZ0bU"> SVG Figures</a>/
|
|
||||||
<a
|
|
||||||
href="https://docs.google.com/presentation/d/1EtfzanZYILvi9_-Idm28zD94I_6OrN9R/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">PowerPoint
|
|
||||||
Figures</a>
|
|
||||||
<li> Appendices - <a href="https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDLAppendixPDF.zip">PDF
|
|
||||||
Figures</a> / <a href="https://drive.google.com/uc?export=download&id=1k2j7hMN40ISPSg9skFYWFL3oZT7r8v-l">
|
|
||||||
SVG
|
|
||||||
Figures</a> / <a
|
|
||||||
href="https://docs.google.com/presentation/d/1_2cJHRnsoQQHst0rwZssv-XH4o5SEHks/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true">Powerpoint
|
|
||||||
Figures</a>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
Instructions for editing figures / equations can be found <a
|
|
||||||
href="https://drive.google.com/file/d/1T_MXXVR4AfyMnlEFI-UVDh--FXI5deAp/view?usp=sharing">here</a>.
|
|
||||||
|
|
||||||
<p> My slides for 20 lecture undergraduate deep learning course:</p>
|
|
||||||
<ul>
|
|
||||||
<li><a href="https://drive.google.com/uc?export=download&id=17RHb11BrydOvxSFNbRIomE1QKLVI087m">1. Introduction</a></li>
|
|
||||||
<li><a href="https://drive.google.com/uc?export=download&id=1491zkHULC7gDfqlV6cqUxyVYXZ-de-Ub">2. Supervised Learning</a></li>
|
|
||||||
<li><a href="https://drive.google.com/uc?export=download&id=1XkP1c9EhOBowla1rT1nnsDGMf2rZvrt7">3. Shallow Neural Networks</a></li>
|
|
||||||
<li><a href="https://drive.google.com/uc?export=download&id=1e2ejfZbbfMKLBv0v-tvBWBdI8gO3SSS1">4. Deep Neural Networks</a></li>
|
|
||||||
<li><a href="https://drive.google.com/uc?export=download&id=1fxQ_a1Q3eFPZ4kPqKbak6_emJK-JfnRH">5. Loss Functions</a></li>
|
|
||||||
<li><a href="https://drive.google.com/uc?export=download&id=17QQ5ZzXBtR_uCNCUU1gPRWWRUeZN9exW">6. Fitting Models</a></li>
|
|
||||||
<li><a href="https://drive.google.com/uc?export=download&id=1hC8JUCOaFWiw3KGn0rm7nW6mEq242QDK">7. Computing Gradients</a></li>
|
|
||||||
<li><a href="https://drive.google.com/uc?export=download&id=1tSjCeAVg0JCeBcPgDJDbi7Gg43Qkh9_d">7b. Initialization</a></li>
|
|
||||||
<li><a href="https://drive.google.com/uc?export=download&id=1RVZW3KjEs0vNSGx3B2fdizddlr6I0wLl">8. Performance</a></li>
|
|
||||||
<li><a href="https://drive.google.com/uc?export=download&id=1LTicIKPRPbZRkkg6qOr1DSuOB72axood">9. Regularization</a></li>
|
|
||||||
<li><a href="https://drive.google.com/uc?export=download&id=1bGVuwAwrofzZdfvj267elIzkYMIvYFj0">10. Convolutional Networks</a></li>
|
|
||||||
<li><a href="https://drive.google.com/uc?export=download&id=14w31QqWRDix1GdUE-na0_E0kGKBhtKzs">11. Image Generation</a></li>
|
|
||||||
<li><a href="https://drive.google.com/uc?export=download&id=1af6bTTjAbhDYfrDhboW7Fuv52Gk9ygKr">12. Transformers and LLMs</a></li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<h2>Resources for students</h2>
|
|
||||||
|
|
||||||
<p>Answers to selected questions: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/raw/main/UDL_Answer_Booklet_Students.pdf">PDF</a>
|
|
||||||
</p>
|
|
||||||
<p>Python notebooks: (Early ones more thoroughly tested than later ones!)</p>
|
|
||||||
|
|
||||||
<ul>
|
|
||||||
<li> Notebook 1.1 - Background mathematics: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap01/1_1_BackgroundMathematics.ipynb">ipynb/colab</a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 2.1 - Supervised learning: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap02/2_1_Supervised_Learning.ipynb">ipynb/colab</a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 3.1 - Shallow networks I: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap03/3_1_Shallow_Networks_I.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 3.2 - Shallow networks II: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap03/3_2_Shallow_Networks_II.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 3.3 - Shallow network regions: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap03/3_3_Shallow_Network_Regions.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 3.4 - Activation functions: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap03/3_4_Activation_Functions.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 4.1 - Composing networks: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap04/4_1_Composing_Networks.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 4.2 - Clipping functions: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap04/4_2_Clipping_functions.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 4.3 - Deep networks: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap04/4_3_Deep_Networks.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 5.1 - Least squares loss: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap05/5_1_Least_Squares_Loss.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 5.2 - Binary cross-entropy loss: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap05/5_2_Binary_Cross_Entropy_Loss.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 5.3 - Multiclass cross-entropy loss: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap05/5_3_Multiclass_Cross_entropy_Loss.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 6.1 - Line search: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap06/6_1_Line_Search.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 6.2 - Gradient descent: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap06/6_2_Gradient_Descent.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 6.3 - Stochastic gradient descent: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap06/6_3_Stochastic_Gradient_Descent.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 6.4 - Momentum: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap06/6_4_Momentum.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 6.5 - Adam: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap06/6_5_Adam.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 7.1 - Backpropagation in toy model: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap07/7_1_Backpropagation_in_Toy_Model.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 7.2 - Backpropagation: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap07/7_2_Backpropagation.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 7.3 - Initialization: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap07/7_3_Initialization.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 8.1 - MNIST-1D performance: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap08/8_1_MNIST_1D_Performance.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 8.2 - Bias-variance trade-off: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap08/8_2_Bias_Variance_Trade_Off.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 8.3 - Double descent: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap08/8_3_Double_Descent.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 8.4 - High-dimensional spaces: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap08/8_4_High_Dimensional_Spaces.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 9.1 - L2 regularization: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap09/9_1_L2_Regularization.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 9.2 - Implicit regularization: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap09/9_2_Implicit_Regularization.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 9.3 - Ensembling: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap09/9_3_Ensembling.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 9.4 - Bayesian approach: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap09/9_4_Bayesian_Approach.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 9.5 - Augmentation <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap09/9_5_Augmentation.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 10.1 - 1D convolution: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap10/10_1_1D_Convolution.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 10.2 - Convolution for MNIST-1D: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap10/10_2_Convolution_for_MNIST_1D.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 10.3 - 2D convolution: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap10/10_3_2D_Convolution.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 10.4 - Downsampling & upsampling: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap10/10_4_Downsampling_and_Upsampling.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 10.5 - Convolution for MNIST: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap10/10_5_Convolution_For_MNIST.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 11.1 - Shattered gradients: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap11/11_1_Shattered_Gradients.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 11.2 - Residual networks: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap11/11_2_Residual_Networks.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 11.3 - Batch normalization: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap11/11_3_Batch_Normalization.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 12.1 - Self-attention: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap12/12_1_Self_Attention.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 12.2 - Multi-head self-attention: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap12/12_2_Multihead_Self_Attention.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 12.3 - Tokenization: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap12/12_3_Tokenization.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 12.4 - Decoding strategies: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap12/12_4_Decoding_Strategies.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 13.1 - Encoding graphs: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap13/13_1_Graph_Representation.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 13.2 - Graph classification : <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap13/13_2_Graph_Classification.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 13.3 - Neighborhood sampling: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap13/13_3_Neighborhood_Sampling.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 13.4 - Graph attention: <a
|
|
||||||
href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap13/13_4_Graph_Attention_Networks.ipynb">ipynb/colab </a>
|
|
||||||
</li>
|
|
||||||
<li> Notebook 15.1 - GAN toy example: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap15/15_1_GAN_Toy_Example.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 15.2 - Wasserstein distance: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap15/15_2_Wasserstein_Distance.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 16.1 - 1D normalizing flows: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap16/16_1_1D_Normalizing_Flows.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 16.2 - Autoregressive flows: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap16/16_2_Autoregressive_Flows.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 16.3 - Contraction mappings: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap16/16_3_Contraction_Mappings.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 17.1 - Latent variable models: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap17/17_1_Latent_Variable_Models.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 17.2 - Reparameterization trick: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap17/17_2_Reparameterization_Trick.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 17.3 - Importance sampling: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap17/17_3_Importance_Sampling.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 18.1 - Diffusion encoder: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap18/18_1_Diffusion_Encoder.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 18.2 - 1D diffusion model: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap18/18_2_1D_Diffusion_Model.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 18.3 - Reparameterized model: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap18/18_3_Reparameterized_Model.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 18.4 - Families of diffusion models: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap18/18_4_Families_of_Diffusion_Models.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 19.1 - Markov decision processes: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap19/19_1_Markov_Decision_Processes.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 19.2 - Dynamic programming: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap19/19_2_Dynamic_Programming.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 19.3 - Monte-Carlo methods: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap19/19_3_Monte_Carlo_Methods.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 19.4 - Temporal difference methods: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap19/19_4_Temporal_Difference_Methods.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 19.5 - Control variates: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap19/19_5_Control_Variates.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 20.1 - Random data: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap20/20_1_Random_Data.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 20.2 - Full-batch gradient descent: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap20/20_2_Full_Batch_Gradient_Descent.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 20.3 - Lottery tickets: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap20/20_3_Lottery_Tickets.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 20.4 - Adversarial attacks: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap20/20_4_Adversarial_Attacks.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 21.1 - Bias mitigation: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap21/21_1_Bias_Mitigation.ipynb">ipynb/colab </a></li>
|
|
||||||
<li> Notebook 21.2 - Explainability: <a href="https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap21/21_2_Explainability.ipynb">ipynb/colab </a></li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
|
|
||||||
<br>
|
|
||||||
<h2>Citation</h2>
|
|
||||||
<pre><code>
|
|
||||||
@book{prince2023understanding,
|
|
||||||
author = "Simon J.D. Prince",
|
|
||||||
title = "Understanding Deep Learning",
|
|
||||||
publisher = "MIT Press",
|
|
||||||
year = 2023,
|
|
||||||
url = "http://udlbook.com"
|
|
||||||
}
|
|
||||||
</code></pre>
|
|
||||||
</div>
|
|
||||||
</body>
|
|
||||||
|
|||||||
8
jsconfig.json
Normal file
8
jsconfig.json
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
{
|
||||||
|
"compilerOptions": {
|
||||||
|
"baseUrl": "./",
|
||||||
|
"paths": {
|
||||||
|
"@/*": ["src/*"]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
4457
package-lock.json
generated
Normal file
4457
package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load Diff
36
package.json
Executable file
36
package.json
Executable file
@@ -0,0 +1,36 @@
|
|||||||
|
{
|
||||||
|
"name": "udlbook-website",
|
||||||
|
"version": "0.1.0",
|
||||||
|
"private": true,
|
||||||
|
"homepage": "https://udlbook.github.io/udlbook",
|
||||||
|
"type": "module",
|
||||||
|
"scripts": {
|
||||||
|
"dev": "vite",
|
||||||
|
"build": "vite build",
|
||||||
|
"preview": "vite preview",
|
||||||
|
"lint": "eslint . --ext js,jsx --report-unused-disable-directives --max-warnings 0",
|
||||||
|
"predeploy": "npm run build",
|
||||||
|
"deploy": "gh-pages -d dist",
|
||||||
|
"clean": "rm -rf node_modules dist",
|
||||||
|
"format": "prettier --write ."
|
||||||
|
},
|
||||||
|
"dependencies": {
|
||||||
|
"react": "^18.3.1",
|
||||||
|
"react-dom": "^18.3.1",
|
||||||
|
"react-icons": "^5.2.1",
|
||||||
|
"react-router-dom": "^6.23.1",
|
||||||
|
"react-scroll": "^1.8.4",
|
||||||
|
"styled-components": "^6.1.11"
|
||||||
|
},
|
||||||
|
"devDependencies": {
|
||||||
|
"@vitejs/plugin-react-swc": "^3.5.0",
|
||||||
|
"eslint": "^8.57.0",
|
||||||
|
"eslint-plugin-react": "^7.34.2",
|
||||||
|
"eslint-plugin-react-hooks": "^4.6.2",
|
||||||
|
"eslint-plugin-react-refresh": "^0.4.7",
|
||||||
|
"gh-pages": "^6.1.1",
|
||||||
|
"prettier": "^3.3.1",
|
||||||
|
"prettier-plugin-organize-imports": "^3.2.4",
|
||||||
|
"vite": "^5.2.12"
|
||||||
|
}
|
||||||
|
}
|
||||||
BIN
public/NMI_Review.pdf
Normal file
BIN
public/NMI_Review.pdf
Normal file
Binary file not shown.
BIN
public/favicon.ico
Normal file
BIN
public/favicon.ico
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 15 KiB |
12
src/App.jsx
Executable file
12
src/App.jsx
Executable file
@@ -0,0 +1,12 @@
|
|||||||
|
import Index from "@/pages";
|
||||||
|
import { BrowserRouter as Router, Route, Routes } from "react-router-dom";
|
||||||
|
|
||||||
|
export default function App() {
|
||||||
|
return (
|
||||||
|
<Router>
|
||||||
|
<Routes>
|
||||||
|
<Route exact path="/udlbook" element={<Index />} />
|
||||||
|
</Routes>
|
||||||
|
</Router>
|
||||||
|
);
|
||||||
|
}
|
||||||
145
src/components/Footer/FooterElements.jsx
Executable file
145
src/components/Footer/FooterElements.jsx
Executable file
@@ -0,0 +1,145 @@
|
|||||||
|
import { Link } from "react-router-dom";
|
||||||
|
import styled from "styled-components";
|
||||||
|
|
||||||
|
export const FooterContainer = styled.footer`
|
||||||
|
background-color: #101522;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const FooterWrap = styled.div`
|
||||||
|
padding: 48x 24px;
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
justify-content: center;
|
||||||
|
align-items: center;
|
||||||
|
max-width: 1100px;
|
||||||
|
margin: 0 auto;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const FooterLinksContainer = styled.div`
|
||||||
|
display: flex;
|
||||||
|
justify-content: center;
|
||||||
|
|
||||||
|
@media screen and (max-width: 820px) {
|
||||||
|
padding-top: 32px;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const FooterLinksWrapper = styled.div`
|
||||||
|
display: flex;
|
||||||
|
|
||||||
|
@media screen and (max-width: 820px) {
|
||||||
|
flex-direction: column;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const FooterLinkItems = styled.div`
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
align-items: flex-start;
|
||||||
|
margin: 16px;
|
||||||
|
text-align: left;
|
||||||
|
width: 160px;
|
||||||
|
box-sizing: border-box;
|
||||||
|
color: #fff;
|
||||||
|
|
||||||
|
@media screen and (max-width: 420px) {
|
||||||
|
margin: 0;
|
||||||
|
padding: 10px;
|
||||||
|
width: 100%;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const FooterLinkTitle = styled.h1`
|
||||||
|
font-size: 14px;
|
||||||
|
margin-bottom: 16px;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const FooterLink = styled(Link)`
|
||||||
|
color: #ffffff;
|
||||||
|
text-decoration: none;
|
||||||
|
margin-bottom: 0.5rem;
|
||||||
|
font-size: 14px;
|
||||||
|
|
||||||
|
&:hover {
|
||||||
|
color: #01bf71;
|
||||||
|
transition: 0.3s ease-in-out;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const SocialMedia = styled.section`
|
||||||
|
max-width: 1000px;
|
||||||
|
width: 100%;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const SocialMediaWrap = styled.div`
|
||||||
|
display: flex;
|
||||||
|
justify-content: space-between;
|
||||||
|
align-items: center;
|
||||||
|
max-width: 1100px;
|
||||||
|
margin: 20px auto 0 auto;
|
||||||
|
|
||||||
|
@media screen and (max-width: 820px) {
|
||||||
|
flex-direction: column;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const SocialAttrWrap = styled.div`
|
||||||
|
color: #fff;
|
||||||
|
display: flex;
|
||||||
|
justify-content: center;
|
||||||
|
align-items: center;
|
||||||
|
max-width: 1100px;
|
||||||
|
margin: 10px auto 0 auto;
|
||||||
|
|
||||||
|
@media screen and (max-width: 820px) {
|
||||||
|
flex-direction: column;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const SocialLogo = styled(Link)`
|
||||||
|
color: #fff;
|
||||||
|
justify-self: start;
|
||||||
|
cursor: pointer;
|
||||||
|
text-decoration: none;
|
||||||
|
font-size: 1.5rem;
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
margin-bottom: 16px;
|
||||||
|
font-weight: bold;
|
||||||
|
|
||||||
|
@media screen and (max-width: 768px) {
|
||||||
|
font-size: 20px;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const WebsiteRights = styled.small`
|
||||||
|
color: #fff;
|
||||||
|
margin-bottom: 8px;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const SocialIcons = styled.div`
|
||||||
|
display: flex;
|
||||||
|
justify-content: space-between;
|
||||||
|
align-items: center;
|
||||||
|
width: 60px;
|
||||||
|
margin-bottom: 8px;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const SocialIconLink = styled.a`
|
||||||
|
color: #fff;
|
||||||
|
font-size: 24px;
|
||||||
|
margin-right: 8px;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const FooterImgWrap = styled.div`
|
||||||
|
max-width: 555px;
|
||||||
|
height: 100%;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const FooterImg = styled.img`
|
||||||
|
width: 100%;
|
||||||
|
margin-top: 0;
|
||||||
|
margin-right: 0;
|
||||||
|
margin-left: 10px;
|
||||||
|
padding-right: 0;
|
||||||
|
`;
|
||||||
84
src/components/Footer/index.jsx
Executable file
84
src/components/Footer/index.jsx
Executable file
@@ -0,0 +1,84 @@
|
|||||||
|
import {
|
||||||
|
FooterContainer,
|
||||||
|
FooterWrap,
|
||||||
|
SocialIconLink,
|
||||||
|
SocialIcons,
|
||||||
|
SocialLogo,
|
||||||
|
SocialMedia,
|
||||||
|
SocialMediaWrap,
|
||||||
|
WebsiteRights,
|
||||||
|
} from "@/components/Footer/FooterElements";
|
||||||
|
import { FaGithub, FaLinkedin } from "react-icons/fa";
|
||||||
|
import { FaSquareXTwitter } from "react-icons/fa6";
|
||||||
|
import { animateScroll as scroll } from "react-scroll";
|
||||||
|
|
||||||
|
const images = [
|
||||||
|
"https://freepik.com/free-vector/hand-coding-concept-illustration_21864184.htm#query=coding&position=17&from_view=search&track=sph&uuid=5896d847-38e4-4cb9-8fe1-103041c7c933",
|
||||||
|
"https://freepik.com/free-vector/mathematics-concept-illustration_10733824.htm#query=professor&position=13&from_view=search&track=sph&uuid=5b1a188a-64c5-45af-aae2-8573bc1bed3c",
|
||||||
|
"https://freepik.com/free-vector/content-concept-illustration_7171429.htm#query=media&position=3&from_view=search&track=sph&uuid=c7e35cf2-d85d-4bba-91a6-1cd883dcf153",
|
||||||
|
"https://freepik.com/free-vector/library-concept-illustration_9148008.htm#query=library&position=40&from_view=search&track=sph&uuid=abecc792-b6b2-4ec0-b318-5e6cc73ba649",
|
||||||
|
];
|
||||||
|
|
||||||
|
const socials = [
|
||||||
|
{
|
||||||
|
href: "https://twitter.com/SimonPrinceAI",
|
||||||
|
icon: FaSquareXTwitter,
|
||||||
|
alt: "Twitter",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
href: "https://linkedin.com/in/simon-prince-615bb9165/",
|
||||||
|
icon: FaLinkedin,
|
||||||
|
alt: "LinkedIn",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
href: "https://github.com/udlbook/udlbook",
|
||||||
|
icon: FaGithub,
|
||||||
|
alt: "GitHub",
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
export default function Footer() {
|
||||||
|
const scrollToHome = () => {
|
||||||
|
scroll.scrollToTop();
|
||||||
|
};
|
||||||
|
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<FooterContainer>
|
||||||
|
<FooterWrap>
|
||||||
|
<SocialMedia>
|
||||||
|
<SocialMediaWrap>
|
||||||
|
<SocialLogo to="/udlbook" onClick={scrollToHome}>
|
||||||
|
Understanding Deep Learning
|
||||||
|
</SocialLogo>
|
||||||
|
<WebsiteRights>
|
||||||
|
© {new Date().getFullYear()} Simon J.D. Prince
|
||||||
|
</WebsiteRights>
|
||||||
|
<WebsiteRights>
|
||||||
|
Images by StorySet on FreePik:{" "}
|
||||||
|
{images.map((image, index) => (
|
||||||
|
<a key={index} href={image}>
|
||||||
|
[{index + 1}]
|
||||||
|
</a>
|
||||||
|
))}
|
||||||
|
</WebsiteRights>
|
||||||
|
<SocialIcons>
|
||||||
|
{socials.map((social, index) => (
|
||||||
|
<SocialIconLink
|
||||||
|
key={index}
|
||||||
|
href={social.href}
|
||||||
|
target="_blank"
|
||||||
|
aria-label={social.alt}
|
||||||
|
alt={social.alt}
|
||||||
|
>
|
||||||
|
<social.icon />
|
||||||
|
</SocialIconLink>
|
||||||
|
))}
|
||||||
|
</SocialIcons>
|
||||||
|
</SocialMediaWrap>
|
||||||
|
</SocialMedia>
|
||||||
|
</FooterWrap>
|
||||||
|
</FooterContainer>
|
||||||
|
</>
|
||||||
|
);
|
||||||
|
}
|
||||||
294
src/components/HeroSection/HeroElements.jsx
Executable file
294
src/components/HeroSection/HeroElements.jsx
Executable file
@@ -0,0 +1,294 @@
|
|||||||
|
import styled from "styled-components";
|
||||||
|
|
||||||
|
export const HeroContainer = styled.div`
|
||||||
|
background: #57c6d1;
|
||||||
|
display: flex;
|
||||||
|
justify-content: center;
|
||||||
|
align-items: center;
|
||||||
|
padding: 0 0px;
|
||||||
|
position: static;
|
||||||
|
z-index: 1;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const HeroContent = styled.div`
|
||||||
|
z-index: 3;
|
||||||
|
width: 100%;
|
||||||
|
max-width: 1100px;
|
||||||
|
position: static;
|
||||||
|
padding: 8px 24px;
|
||||||
|
margin: 80px 0px;
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
align-items: center;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const HeroH1 = styled.h1`
|
||||||
|
color: #fff;
|
||||||
|
font-size: 48px;
|
||||||
|
text-align: center;
|
||||||
|
|
||||||
|
@media screen and (max-width: 768px) {
|
||||||
|
font-size: 40px;
|
||||||
|
}
|
||||||
|
|
||||||
|
@media screen and (max-width: 480px) {
|
||||||
|
font-size: 32px;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const HeroP = styled.p`
|
||||||
|
margin-top: 24px;
|
||||||
|
color: #fff;
|
||||||
|
font-size: 24px;
|
||||||
|
text-align: center;
|
||||||
|
max-width: 600px;
|
||||||
|
|
||||||
|
@media screen and (max-width: 768px) {
|
||||||
|
font-size: 24px;
|
||||||
|
}
|
||||||
|
|
||||||
|
@media screen and (max-width: 480px) {
|
||||||
|
font-size: 18px;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const HeroBtnWrapper = styled.div`
|
||||||
|
margin-top: 32px;
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
align-items: center;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const HeroRow = styled.div`
|
||||||
|
display: grid;
|
||||||
|
grid-template-columns: 1fr 1fr;
|
||||||
|
gap: 20px;
|
||||||
|
align-items: top;
|
||||||
|
grid-template-areas: "col1 col2";
|
||||||
|
|
||||||
|
@media screen and (max-width: 768px) {
|
||||||
|
grid-template-columns: 1fr;
|
||||||
|
grid-template-areas:
|
||||||
|
"col2"
|
||||||
|
"col1";
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const HeroNewsItem = styled.div`
|
||||||
|
margin-left: 4px;
|
||||||
|
color: #000000;
|
||||||
|
font-size: 16px;
|
||||||
|
margin-bottom: 16px;
|
||||||
|
display: flex;
|
||||||
|
justify-content: start;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const HeroNewsItemDate = styled.div`
|
||||||
|
width: 20%;
|
||||||
|
margin-right: 20px;
|
||||||
|
|
||||||
|
@media screen and (max-width: 768px) {
|
||||||
|
font-size: 12px;
|
||||||
|
}
|
||||||
|
|
||||||
|
@media screen and (max-width: 480px) {
|
||||||
|
font-size: 12px;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const HeroNewsItemContent = styled.div`
|
||||||
|
width: 80%;
|
||||||
|
color: #000000;
|
||||||
|
|
||||||
|
@media screen and (max-width: 768px) {
|
||||||
|
font-size: 12px;
|
||||||
|
}
|
||||||
|
|
||||||
|
@media screen and (max-width: 480px) {
|
||||||
|
font-size: 12px;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const HeroColumn1 = styled.div`
|
||||||
|
margin-bottom: 15px;
|
||||||
|
margin-left: 12px;
|
||||||
|
margin-top: 60px;
|
||||||
|
padding: 10px 15px;
|
||||||
|
grid-area: col1;
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
justify-content: space-between;
|
||||||
|
|
||||||
|
@media screen and (max-width: 768px) {
|
||||||
|
margin-left: 0;
|
||||||
|
margin-top: 20px;
|
||||||
|
padding: 0;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const HeroColumn2 = styled.div`
|
||||||
|
margin-bottom: 15px;
|
||||||
|
padding: 0 15px;
|
||||||
|
grid-area: col2;
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
flex-direction: column;
|
||||||
|
|
||||||
|
@media screen and (max-width: 768px) {
|
||||||
|
padding: 0;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const TextWrapper = styled.div`
|
||||||
|
max-width: 540px;
|
||||||
|
padding-top: 0;
|
||||||
|
padding-bottom: 0;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const HeroImgWrap = styled.div`
|
||||||
|
max-width: 555px;
|
||||||
|
height: 100%;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const Img = styled.img`
|
||||||
|
width: 100%;
|
||||||
|
margin-top: 0;
|
||||||
|
margin-right: 0;
|
||||||
|
margin-left: 10px;
|
||||||
|
padding-right: 0;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const HeroDownloadsImg = styled.img`
|
||||||
|
margin-top: 5px;
|
||||||
|
margin-right: 0;
|
||||||
|
margin-left: 0;
|
||||||
|
padding-right: 0;
|
||||||
|
margin-bottom: 10px;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const HeroLink = styled.a`
|
||||||
|
color: #fff;
|
||||||
|
text-decoration: none;
|
||||||
|
padding: 0.6rem 0rem 0rem 0rem;
|
||||||
|
cursor: pointer;
|
||||||
|
position: relative;
|
||||||
|
|
||||||
|
&:before {
|
||||||
|
position: absolute;
|
||||||
|
margin: 0 auto;
|
||||||
|
top: 100%;
|
||||||
|
left: 0;
|
||||||
|
width: 100%;
|
||||||
|
height: 2px;
|
||||||
|
background-color: #fff;
|
||||||
|
content: "";
|
||||||
|
opacity: 0.3;
|
||||||
|
-webkit-transform: scaleX(1);
|
||||||
|
transition-property:
|
||||||
|
opacity,
|
||||||
|
-webkit-transform;
|
||||||
|
transition-duration: 0.3s;
|
||||||
|
}
|
||||||
|
|
||||||
|
&:hover:before {
|
||||||
|
opacity: 1;
|
||||||
|
-webkit-transform: scaleX(1.05);
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const UDLLink = styled.a`
|
||||||
|
text-decoration: none;
|
||||||
|
color: #000;
|
||||||
|
font-weight: 300;
|
||||||
|
margin: 0 2px;
|
||||||
|
position: relative;
|
||||||
|
|
||||||
|
&:before {
|
||||||
|
position: absolute;
|
||||||
|
margin: 0 auto;
|
||||||
|
top: 100%;
|
||||||
|
left: 0;
|
||||||
|
width: 100%;
|
||||||
|
height: 2px;
|
||||||
|
background-color: #000;
|
||||||
|
content: "";
|
||||||
|
opacity: 0.3;
|
||||||
|
-webkit-transform: scaleX(1);
|
||||||
|
transition-property:
|
||||||
|
opacity,
|
||||||
|
-webkit-transform;
|
||||||
|
transition-duration: 0.3s;
|
||||||
|
}
|
||||||
|
|
||||||
|
&:hover:before {
|
||||||
|
opacity: 1;
|
||||||
|
-webkit-transform: scaleX(1.05);
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const HeroNewsTitle = styled.div`
|
||||||
|
margin-left: 0px;
|
||||||
|
color: #000000;
|
||||||
|
font-size: 16px;
|
||||||
|
font-weight: bold;
|
||||||
|
line-height: 16px;
|
||||||
|
margin-bottom: 36px;
|
||||||
|
|
||||||
|
@media screen and (max-width: 768px) {
|
||||||
|
font-size: 24px;
|
||||||
|
}
|
||||||
|
|
||||||
|
@media screen and (max-width: 480px) {
|
||||||
|
font-size: 18px;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const HeroCitationTitle = styled.div`
|
||||||
|
margin-left: 0px;
|
||||||
|
color: #000000;
|
||||||
|
font-size: 16px;
|
||||||
|
font-weight: bold;
|
||||||
|
line-height: 16px;
|
||||||
|
margin-bottom: 10px;
|
||||||
|
margin-top: 36px;
|
||||||
|
|
||||||
|
@media screen and (max-width: 768px) {
|
||||||
|
font-size: 24px;
|
||||||
|
}
|
||||||
|
|
||||||
|
@media screen and (max-width: 480px) {
|
||||||
|
font-size: 18px;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const HeroNewsBlock = styled.div``;
|
||||||
|
|
||||||
|
export const HeroCitationBlock = styled.div`
|
||||||
|
font-size: 14px;
|
||||||
|
margin-bottom: 0px;
|
||||||
|
margin-top: 0px;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const HeroFollowBlock = styled.div`
|
||||||
|
@media screen and (max-width: 768px) {
|
||||||
|
font-size: 14px;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const HeroNewsMoreButton = styled.button`
|
||||||
|
background: #fff;
|
||||||
|
color: #000;
|
||||||
|
font-size: 16px;
|
||||||
|
padding: 10px 24px;
|
||||||
|
border: none;
|
||||||
|
border-radius: 4px;
|
||||||
|
cursor: pointer;
|
||||||
|
margin-top: 20px;
|
||||||
|
margin-bottom: 20px;
|
||||||
|
align-self: center;
|
||||||
|
|
||||||
|
&:hover {
|
||||||
|
background: #000;
|
||||||
|
color: #fff;
|
||||||
|
}
|
||||||
|
`;
|
||||||
209
src/components/HeroSection/index.jsx
Executable file
209
src/components/HeroSection/index.jsx
Executable file
@@ -0,0 +1,209 @@
|
|||||||
|
import {
|
||||||
|
HeroCitationBlock,
|
||||||
|
HeroCitationTitle,
|
||||||
|
HeroColumn1,
|
||||||
|
HeroColumn2,
|
||||||
|
HeroContainer,
|
||||||
|
HeroContent,
|
||||||
|
HeroDownloadsImg,
|
||||||
|
HeroFollowBlock,
|
||||||
|
HeroImgWrap,
|
||||||
|
HeroLink,
|
||||||
|
HeroNewsBlock,
|
||||||
|
HeroNewsItem,
|
||||||
|
HeroNewsItemContent,
|
||||||
|
HeroNewsItemDate,
|
||||||
|
HeroNewsMoreButton,
|
||||||
|
HeroNewsTitle,
|
||||||
|
HeroRow,
|
||||||
|
Img,
|
||||||
|
UDLLink,
|
||||||
|
} from "@/components/HeroSection/HeroElements";
|
||||||
|
import img from "@/images/book_cover.jpg";
|
||||||
|
import { useState } from "react";
|
||||||
|
|
||||||
|
const citation = `
|
||||||
|
@book{prince2023understanding,
|
||||||
|
author = "Simon J.D. Prince",
|
||||||
|
title = "Understanding Deep Learning",
|
||||||
|
publisher = "The MIT Press",
|
||||||
|
year = 2023,
|
||||||
|
url = "http://udlbook.com"
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
const news = [
|
||||||
|
{
|
||||||
|
date: "05/22/24",
|
||||||
|
content: (
|
||||||
|
<HeroNewsItemContent>
|
||||||
|
New{" "}
|
||||||
|
<UDLLink href="https://borealisai.com/research-blogs/neural-tangent-kernel-applications/">
|
||||||
|
blog
|
||||||
|
</UDLLink>{" "}
|
||||||
|
about the applications of the neural tangent kernel.
|
||||||
|
</HeroNewsItemContent>
|
||||||
|
),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
date: "05/10/24",
|
||||||
|
content: (
|
||||||
|
<HeroNewsItemContent>
|
||||||
|
Positive{" "}
|
||||||
|
<UDLLink href="https://github.com/udlbook/udlbook/blob/main/public/NMI_Review.pdf">
|
||||||
|
review
|
||||||
|
</UDLLink>{" "}
|
||||||
|
in Nature Machine Intelligence.
|
||||||
|
</HeroNewsItemContent>
|
||||||
|
),
|
||||||
|
},
|
||||||
|
// {
|
||||||
|
// date: "03/12/24",
|
||||||
|
// content: <HeroNewsItemContent>Book now available again.</HeroNewsItemContent>,
|
||||||
|
// },
|
||||||
|
{
|
||||||
|
date: "02/21/24",
|
||||||
|
content: (
|
||||||
|
<HeroNewsItemContent>
|
||||||
|
New blog about the{" "}
|
||||||
|
<UDLLink href="https://borealisai.com/research-blogs/the-neural-tangent-kernel/">
|
||||||
|
Neural Tangent Kernel
|
||||||
|
</UDLLink>
|
||||||
|
.
|
||||||
|
</HeroNewsItemContent>
|
||||||
|
),
|
||||||
|
},
|
||||||
|
// {
|
||||||
|
// date: "02/15/24",
|
||||||
|
// content: (
|
||||||
|
// <HeroNewsItemContent>
|
||||||
|
// First printing of book has sold out in most places. Second printing available
|
||||||
|
// mid-March.
|
||||||
|
// </HeroNewsItemContent>
|
||||||
|
// ),
|
||||||
|
// },
|
||||||
|
{
|
||||||
|
date: "01/29/24",
|
||||||
|
content: (
|
||||||
|
<HeroNewsItemContent>
|
||||||
|
New blog about{" "}
|
||||||
|
<UDLLink href="https://borealisai.com/research-blogs/gradient-flow/">
|
||||||
|
gradient flow
|
||||||
|
</UDLLink>{" "}
|
||||||
|
published.
|
||||||
|
</HeroNewsItemContent>
|
||||||
|
),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
date: "12/26/23",
|
||||||
|
content: (
|
||||||
|
<HeroNewsItemContent>
|
||||||
|
Machine Learning Street Talk{" "}
|
||||||
|
<UDLLink href="https://youtube.com/watch?v=sJXn4Cl4oww">podcast</UDLLink> discussing
|
||||||
|
book.
|
||||||
|
</HeroNewsItemContent>
|
||||||
|
),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
date: "12/19/23",
|
||||||
|
content: (
|
||||||
|
<HeroNewsItemContent>
|
||||||
|
Deeper Insights{" "}
|
||||||
|
<UDLLink href="https://podcasts.apple.com/us/podcast/understanding-deep-learning-with-simon-prince/id1669436318?i=1000638269385">
|
||||||
|
podcast
|
||||||
|
</UDLLink>{" "}
|
||||||
|
discussing book.
|
||||||
|
</HeroNewsItemContent>
|
||||||
|
),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
date: "12/06/23",
|
||||||
|
content: (
|
||||||
|
<HeroNewsItemContent>
|
||||||
|
<UDLLink href="https://borealisai.com/news/understanding-deep-learning/">
|
||||||
|
Interview
|
||||||
|
</UDLLink>{" "}
|
||||||
|
with Borealis AI.
|
||||||
|
</HeroNewsItemContent>
|
||||||
|
),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
date: "12/05/23",
|
||||||
|
content: (
|
||||||
|
<HeroNewsItemContent>
|
||||||
|
Book released by{" "}
|
||||||
|
<UDLLink href="https://mitpress.mit.edu/9780262048644/understanding-deep-learning/">
|
||||||
|
The MIT Press
|
||||||
|
</UDLLink>
|
||||||
|
.
|
||||||
|
</HeroNewsItemContent>
|
||||||
|
),
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
export default function HeroSection() {
|
||||||
|
const [showMoreNews, setShowMoreNews] = useState(false);
|
||||||
|
|
||||||
|
const toggleShowMore = () => {
|
||||||
|
setShowMoreNews((p) => !p);
|
||||||
|
};
|
||||||
|
|
||||||
|
return (
|
||||||
|
<HeroContainer id="home">
|
||||||
|
<HeroContent>
|
||||||
|
<HeroRow>
|
||||||
|
<HeroColumn1>
|
||||||
|
<HeroNewsBlock>
|
||||||
|
<HeroNewsTitle>RECENT NEWS:</HeroNewsTitle>
|
||||||
|
{(showMoreNews ? news : news.slice(0, 7)).map((item, index) => (
|
||||||
|
<HeroNewsItem key={index}>
|
||||||
|
<HeroNewsItemDate>{item.date}</HeroNewsItemDate>
|
||||||
|
{item.content}
|
||||||
|
</HeroNewsItem>
|
||||||
|
))}
|
||||||
|
<HeroNewsMoreButton onClick={toggleShowMore}>
|
||||||
|
{showMoreNews ? "Show less" : "Show more"}
|
||||||
|
</HeroNewsMoreButton>
|
||||||
|
</HeroNewsBlock>
|
||||||
|
<HeroCitationTitle>CITATION:</HeroCitationTitle>
|
||||||
|
<HeroCitationBlock>
|
||||||
|
<pre>
|
||||||
|
<code>{citation}</code>
|
||||||
|
</pre>
|
||||||
|
</HeroCitationBlock>
|
||||||
|
<HeroFollowBlock>
|
||||||
|
Follow me on{" "}
|
||||||
|
<UDLLink href="https://twitter.com/SimonPrinceAI">Twitter</UDLLink> or{" "}
|
||||||
|
<UDLLink href="https://linkedin.com/in/simon-prince-615bb9165/">
|
||||||
|
LinkedIn
|
||||||
|
</UDLLink>{" "}
|
||||||
|
for updates.
|
||||||
|
</HeroFollowBlock>
|
||||||
|
</HeroColumn1>
|
||||||
|
<HeroColumn2>
|
||||||
|
<HeroImgWrap>
|
||||||
|
<Img src={img} alt="Book Cover" />
|
||||||
|
</HeroImgWrap>
|
||||||
|
<HeroLink href="https://github.com/udlbook/udlbook/releases/download/v4.0.1/UnderstandingDeepLearning_05_27_24_C.pdf">
|
||||||
|
Download full PDF (27 May 2024)
|
||||||
|
</HeroLink>
|
||||||
|
<br />
|
||||||
|
<HeroDownloadsImg
|
||||||
|
src="https://img.shields.io/github/downloads/udlbook/udlbook/total"
|
||||||
|
alt="download stats shield"
|
||||||
|
/>
|
||||||
|
<HeroLink href="https://mitpress.mit.edu/9780262048644/understanding-deep-learning/">
|
||||||
|
Buy the book
|
||||||
|
</HeroLink>
|
||||||
|
<HeroLink href="https://github.com/udlbook/udlbook/raw/main/UDL_Answer_Booklet_Students.pdf">
|
||||||
|
Answers to selected questions
|
||||||
|
</HeroLink>
|
||||||
|
<HeroLink href="https://github.com/udlbook/udlbook/raw/main/UDL_Errata.pdf">
|
||||||
|
Errata
|
||||||
|
</HeroLink>
|
||||||
|
</HeroColumn2>
|
||||||
|
</HeroRow>
|
||||||
|
</HeroContent>
|
||||||
|
</HeroContainer>
|
||||||
|
);
|
||||||
|
}
|
||||||
163
src/components/Instructors/InstructorsElements.jsx
Normal file
163
src/components/Instructors/InstructorsElements.jsx
Normal file
@@ -0,0 +1,163 @@
|
|||||||
|
import styled from "styled-components";
|
||||||
|
|
||||||
|
export const InstructorsContainer = styled.div`
|
||||||
|
color: #fff;
|
||||||
|
/* background: #f9f9f9; */
|
||||||
|
background: ${({ lightBg }) => (lightBg ? "#57c6d1" : "#010606")};
|
||||||
|
|
||||||
|
@media screen and (max-width: 768px) {
|
||||||
|
padding: 100px 0;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const InstructorsWrapper = styled.div`
|
||||||
|
display: grid;
|
||||||
|
z-index: 1;
|
||||||
|
width: 100%;
|
||||||
|
max-width: 1100px;
|
||||||
|
margin-right: auto;
|
||||||
|
margin-left: auto;
|
||||||
|
padding: 0 24px;
|
||||||
|
justify-content: center;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const InstructorsRow = styled.div`
|
||||||
|
display: grid;
|
||||||
|
grid-auto-columns: minmax(auto, 1fr);
|
||||||
|
align-items: center;
|
||||||
|
grid-template-areas: ${({ imgStart }) => (imgStart ? `'col2 col1'` : `'col1 col2'`)};
|
||||||
|
|
||||||
|
@media screen and (max-width: 768px) {
|
||||||
|
grid-template-areas: ${({ imgStart }) =>
|
||||||
|
imgStart ? `'col1' 'col2'` : `'col1 col1' 'col2 col2'`};
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const InstructorsRow2 = styled.div`
|
||||||
|
display: grid;
|
||||||
|
grid-auto-columns: minmax(auto, 1fr);
|
||||||
|
align-items: top;
|
||||||
|
grid-template-areas: ${({ imgStart }) => (imgStart ? `'col2 col1'` : `'col1 col2'`)};
|
||||||
|
|
||||||
|
@media screen and (max-width: 768px) {
|
||||||
|
grid-template-areas: ${({ imgStart }) =>
|
||||||
|
imgStart ? `'col1' 'col2'` : `'col1 col1' 'col2 col2'`};
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const Column1 = styled.div`
|
||||||
|
margin-bottom: 15px;
|
||||||
|
padding: 0 15px;
|
||||||
|
grid-area: col1;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const Column2 = styled.div`
|
||||||
|
margin-bottom: 15px;
|
||||||
|
padding: 0 15px;
|
||||||
|
grid-area: col2;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const TextWrapper = styled.div`
|
||||||
|
max-width: 540px;
|
||||||
|
padding-top: 0;
|
||||||
|
padding-bottom: 0;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const TopLine = styled.p`
|
||||||
|
color: #773c23;
|
||||||
|
font-size: 16px;
|
||||||
|
line-height: 16px;
|
||||||
|
font-weight: 700;
|
||||||
|
letter-spacing: 1.4px;
|
||||||
|
text-transform: uppercase;
|
||||||
|
margin-bottom: 16px;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const Heading = styled.h1`
|
||||||
|
margin-bottom: 24px;
|
||||||
|
font-size: 48px;
|
||||||
|
line-height: 1.1;
|
||||||
|
font-weight: 600;
|
||||||
|
color: ${({ lightText }) => (lightText ? "#f7f8fa" : "#010606")};
|
||||||
|
|
||||||
|
@media screen and (max-width: 480px) {
|
||||||
|
font-size: 32px;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const Subtitle = styled.p`
|
||||||
|
max-width: 440px;
|
||||||
|
margin-bottom: 35px;
|
||||||
|
font-size: 18px;
|
||||||
|
line-height: 24px;
|
||||||
|
color: ${({ darkText }) => (darkText ? "#010606" : "#fff")};
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const BtnWrap = styled.div`
|
||||||
|
display: flex;
|
||||||
|
justify-content: flex-start;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const ImgWrap = styled.div`
|
||||||
|
max-width: 555px;
|
||||||
|
height: 100%;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const Img = styled.img`
|
||||||
|
width: 100%;
|
||||||
|
margin-top: 0;
|
||||||
|
margin-right: 0;
|
||||||
|
margin-left: 10px;
|
||||||
|
padding-right: 0;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const InstructorsContent = styled.div`
|
||||||
|
z-index: 3;
|
||||||
|
width: 100%;
|
||||||
|
max-width: 1100px;
|
||||||
|
position: static;
|
||||||
|
padding: 8px 0px;
|
||||||
|
margin: 10px 0px;
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
align-items: left;
|
||||||
|
list-style-position: inside;
|
||||||
|
|
||||||
|
@media screen and (max-width: 1050px) {
|
||||||
|
font-size: 12px;
|
||||||
|
}
|
||||||
|
|
||||||
|
@media screen and (max-width: 768px) {
|
||||||
|
font-size: 10px;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const InstructorsLink = styled.a`
|
||||||
|
text-decoration: none;
|
||||||
|
color: #555;
|
||||||
|
font-weight: 300;
|
||||||
|
margin: 0 2px;
|
||||||
|
position: relative;
|
||||||
|
|
||||||
|
&:before {
|
||||||
|
position: absolute;
|
||||||
|
margin: 0 auto;
|
||||||
|
top: 100%;
|
||||||
|
left: 0;
|
||||||
|
width: 100%;
|
||||||
|
height: 2px;
|
||||||
|
background-color: #555;
|
||||||
|
content: "";
|
||||||
|
opacity: 0.3;
|
||||||
|
-webkit-transform: scaleX(1);
|
||||||
|
transition-property:
|
||||||
|
opacity,
|
||||||
|
-webkit-transform;
|
||||||
|
transition-duration: 0.3s;
|
||||||
|
}
|
||||||
|
|
||||||
|
&:hover:before {
|
||||||
|
opacity: 1;
|
||||||
|
-webkit-transform: scaleX(1.05);
|
||||||
|
}
|
||||||
|
`;
|
||||||
334
src/components/Instructors/index.jsx
Normal file
334
src/components/Instructors/index.jsx
Normal file
@@ -0,0 +1,334 @@
|
|||||||
|
import {
|
||||||
|
Column1,
|
||||||
|
Column2,
|
||||||
|
Heading,
|
||||||
|
Img,
|
||||||
|
ImgWrap,
|
||||||
|
InstructorsContainer,
|
||||||
|
InstructorsContent,
|
||||||
|
InstructorsLink,
|
||||||
|
InstructorsRow,
|
||||||
|
InstructorsRow2,
|
||||||
|
InstructorsWrapper,
|
||||||
|
Subtitle,
|
||||||
|
TextWrapper,
|
||||||
|
TopLine,
|
||||||
|
} from "@/components/Instructors/InstructorsElements";
|
||||||
|
import img from "@/images/instructor.svg";
|
||||||
|
|
||||||
|
const fullSlides = [
|
||||||
|
{
|
||||||
|
text: "Introduction",
|
||||||
|
link: "https://drive.google.com/uc?export=download&id=17RHb11BrydOvxSFNbRIomE1QKLVI087m",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Supervised Learning",
|
||||||
|
link: "https://drive.google.com/uc?export=download&id=1491zkHULC7gDfqlV6cqUxyVYXZ-de-Ub",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Shallow Neural Networks",
|
||||||
|
link: "https://drive.google.com/uc?export=download&id=1XkP1c9EhOBowla1rT1nnsDGMf2rZvrt7",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Deep Neural Networks",
|
||||||
|
link: "https://drive.google.com/uc?export=download&id=1e2ejfZbbfMKLBv0v-tvBWBdI8gO3SSS1",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Loss Functions",
|
||||||
|
link: "https://drive.google.com/uc?export=download&id=1fxQ_a1Q3eFPZ4kPqKbak6_emJK-JfnRH",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Fitting Models",
|
||||||
|
link: "https://drive.google.com/uc?export=download&id=17QQ5ZzXBtR_uCNCUU1gPRWWRUeZN9exW",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Computing Gradients",
|
||||||
|
link: "https://drive.google.com/uc?export=download&id=1hC8JUCOaFWiw3KGn0rm7nW6mEq242QDK",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Initialization",
|
||||||
|
link: "https://drive.google.com/uc?export=download&id=1tSjCeAVg0JCeBcPgDJDbi7Gg43Qkh9_d",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Performance",
|
||||||
|
link: "https://drive.google.com/uc?export=download&id=1RVZW3KjEs0vNSGx3B2fdizddlr6I0wLl",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Regularization",
|
||||||
|
link: "https://drive.google.com/uc?export=download&id=1LTicIKPRPbZRkkg6qOr1DSuOB72axood",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Convolutional Networks",
|
||||||
|
link: "https://drive.google.com/uc?export=download&id=1bGVuwAwrofzZdfvj267elIzkYMIvYFj0",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Image Generation",
|
||||||
|
link: "https://drive.google.com/uc?export=download&id=14w31QqWRDix1GdUE-na0_E0kGKBhtKzs",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Transformers and LLMs",
|
||||||
|
link: "https://drive.google.com/uc?export=download&id=1af6bTTjAbhDYfrDhboW7Fuv52Gk9ygKr",
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
// Per-chapter figure downloads shown in the "Figures" column: zipped PDF
// figures hosted on GitHub, zipped SVG figures on Google Drive (direct
// download), and editable PowerPoint decks on Google Docs.
//
// The three hosting-service URL prefixes were previously repeated verbatim
// for all 22 chapters; the builders below remove that duplication while
// producing byte-identical URLs.

// Direct-download URL for a Google Drive file id.
const driveDownloadUrl = (id) =>
  `https://drive.google.com/uc?export=download&id=${id}`;

// Shared-link URL for a Google Slides deck id.
const slidesUrl = (id) =>
  `https://docs.google.com/presentation/d/${id}/edit?usp=drive_link&ouid=110441678248547154185&rtpof=true&sd=true`;

// GitHub raw URL for a zipped-PDF archive; `stem` is "Chap1".."Chap21" or
// "Appendix" (the appendix archive breaks the UDLChap<N>PDF.zip pattern).
const figuresPdfUrl = (stem) =>
  `https://github.com/udlbook/udlbook/raw/main/PDFFigures/UDL${stem}PDF.zip`;

// [chapter title, GitHub zip stem, Drive SVG-zip id, Google Slides deck id]
const figureData = [
  ["Introduction", "Chap1", "1udnl5pUOAc8DcAQ7HQwyzP9pwL95ynnv", "1IjTqIUvWCJc71b5vEJYte-Dwujcp7rvG"],
  ["Supervised learning", "Chap2", "1VSxcU5y1qNFlmd3Lb3uOWyzILuOj1Dla", "1Br7R01ROtRWPlNhC_KOommeHAWMBpWtz"],
  ["Shallow neural networks", "Chap3", "19kZFWlXhzN82Zx02ByMmSZOO4T41fmqI", "1e9M3jB5I9qZ4dCBY90Q3Hwft_i068QVQ"],
  ["Deep neural networks", "Chap4", "1ojr0ebsOhzvS04ItAflX2cVmYqHQHZUa", "1LTSsmY4mMrJbqXVvoTOCkQwHrRKoYnJj"],
  ["Loss functions", "Chap5", "17MJO7fiMpFZVqKeqXTbQ36AMpmR4GizZ", "1gcpC_3z9oRp87eMkoco-kdLD-MM54Puk"],
  ["Training models", "Chap6", "1VPdhFRnCr9_idTrX0UdHKGAw2shUuwhK", "1AKoeggAFBl9yLC7X5tushAGzCCxmB7EY"],
  ["Gradients and initialization", "Chap7", "1TTl4gvrTvNbegnml4CoGoKOOd6O8-PGs", "11zhB6PI-Dp6Ogmr4IcI6fbvbqNqLyYcz"],
  ["Measuring performance", "Chap8", "19eQOnygd_l0DzgtJxXuYnWa4z7QKJrJx", "1SHRmJscDLUuQrG7tmysnScb3ZUAqVMZo"],
  ["Regularization", "Chap9", "1LprgnUGL7xAM9-jlGZC9LhMPeefjY0r0", "1VwIfvjpdfTny6sEfu4ZETwCnw6m8Eg-5"],
  ["Convolutional networks", "Chap10", "1-Wb3VzaSvVeRzoUzJbI2JjZE0uwqupM9", "1MtfKBC4Y9hWwGqeP6DVwUNbi1j5ncQCg"],
  ["Residual networks", "Chap11", "1Mr58jzEVseUAfNYbGWCQyDtEDwvfHRi1", "1saY8Faz0KTKAAifUrbkQdLA2qkyEjOPI"],
  ["Transformers", "Chap12", "1txzOVNf8-jH4UfJ6SLnrtOfPd1Q3ebzd", "1GVNvYWa0WJA6oKg89qZre-UZEhABfm0l"],
  ["Graph neural networks", "Chap13", "1lQIV6nRp6LVfaMgpGFhuwEXG-lTEaAwe", "1YwF3U82c1mQ74c1WqHVTzLZ0j7GgKaWP"],
  ["Unsupervised learning", "Chap14", "1aMbI6iCuUvOywqk5pBOmppJu1L1anqsM", "1A-lBGv3NHl4L32NvfFgy1EKeSwY-0UeB"],
  ["GANs", "Chap15", "1EErnlZCOlXc3HK7m83T2Jh_0NzIUHvtL", "10Ernk41ShOTf4IYkMD-l4dJfKATkXH4w"],
  ["Normalizing flows", "Chap16", "1SNtNIY7khlHQYMtaOH-FosSH3kWwL4b7", "1nLLzqb9pdfF_h6i1HUDSyp7kSMIkSUUA"],
  ["Variational autoencoders", "Chap17", "1B9bxtmdugwtg-b7Y4AdQKAIEVWxjx8l3", "1lQE4Bu7-LgvV2VlJOt_4dQT-kusYl7Vo"],
  ["Diffusion models", "Chap18", "1A-pIGl4PxjVMYOKAUG3aT4a8wD3G-q_r", "1x_ufIBtVPzWUvRieKMkpw5SdRjXWwdfR"],
  ["Deep reinforcement learning", "Chap19", "1a5WUoF7jeSgwC_PVdckJi1Gny46fCqh0", "1TnYmVbFNhmMFetbjyfXGmkxp1EHauMqr"],
  ["Why does deep learning work?", "Chap20", "1M2d0DHEgddAQoIedKSDTTt7m1ZdmBLQ3", "1coxF4IsrCzDTLrNjRagHvqB_FBy10miA"],
  ["Deep learning and ethics", "Chap21", "1jixmFfwmZkW_UVYzcxmDcMsdFFtnZ0bU", "1EtfzanZYILvi9_-Idm28zD94I_6OrN9R"],
  ["Appendices", "Appendix", "1k2j7hMN40ISPSg9skFYWFL3oZT7r8v-l", "1_2cJHRnsoQQHst0rwZssv-XH4o5SEHks"],
];

// Same shape as before: [{ text, links: { pdf, svg, pptx } }, ...]
const figures = figureData.map(([text, stem, svgId, pptxId]) => ({
  text,
  links: {
    pdf: figuresPdfUrl(stem),
    svg: driveDownloadUrl(svgId),
    pptx: slidesUrl(pptxId),
  },
}));
|
||||||
|
|
||||||
|
/**
 * "Instructors" landing-page section.
 *
 * Renders a hero row (intro text + illustration) followed by a two-column
 * resources row: the left column links the MIT Press answer-booklet
 * registration and the per-lecture slide decks (`fullSlides`); the right
 * column links the per-chapter figure archives (`figures`) in
 * PDF / SVG / PPTX form.
 */
export default function InstructorsSection() {
  return (
    <>
      <InstructorsContainer lightBg={true} id="Instructors">
        <InstructorsWrapper>
          {/* Hero row: section intro on the left, illustration on the right. */}
          <InstructorsRow imgStart={false}>
            <Column1>
              <TextWrapper>
                <TopLine>Instructors</TopLine>
                <Heading lightText={false}>Resources for instructors</Heading>
                <Subtitle darkText={true}>
                  All the figures in vector and image formats, full slides for
                  first twelve chapters, instructor answer booklet
                </Subtitle>
              </TextWrapper>
            </Column1>
            <Column2>
              <ImgWrap>
                <Img src={img} alt="Instructor" />
              </ImgWrap>
            </Column2>
          </InstructorsRow>
          {/* Resources row: registration + slides on the left, figures on the right. */}
          <InstructorsRow2>
            <Column1>
              <TopLine>Register</TopLine>
              <InstructorsLink href="https://mitpress.ublish.com/request?cri=15055">
                Register
              </InstructorsLink>{" "}
              with MIT Press for answer booklet.
              <InstructorsContent></InstructorsContent>
              <TopLine>Full slides</TopLine>
              <InstructorsContent>
                Slides for 20 lecture undergraduate deep learning course:
              </InstructorsContent>
              <InstructorsContent>
                {/* One numbered entry per lecture, linking its PPTX deck. */}
                <ol>
                  {fullSlides.map((slide, index) => (
                    <li key={index}>
                      {slide.text}{" "}
                      <InstructorsLink href={slide.link}>
                        PPTX
                      </InstructorsLink>
                    </li>
                  ))}
                </ol>
              </InstructorsContent>
            </Column1>
            <Column2>
              <TopLine>Figures</TopLine>
              <InstructorsContent>
                {/* One numbered entry per chapter with its three archive formats. */}
                <ol>
                  {figures.map((figure, index) => (
                    <li key={index}>
                      {figure.text}:{" "}
                      <InstructorsLink href={figure.links.pdf}>
                        PDF
                      </InstructorsLink>{" "}
                      /{" "}
                      <InstructorsLink href={figure.links.svg}>
                        {" "}
                        SVG
                      </InstructorsLink>{" "}
                      /{" "}
                      <InstructorsLink href={figure.links.pptx}>
                        PPTX{" "}
                      </InstructorsLink>
                    </li>
                  ))}
                </ol>
              </InstructorsContent>
              <InstructorsLink href="https://drive.google.com/file/d/1T_MXXVR4AfyMnlEFI-UVDh--FXI5deAp/view?usp=sharing">
                Instructions
              </InstructorsLink>{" "}
              for editing equations in figures.
              <InstructorsContent></InstructorsContent>
            </Column2>
          </InstructorsRow2>
        </InstructorsWrapper>
      </InstructorsContainer>
    </>
  );
}
|
||||||
179
src/components/Media/MediaElements.jsx
Normal file
179
src/components/Media/MediaElements.jsx
Normal file
@@ -0,0 +1,179 @@
|
|||||||
|
import styled from "styled-components";

// Outer wrapper for the Media section; `lightBg` toggles between a light
// grey and a near-black background.
export const MediaContainer = styled.div`
  color: #fff;
  /* background: #f9f9f9; */
  background: ${({ lightBg }) => (lightBg ? "#f9f9f9" : "#010606")};

  @media screen and (max-width: 768px) {
    padding: 100px 0;
  }
`;

// Centers the section content and constrains it to the 1100px page width.
export const MediaWrapper = styled.div`
  display: grid;
  z-index: 1;
  width: 100%;
  max-width: 1100px;
  margin-right: auto;
  margin-left: auto;
  padding: 0 24px;
  justify-content: center;
`;

// Two-column grid row; `imgStart` swaps the text/image column order.
// On narrow screens the columns stack vertically instead.
export const MediaRow = styled.div`
  display: grid;
  grid-auto-columns: minmax(auto, 1fr);
  align-items: center;
  grid-template-areas: ${({ imgStart }) => (imgStart ? `'col2 col1'` : `'col1 col2'`)};

  @media screen and (max-width: 768px) {
    grid-template-areas: ${({ imgStart }) =>
      imgStart ? `'col1' 'col2'` : `'col1 col1' 'col2 col2'`};
  }
`;

// First grid column ('col1'; the text side by default).
export const Column1 = styled.div`
  margin-bottom: 15px;
  padding: 0 15px;
  grid-area: col1;
`;

// Second grid column ('col2'; the image side by default).
export const Column2 = styled.div`
  margin-bottom: 15px;
  padding: 0 15px;
  grid-area: col2;
`;

export const TextWrapper = styled.div`
  max-width: 540px;
  padding-top: 0;
  padding-bottom: 0;
`;

// Small uppercase teal label rendered above a heading.
export const TopLine = styled.p`
  color: #57c6d1;
  font-size: 16px;
  line-height: 16px;
  font-weight: 700;
  letter-spacing: 1.4px;
  text-transform: uppercase;
  margin-bottom: 16px;
`;

// Section heading; `lightText` switches between light and dark text color.
export const Heading = styled.h1`
  margin-bottom: 24px;
  font-size: 48px;
  line-height: 1.1;
  font-weight: 600;
  color: ${({ lightText }) => (lightText ? "#f7f8fa" : "#010606")};

  @media screen and (max-width: 480px) {
    font-size: 32px;
  }
`;

// Paragraph under the heading; `darkText` switches the text color.
export const Subtitle = styled.p`
  max-width: 440px;
  margin-bottom: 35px;
  font-size: 18px;
  line-height: 24px;
  color: ${({ darkText }) => (darkText ? "#010606" : "#fff")};
`;

export const BtnWrap = styled.div`
  display: flex;
  justify-content: flex-start;
`;

export const ImgWrap = styled.div`
  max-width: 555px;
  height: 100%;
`;

export const Img = styled.img`
  width: 100%;
  margin-top: 0;
  margin-right: 0;
  margin-left: 10px;
  padding-right: 0;
`;

// Text block whose font size shrinks at the two responsive breakpoints.
export const MediaTextBlock = styled.div`
  @media screen and (max-width: 768px) {
    font-size: 24px;
  }

  @media screen and (max-width: 480px) {
    font-size: 18px;
  }
`;
|
||||||
|
|
||||||
|
// Flex column holding a block of text/links inside a Media column.
// FIX: the original declared `align-items: left`, which is not a valid CSS
// value (browsers drop the declaration entirely); `flex-start` is the valid
// keyword for the intended left alignment.
export const MediaContent = styled.div`
  z-index: 3;
  width: 100%;
  max-width: 1100px;
  position: static;
  padding: 8px 0px;
  margin: 10px 0px;
  display: flex;
  flex-direction: column;
  align-items: flex-start;
  list-style-position: inside;

  @media screen and (max-width: 768px) {
    font-size: 14px;
  }
`;
|
||||||
|
|
||||||
|
// Variant of MediaRow that aligns the two columns to the top of the row
// instead of vertically centering them.
// FIX: the original declared `align-items: top`, which is not a valid CSS
// value (the declaration is dropped); `start` is the valid grid keyword for
// the intended top alignment.
export const MediaRow2 = styled.div`
  display: grid;
  grid-auto-columns: minmax(auto, 1fr);
  align-items: start;
  grid-template-areas: ${({ imgStart }) => (imgStart ? `'col2 col1'` : `'col1 col2'`)};

  @media screen and (max-width: 768px) {
    grid-template-areas: ${({ imgStart }) =>
      imgStart ? `'col1' 'col2'` : `'col1 col1' 'col2 col2'`};
  }
`;
|
||||||
|
|
||||||
|
// Fixed-size container for an embedded YouTube iframe; the child iframe is
// sized 100% x 100% against it. Shrinks on narrower screens while keeping
// roughly the same 16:9 aspect ratio.
export const VideoFrame = styled.div`
  width: 560px;
  height: 315px;

  @media screen and (max-width: 1050px) {
    width: 280px;
    height: 157px;
  }
`;

// Teal inline link with an animated underline: a faint 2px bar sits just
// below the text (via the ::before pseudo-element) and brightens / stretches
// slightly on hover.
export const MediaLink = styled.a`
  text-decoration: none;
  color: #57c6d1;
  font-weight: 300;
  margin: 0 2px;
  position: relative;

  &:before {
    position: absolute;
    margin: 0 auto;
    top: 100%;
    left: 0;
    width: 100%;
    height: 2px;
    background-color: #57c6d1;
    content: "";
    opacity: 0.3;
    -webkit-transform: scaleX(1);
    transition-property:
      opacity,
      -webkit-transform;
    transition-duration: 0.3s;
  }

  &:hover:before {
    opacity: 1;
    -webkit-transform: scaleX(1.05);
  }
`;
|
||||||
164
src/components/Media/index.jsx
Normal file
164
src/components/Media/index.jsx
Normal file
@@ -0,0 +1,164 @@
|
|||||||
|
import {
|
||||||
|
Column1,
|
||||||
|
Column2,
|
||||||
|
Heading,
|
||||||
|
Img,
|
||||||
|
ImgWrap,
|
||||||
|
MediaContainer,
|
||||||
|
MediaContent,
|
||||||
|
MediaLink,
|
||||||
|
MediaRow,
|
||||||
|
MediaRow2,
|
||||||
|
MediaWrapper,
|
||||||
|
Subtitle,
|
||||||
|
TextWrapper,
|
||||||
|
TopLine,
|
||||||
|
VideoFrame,
|
||||||
|
} from "@/components/Media/MediaElements";
|
||||||
|
import img from "@/images/media.svg";
|
||||||
|
|
||||||
|
// Interview/press links rendered in the "Interviews" column of the Media
// section. `text` is the outlet name shown before the link, `linkText` is
// the anchor label, and `href` is the destination URL.
const interviews = [
  {
    href: "https://www.borealisai.com/news/understanding-deep-learning/",
    text: "Borealis AI",
    linkText: "interview",
  },
  {
    href: "https://shepherd.com/best-books/machine-learning-and-deep-neural-networks",
    text: "Shepherd ML book",
    linkText: "recommendations",
  },
];
|
||||||
|
|
||||||
|
export default function MediaSection() {
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<MediaContainer lightBg={false} id="Media">
|
||||||
|
<MediaWrapper>
|
||||||
|
<MediaRow imgStart={true}>
|
||||||
|
<Column1>
|
||||||
|
<TextWrapper>
|
||||||
|
<TopLine>Media</TopLine>
|
||||||
|
<Heading lightText={true}>
|
||||||
|
Reviews, videos, podcasts, interviews
|
||||||
|
</Heading>
|
||||||
|
<Subtitle darkText={false}>
|
||||||
|
Various resources connected to the book
|
||||||
|
</Subtitle>
|
||||||
|
</TextWrapper>
|
||||||
|
</Column1>
|
||||||
|
<Column2>
|
||||||
|
<ImgWrap>
|
||||||
|
<Img src={img} alt="Media" />
|
||||||
|
</ImgWrap>
|
||||||
|
</Column2>
|
||||||
|
</MediaRow>
|
||||||
|
<MediaRow>
|
||||||
|
<Column1>
|
||||||
|
Machine learning street talk podcast
|
||||||
|
<VideoFrame>
|
||||||
|
<iframe
|
||||||
|
width="100%"
|
||||||
|
height="100%"
|
||||||
|
src="https://www.youtube.com/embed/sJXn4Cl4oww?si=Lm_hQPqj0RXy-75H&controls=0"
|
||||||
|
title="YouTube video player"
|
||||||
|
frameBorder="2"
|
||||||
|
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
|
||||||
|
allowfullscreen
|
||||||
|
></iframe>
|
||||||
|
</VideoFrame>
|
||||||
|
</Column1>
|
||||||
|
<Column2>
|
||||||
|
Deeper insights podcast
|
||||||
|
<VideoFrame>
|
||||||
|
<iframe
|
||||||
|
width="100%"
|
||||||
|
height="100%"
|
||||||
|
src="https://www.youtube.com/embed/nQf4o9TDSHI?si=uMk66zLD7uhuSnQ1&controls=0"
|
||||||
|
title="YouTube video player"
|
||||||
|
frameBorder="2"
|
||||||
|
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
|
||||||
|
allowfullscreen
|
||||||
|
></iframe>
|
||||||
|
</VideoFrame>
|
||||||
|
</Column2>
|
||||||
|
</MediaRow>
|
||||||
|
<MediaRow2>
|
||||||
|
<Column1>
|
||||||
|
<TopLine>Reviews</TopLine>
|
||||||
|
<MediaContent>
|
||||||
|
{/* TODO: add dynamic rendering for reviews */}
|
||||||
|
<ul>
|
||||||
|
<li>
|
||||||
|
Nature Machine Intelligence{" "}
|
||||||
|
<MediaLink href="https://github.com/udlbook/udlbook/blob/main/public/NMI_Review.pdf">
|
||||||
|
{" "}
|
||||||
|
review{" "}
|
||||||
|
</MediaLink>{" "}
|
||||||
|
by{" "}
|
||||||
|
<MediaLink href="https://wang-axis.github.io/">
|
||||||
|
Ge Wang
|
||||||
|
</MediaLink>
|
||||||
|
</li>
|
||||||
|
<li>
|
||||||
|
Amazon{" "}
|
||||||
|
<MediaLink href="https://www.amazon.com/Understanding-Deep-Learning-Simon-Prince-ebook/product-reviews/B0BXKH8XY6/">
|
||||||
|
reviews
|
||||||
|
</MediaLink>
|
||||||
|
</li>
|
||||||
|
<li>
|
||||||
|
Goodreads{" "}
|
||||||
|
<MediaLink href="https://www.goodreads.com/book/show/123239819-understanding-deep-learning?">
|
||||||
|
reviews{" "}
|
||||||
|
</MediaLink>
|
||||||
|
</li>
|
||||||
|
<li>
|
||||||
|
Book{" "}
|
||||||
|
<MediaLink href="https://medium.com/@vishalvignesh/udl-book-review-the-new-deep-learning-textbook-youll-want-to-finish-69e1557b018d">
|
||||||
|
review
|
||||||
|
</MediaLink>{" "}
|
||||||
|
by Vishal V.
|
||||||
|
</li>
|
||||||
|
<li>
|
||||||
|
Amazon{" "}
|
||||||
|
<MediaLink href="https://www.amazon.com/Understanding-Deep-Learning-Simon-Prince-ebook/product-reviews/B0BXKH8XY6/">
|
||||||
|
reviews
|
||||||
|
</MediaLink>
|
||||||
|
</li>
|
||||||
|
<li>
|
||||||
|
Goodreads{" "}
|
||||||
|
<MediaLink href="https://www.goodreads.com/book/show/123239819-understanding-deep-learning?">
|
||||||
|
reviews{" "}
|
||||||
|
</MediaLink>
|
||||||
|
</li>
|
||||||
|
<li>
|
||||||
|
Book{" "}
|
||||||
|
<MediaLink href="https://medium.com/@vishalvignesh/udl-book-review-the-new-deep-learning-textbook-youll-want-to-finish-69e1557b018d">
|
||||||
|
review
|
||||||
|
</MediaLink>{" "}
|
||||||
|
by Vishal V.
|
||||||
|
</li>
|
||||||
|
</ul>
|
||||||
|
</MediaContent>
|
||||||
|
</Column1>
|
||||||
|
<Column2>
|
||||||
|
<TopLine>Interviews</TopLine>
|
||||||
|
<MediaContent>
|
||||||
|
<ul>
|
||||||
|
{interviews.map((interview, index) => (
|
||||||
|
<li key={index}>
|
||||||
|
{interview.text}{" "}
|
||||||
|
<MediaLink href={interview.href}>
|
||||||
|
{interview.linkText}
|
||||||
|
</MediaLink>
|
||||||
|
</li>
|
||||||
|
))}
|
||||||
|
</ul>
|
||||||
|
</MediaContent>
|
||||||
|
</Column2>
|
||||||
|
</MediaRow2>
|
||||||
|
</MediaWrapper>
|
||||||
|
</MediaContainer>
|
||||||
|
</>
|
||||||
|
);
|
||||||
|
}
|
||||||
183
src/components/More/MoreElements.jsx
Normal file
183
src/components/More/MoreElements.jsx
Normal file
@@ -0,0 +1,183 @@
|
|||||||
|
import styled from "styled-components";

// Outer wrapper for the More section; `lightBg` toggles between the teal
// brand color and a near-black background.
export const MoreContainer = styled.div`
  color: #fff;
  /* background: #f9f9f9; */
  background: ${({ lightBg }) => (lightBg ? "#57c6d1" : "#010606")};

  @media screen and (max-width: 768px) {
    padding: 100px 0;
  }
`;

// Centers the section content and constrains it to the 1100px page width.
export const MoreWrapper = styled.div`
  display: grid;
  z-index: 1;
  /* height: 1050px; */
  width: 100%;
  max-width: 1100px;
  margin-right: auto;
  margin-left: auto;
  padding: 0 24px;
  justify-content: center;
`;

// Two-column grid row; `imgStart` swaps the column order. On narrow
// screens the columns stack vertically instead.
export const MoreRow = styled.div`
  display: grid;
  grid-auto-columns: minmax(auto, 1fr);
  align-items: center;
  grid-template-areas: ${({ imgStart }) => (imgStart ? `'col2 col1'` : `'col1 col2'`)};

  @media screen and (max-width: 768px) {
    grid-template-areas: ${({ imgStart }) =>
      imgStart ? `'col1' 'col2'` : `'col1 col1' 'col2 col2'`};
  }
`;
|
||||||
|
|
||||||
|
// Variant of MoreRow that aligns the two columns to the top of the row
// instead of vertically centering them.
// FIX: the original declared `align-items: top`, which is not a valid CSS
// value (the declaration is dropped); `start` is the valid grid keyword for
// the intended top alignment.
export const MoreRow2 = styled.div`
  display: grid;
  grid-auto-columns: minmax(auto, 1fr);
  align-items: start;
  grid-template-areas: ${({ imgStart }) => (imgStart ? `'col2 col1'` : `'col1 col2'`)};

  @media screen and (max-width: 768px) {
    grid-template-areas: ${({ imgStart }) =>
      imgStart ? `'col1' 'col2'` : `'col1 col1' 'col2 col2'`};
  }
`;
|
||||||
|
|
||||||
|
// First grid column ('col1').
export const Column1 = styled.div`
  margin-bottom: 15px;
  padding: 0 15px;
  grid-area: col1;
`;

// Second grid column ('col2').
export const Column2 = styled.div`
  margin-bottom: 15px;
  padding: 0 15px;
  grid-area: col2;
`;

export const TextWrapper = styled.div`
  max-width: 540px;
  padding-top: 0;
  padding-bottom: 0;
`;

// Small uppercase brown label rendered above a heading.
export const TopLine = styled.p`
  color: #773c23;
  font-size: 16px;
  line-height: 16px;
  font-weight: 700;
  letter-spacing: 1.4px;
  text-transform: uppercase;
  margin-bottom: 12px;
  margin-top: 16px;
`;

// Section heading; `lightText` switches between light and dark text color.
export const Heading = styled.h1`
  margin-bottom: 24px;
  font-size: 48px;
  line-height: 1.1;
  font-weight: 600;
  color: ${({ lightText }) => (lightText ? "#f7f8fa" : "#010606")};

  @media screen and (max-width: 480px) {
    font-size: 32px;
  }
`;

// Paragraph under the heading; `darkText` switches the text color.
export const Subtitle = styled.p`
  max-width: 440px;
  margin-bottom: 35px;
  font-size: 18px;
  line-height: 24px;
  color: ${({ darkText }) => (darkText ? "#010606" : "#fff")};
`;

export const BtnWrap = styled.div`
  display: flex;
  justify-content: flex-start;
`;

export const ImgWrap = styled.div`
  max-width: 555px;
  height: 100%;
`;

export const Img = styled.img`
  width: 100%;
  margin-top: 0;
  margin-right: 0;
  margin-left: 10px;
  padding-right: 0;
`;
|
||||||
|
|
||||||
|
// Flex column holding a block of text/links inside a More column.
// FIX: the original declared `align-items: left`, which is not a valid CSS
// value (browsers drop the declaration entirely); `flex-start` is the valid
// keyword for the intended left alignment.
export const MoreContent = styled.div`
  z-index: 3;
  width: 100%;
  max-width: 1100px;
  position: static;
  padding: 8px 0px;
  margin: 10px 0px;
  display: flex;
  flex-direction: column;
  align-items: flex-start;
  list-style-position: inside;
`;
|
||||||
|
|
||||||
|
// Top-level bulleted list in a More column; bullets drawn inside the text
// flow, font shrinks on mobile.
export const MoreOuterList = styled.ul`
  /* list-style:none; */
  list-style-position: inside;
  margin: 0;

  @media screen and (max-width: 768px) {
    font-size: 14px;
  }
`;

// Nested list under a MoreOuterList item; slightly smaller on mobile.
export const MoreInnerList = styled.ul`
  list-style-position: inside;

  @media screen and (max-width: 768px) {
    font-size: 12px;
  }
`;

// Small indented white paragraph shown beneath a list item.
export const MoreInnerP = styled.p`
  padding-left: 18px;
  padding-bottom: 10px;
  padding-top: 3px;
  font-size: 14px;
  color: #fff;
`;

// Grey inline link with an animated underline: a faint 2px bar sits just
// below the text (via the ::before pseudo-element) and brightens / stretches
// slightly on hover.
export const MoreLink = styled.a`
  text-decoration: none;
  color: #555;
  font-weight: 300;
  margin: 0 2px;
  position: relative;

  &:before {
    position: absolute;
    margin: 0 auto;
    top: 100%;
    left: 0;
    width: 100%;
    height: 2px;
    background-color: #555;
    content: "";
    opacity: 0.3;
    -webkit-transform: scaleX(1);
    transition-property:
      opacity,
      -webkit-transform;
    transition-duration: 0.3s;
  }

  &:hover:before {
    opacity: 1;
    -webkit-transform: scaleX(1.05);
  }
`;
|
||||||
933
src/components/More/index.jsx
Normal file
933
src/components/More/index.jsx
Normal file
@@ -0,0 +1,933 @@
|
|||||||
|
import {
|
||||||
|
Column1,
|
||||||
|
Column2,
|
||||||
|
Heading,
|
||||||
|
Img,
|
||||||
|
ImgWrap,
|
||||||
|
MoreContainer,
|
||||||
|
MoreInnerList,
|
||||||
|
MoreInnerP,
|
||||||
|
MoreLink,
|
||||||
|
MoreOuterList,
|
||||||
|
MoreRow,
|
||||||
|
MoreRow2,
|
||||||
|
MoreWrapper,
|
||||||
|
Subtitle,
|
||||||
|
TextWrapper,
|
||||||
|
TopLine,
|
||||||
|
} from "@/components/More/MoreElements";
|
||||||
|
import img from "@/images/more.svg";
|
||||||
|
|
||||||
|
// Earlier book by the same author, featured in the "More" section.
// Each entry has a title (`text`), a homepage `link`, and bullet-point
// `details` rendered beneath it.
const book = [
  {
    text: "Computer vision: models, learning, and inference",
    link: "http://computervisionmodels.com",
    details: [
      "2012 book published with CUP",
      "Focused on probabilistic models",
      'Pre-"deep learning"',
      "Lots of ML content",
      "Individual chapters available below",
    ],
  },
];
|
||||||
|
|
||||||
|
// Tutorial blog posts on transformers and LLMs (Borealis AI), listed in the
// "More" section. Each entry has a display title (`text`), the post URL
// (`link`), and the post's topic headings (`details`) rendered as a nested
// list.
const transformersAndLLMs = [
  {
    text: "Intro to LLMs",
    link: "https://www.borealisai.com/research-blogs/a-high-level-overview-of-large-language-models/",
    details: [
      "What is an LLM?",
      "Pretraining",
      "Instruction fine-tuning",
      "Reinforcement learning from human feedback",
      "Notable LLMs",
      "LLMs without training from scratch",
    ],
  },
  {
    text: "Transformers I",
    link: "https://www.borealisai.com/en/blog/tutorial-14-transformers-i-introduction/",
    details: [
      "Dot-Product self-attention",
      "Scaled dot-product self-attention",
      "Position encoding",
      "Multiple heads",
      "Transformer block",
      "Encoders",
      "Decoders",
      "Encoder-Decoders",
    ],
  },
  {
    text: "Transformers II",
    link: "https://www.borealisai.com/en/blog/tutorial-16-transformers-ii-extensions/",
    details: [
      "Sinusoidal position embeddings",
      "Learned position embeddings",
      "Relatives vs. absolute position embeddings",
      "Extending transformers to longer sequences",
      "Reducing attention matrix size",
      "Making attention matrix sparse",
      "Kernelizing attention computation",
      "Attention as an RNN",
      "Attention as a hypernetwork",
      "Attention as a routing network",
      "Attention and graphs",
      "Attention and convolutions",
      "Attention and gating",
      "Attention and memory retrieval",
    ],
  },
  {
    text: "Transformers III",
    link: "https://www.borealisai.com/en/blog/tutorial-17-transformers-iii-training/",
    details: [
      "Tricks for training transformers",
      "Why are these tricks required?",
      "Removing layer normalization",
      "Balancing residual dependencies",
      "Reducing optimizer variance",
      "How to train deeper transformers on small datasets",
    ],
  },
  {
    text: "Training and fine-tuning LLMs",
    link: "https://www.borealisai.com/research-blogs/training-and-fine-tuning-large-language-models/",
    details: [
      "Large language models",
      "Pretraining",
      "Supervised fine tuning",
      "Reinforcement learning from human feedback",
      "Direct preference optimization",
    ],
  },
  {
    text: "Speeding up inference in LLMs",
    link: "https://www.borealisai.com/research-blogs/speeding-up-inference-in-transformers/",
    details: [
      "Problems with transformers",
      "Attention-free transformers",
      "Complexity",
      "RWKV",
      "Linear transformers and performers",
      "Retentive network",
    ],
  },
];
|
||||||
|
|
||||||
|
// Background-math reading list rendered by MoreSection below.
// Each entry: display title (text), article URL (link), topic bullets (details).
// Fix: "Bernouilli" → "Bernoulli" (user-visible spelling error).
const mathForMachineLearning = [
  {
    text: "Linear algebra",
    link: "https://drive.google.com/file/d/1j2v2n6STPnblOCZ1_GBcVAZrsYkjPYwR/view?usp=sharing",
    details: [
      "Vectors and matrices",
      "Determinant and trace",
      "Orthogonal matrices",
      "Null space",
      "Linear transformations",
      "Singular value decomposition",
      "Least squares problems",
      "Principal direction problems",
      "Inversion of block matrices",
      "Schur complement identity",
      "Sherman-Morrison-Woodbury",
      "Matrix determinant lemma",
    ],
  },
  {
    text: "Introduction to probability",
    link: "https://drive.google.com/file/d/1cmxXneW122-hcfmMRjEE-n5C9T2YvuQX/view?usp=sharing",
    details: [
      "Random variables",
      "Joint probability",
      "Marginal probability",
      "Conditional probability",
      "Bayes' rule",
      "Independence",
      "Expectation",
    ],
  },
  {
    text: "Probability distributions",
    link: "https://drive.google.com/file/d/1GI3eZNB1CjTqYHLyuRhCV215rwqANVOx/view?usp=sharing",
    details: [
      "Bernoulli distribution",
      "Beta distribution",
      "Categorical distribution",
      "Dirichlet distribution",
      "Univariate normal distribution",
      "Normal inverse-scaled gamma distribution",
      "Multivariate normal distribution",
      "Normal inverse Wishart distribution",
      "Conjugacy",
    ],
  },
  {
    text: "Fitting probability distributions",
    link: "https://drive.google.com/file/d/1DZ4rCmC7AZ8PFc51PiMUIkBO-xqKT_CG/view?usp=sharing",
    details: [
      "Maximum likelihood",
      "Maximum a posteriori",
      "Bayesian approach",
      "Example: fitting normal",
      "Example: fitting categorical",
    ],
  },
  {
    text: "The normal distribution",
    link: "https://drive.google.com/file/d/1CTfmsN-HJWZBRj8lY0ZhgHEbPCmYXWnA/view?usp=sharing",
    details: [
      "Types of covariance matrix",
      "Decomposition of covariance",
      "Linear transformations",
      "Marginal distributions",
      "Conditional distributions",
      "Product of two normals",
      "Change of variable formula",
    ],
  },
];
|
||||||
|
|
||||||
|
// Optimization tutorial reading list rendered by MoreSection below.
// Each entry: display title (text), article URL (link), topic bullets (details).
const optimization = [
  {
    text: "Gradient-based optimization",
    link: "https://drive.google.com/file/d/1IoOSfJ0ku89aVyM9qygPl4MVnAhMEbAZ/view?usp=sharing",
    details: [
      "Convexity",
      "Steepest descent",
      "Newton's method",
      "Gauss-Newton method",
      "Line search",
      "Reparameterization",
    ],
  },
  {
    text: "Bayesian optimization",
    link: "https://www.borealisai.com/en/blog/tutorial-8-bayesian-optimization/",
    details: [
      "Gaussian processes",
      "Acquisition functions",
      "Incorporating noise",
      "Kernel choice",
      "Learning GP parameters",
      "Tips, tricks, and limitations",
      "Beta-Bernoulli bandit",
      "Random forests for BO",
      "Tree-Parzen estimators",
    ],
  },
  {
    text: "SAT Solvers I",
    link: "https://www.borealisai.com/en/blog/tutorial-9-sat-solvers-i-introduction-and-applications/",
    details: [
      "Boolean logic and satisfiability",
      "Conjunctive normal form",
      "The Tseitin transformation",
      "SAT and related problems",
      "SAT constructions",
      "Graph coloring and scheduling",
      "Fitting binary neural networks",
      "Fitting decision trees",
    ],
  },
  {
    text: "SAT Solvers II",
    link: "https://www.borealisai.com/en/blog/tutorial-10-sat-solvers-ii-algorithms/",
    details: [
      "Conditioning",
      "Resolution",
      "Solving 2-SAT by unit propagation",
      "Directional resolution",
      "SAT as binary search",
      "DPLL",
      "Conflict driven clause learning",
    ],
  },
  {
    text: "SAT Solvers III",
    link: "https://www.borealisai.com/en/blog/tutorial-11-sat-solvers-iii-factor-graphs-and-smt-solvers/",
    details: [
      "Satisfiability vs. problem size",
      "Factor graph representation",
      "Max product / sum product for SAT",
      "Survey propagation",
      "SAT with non-binary variables",
      "SMT solvers",
    ],
  },
];
|
||||||
|
|
||||||
|
// Temporal-models tutorial reading list rendered by MoreSection below.
// Each entry: display title (text), article URL (link), topic bullets (details).
const temporalModels = [
  {
    text: "Temporal models",
    link: "https://drive.google.com/file/d/1rrzGNyZDjXQ3_9ZqCGDmRMM3GYtHSBvj/view?usp=sharing",
    details: [
      "Kalman filter",
      "Smoothing",
      "Extended Kalman filter",
      "Unscented Kalman filter",
      "Particle filtering",
    ],
  },
];
|
||||||
|
|
||||||
|
// Computer-vision tutorial reading list rendered by MoreSection below.
// Each entry: display title (text), article URL (link), topic bullets (details).
const computerVision = [
  {
    text: "Image Processing",
    link: "https://drive.google.com/file/d/1r3V1GC5grhPF2pD91izuE0hTrTUEpQ9I/view?usp=sharing",
    details: [
      "Whitening",
      "Histogram equalization",
      "Filtering",
      "Edges and corners",
      "Dimensionality reduction",
    ],
  },
  {
    text: "Pinhole camera",
    link: "https://drive.google.com/file/d/1dbMBE13MWcd84dEGjYeWsC6eXouoC0xn/view?usp=sharing",
    details: [
      "Pinhole camera model",
      "Radial distortion",
      "Homogeneous coordinates",
      "Learning extrinsic parameters",
      "Learning intrinsic parameters",
      "Inferring three-dimensional world points",
    ],
  },
  {
    text: "Geometric transformations",
    link: "https://drive.google.com/file/d/1UArrb1ovqvZHbv90MufkW372r__ZZACQ/view?usp=sharing",
    details: [
      "Euclidean, similarity, affine, projective transformations",
      "Fitting transformation models",
      "Inference in transformation models",
      "Three geometric problems for planes",
      "Transformations between images",
      "Robust learning of transformations",
    ],
  },
  {
    text: "Multiple cameras",
    link: "https://drive.google.com/file/d/1RqUoc7kvK8vqZF1NVuw7bIex9v4_QlSx/view?usp=sharing",
    details: [
      "Two view geometry",
      "The essential matrix",
      "The fundamental matrix",
      "Two-view reconstruction pipeline",
      "Rectification",
      "Multiview reconstruction",
    ],
  },
];
|
||||||
|
|
||||||
|
// Reinforcement-learning reading list rendered by MoreSection below.
// Each entry: display title (text), article URL (link), topic bullets (details).
const reinforcementLearning = [
  {
    text: "Transformers in RL",
    link: "https://arxiv.org/abs/2307.05979",
    details: [
      "Challenges in RL",
      "Advantages of transformers for RL",
      "Representation learning",
      "Transition function learning",
      "Reward learning",
      "Policy learning",
      "Training strategy",
      "Interpretability",
      "Applications",
    ],
  },
];
|
||||||
|
|
||||||
|
// AI-theory reading list rendered by MoreSection below.
// Each entry: display title (text), article URL (link), topic bullets (details).
const aiTheory = [
  {
    text: "Gradient flow",
    link: "https://www.borealisai.com/research-blogs/gradient-flow/",
    details: [
      "Gradient flow",
      "Evolution of residual",
      "Evolution of parameters",
      "Evolution of model predictions",
      "Evolution of prediction covariance",
    ],
  },
  {
    text: "Neural tangent kernel",
    link: "https://www.borealisai.com/research-blogs/the-neural-tangent-kernel/",
    details: [
      "Infinite width neural networks",
      "Training dynamics",
      "Empirical NTK for shallow network",
      "Analytical NTK for shallow network",
      "Empirical NTK for deep network",
      "Analytical NTK for deep network",
    ],
  },
  {
    text: "NTK applications",
    link: "https://www.borealisai.com/research-blogs/neural-tangent-kernel-applications/",
    details: [
      "Trainability",
      "Convergence bounds",
      "Evolution of parameters",
      "Evolution of predictions",
      "NTK Gaussian processes",
      "NTK and generalizability",
    ],
  },
];
|
||||||
|
|
||||||
|
// Unsupervised-learning reading list rendered by MoreSection below.
// Each entry: display title (text), article URL (link), topic bullets (details).
const unsupervisedLearning = [
  {
    text: "Modeling complex data densities",
    link: "https://drive.google.com/file/d/1BrPHxAuyz28hhz_FtbO0A1cWYdMs2_h8/view?usp=sharing",
    details: [
      "Hidden variables",
      "Expectation maximization",
      "Mixture of Gaussians",
      "The t-distribution",
      "Factor analysis",
      "The EM algorithm in detail",
    ],
  },
  {
    text: "Variational autoencoders",
    link: "https://www.borealisai.com/en/blog/tutorial-5-variational-auto-encoders/",
    details: [
      "Non-linear latent variable models",
      "Evidence lower bound (ELBO)",
      "ELBO properties",
      "Variational approximation",
      "The variational autoencoder",
      "Reparameterization trick",
    ],
  },
  {
    text: "Normalizing flows: introduction and review",
    link: "https://arxiv.org/abs/1908.09257",
    details: [
      "Normalizing flows",
      "Elementwise and linear flows",
      "Planar and radial flows",
      "Coupling and auto-regressive flows",
      "Coupling functions",
      "Residual flows",
      "Infinitesimal (continuous) flows",
      "Datasets and performance",
    ],
  },
];
|
||||||
|
|
||||||
|
// Graphical-models reading list rendered by MoreSection below.
// Each entry: display title (text), article URL (link), topic bullets (details).
const graphicalModels = [
  {
    text: "Graphical models",
    link: "https://drive.google.com/file/d/1ghgeRmeZMyzNHcuzVwS4vRP6BXi3npVO/view?usp=sharing",
    details: [
      "Conditional independence",
      "Directed graphical models",
      "Undirected graphical models",
      "Inference in graphical models",
      "Sampling in graphical models",
      "Learning in graphical models",
    ],
  },
  {
    text: "Models for chains and trees",
    link: "https://drive.google.com/file/d/1WAMc3wtZoPv5wRkdF-D0SShVYF6Net84/view?usp=sharing",
    details: [
      "Hidden Markov models",
      "Viterbi algorithm",
      "Forward-backward algorithm",
      "Belief propagation",
      "Sum product algorithm",
      "Extension to trees",
      "Graphs with loops",
    ],
  },
  {
    text: "Models for grids",
    link: "https://drive.google.com/file/d/1qqS9OfA1z7t12M45UaBr4CSCj1jwzcwz/view?usp=sharing",
    details: [
      "Markov random fields",
      "MAP inference in binary pairwise MRFs",
      "Graph cuts",
      "Multi-label pairwise MRFs",
      "Alpha-expansion algorithm",
      "Conditional random fields",
    ],
  },
];
|
||||||
|
|
||||||
|
// Classical machine-learning reading list rendered by MoreSection below.
// Each entry: display title (text), article URL (link), topic bullets (details).
const machineLearning = [
  {
    text: "Learning and inference",
    link: "https://drive.google.com/file/d/1ArWWi-qbzK2ih6KpOeIF8wX5g3S4J5DY/view?usp=sharing",
    details: [
      "Discriminative models",
      "Generative models",
      "Example: regression",
      "Example: classification",
    ],
  },
  {
    text: "Regression models",
    link: "https://drive.google.com/file/d/1QZX5jm4xN8rhpvdjRsFP5Ybw1EXSNGaL/view?usp=sharing",
    details: [
      "Linear regression",
      "Bayesian linear regression",
      "Non-linear regression",
      "Bayesian non-linear regression",
      "The kernel trick",
      "Gaussian process regression",
      "Sparse linear regression",
      "Relevance vector regression",
    ],
  },
  {
    text: "Classification models",
    link: "https://drive.google.com/file/d/1-_f4Yfm8iBWcaZ2Gyjw6O0eZiODipmSV/view?usp=sharing",
    details: [
      "Logistic regression",
      "Bayesian logistic regression",
      "Non-linear logistic regression",
      "Gaussian process classification",
      "Relevance vector classification",
      "Incremental fitting: boosting and trees",
      "Multi-class logistic regression",
    ],
  },
  {
    text: "Few-shot learning and meta-learning I",
    link: "https://www.borealisai.com/en/blog/tutorial-2-few-shot-learning-and-meta-learning-i/",
    details: [
      "Meta-learning framework",
      "Approaches to meta-learning",
      "Matching networks",
      "Prototypical networks",
      "Relation networks",
    ],
  },
  {
    text: "Few-shot learning and meta-learning II",
    link: "https://www.borealisai.com/en/blog/tutorial-3-few-shot-learning-and-meta-learning-ii/",
    details: [
      "MAML & Reptile",
      "LSTM based meta-learning",
      "Reinforcement learning based approaches",
      "Memory augmented neural networks",
      "SNAIL",
      "Generative models",
      "Data augmentation approaches",
    ],
  },
];
|
||||||
|
|
||||||
|
// Natural-language-processing reading list rendered by MoreSection below.
// Each entry: display title (text), article URL (link), topic bullets (details).
const nlp = [
  {
    text: "Neural natural language generation I",
    link: "https://www.borealisai.com/en/blog/tutorial-6-neural-natural-language-generation-decoding-algorithms/",
    details: [
      "Encoder-decoder architecture",
      "Maximum-likelihood training",
      "Greedy search",
      "Beam search",
      "Diverse beam search",
      "Top-k sampling",
      "Nucleus sampling",
    ],
  },
  {
    text: "Neural natural language generation II",
    link: "https://www.borealisai.com/en/blog/tutorial-7-neural-natural-language-generation-sequence-level-training/",
    details: [
      "Fine-tuning with reinforcement learning",
      "Training from scratch with RL",
      "RL vs. structured prediction",
      "Minimum risk training",
      "Scheduled sampling",
      "Beam search optimization",
      "SeaRNN",
      "Reward-augmented maximum likelihood",
    ],
  },
  {
    text: "Parsing I",
    link: "https://www.borealisai.com/en/blog/tutorial-15-parsing-i-context-free-grammars-and-cyk-algorithm/",
    details: [
      "Parse trees",
      "Context-free grammars",
      "Chomsky normal form",
      "CYK recognition algorithm",
      "Worked example",
    ],
  },
  {
    text: "Parsing II",
    link: "https://www.borealisai.com/en/blog/tutorial-18-parsing-ii-wcfgs-inside-algorithm-and-weighted-parsing/",
    details: [
      "Weighted context-free grammars",
      "Semirings",
      "Inside algorithm",
      "Inside weights",
      "Weighted parsing",
    ],
  },
  {
    text: "Parsing III",
    link: "https://www.borealisai.com/en/blog/tutorial-19-parsing-iii-pcfgs-and-inside-outside-algorithm/",
    details: [
      "Probabilistic context-free grammars",
      "Parameter estimation (supervised)",
      "Parameter estimation (unsupervised)",
      "Viterbi training",
      "Expectation maximization",
      "Outside from inside",
      "Interpretation of outside weights",
    ],
  },
  {
    text: "XLNet",
    link: "https://www.borealisai.com/en/blog/understanding-xlnet/",
    details: [
      "Language modeling",
      "XLNet training objective",
      "Permutations",
      "Attention mask",
      "Two stream self-attention",
    ],
  },
];
|
||||||
|
|
||||||
|
// Responsible-AI reading list rendered by MoreSection below.
// Each entry: display title (text), article URL (link), topic bullets (details).
// Fix: "matchine learning" → "machine learning" (user-visible spelling error).
const responsibleAI = [
  {
    text: "Bias and fairness",
    link: "https://www.borealisai.com/en/blog/tutorial1-bias-and-fairness-ai/",
    details: [
      "Sources of bias",
      "Demographic Parity",
      "Equality of odds",
      "Equality of opportunity",
      "Individual fairness",
      "Bias mitigation",
    ],
  },
  {
    text: "Explainability I",
    link: "https://www.borealisai.com/research-blogs/explainability-i-local-post-hoc-explanations/",
    details: [
      "Taxonomy of XAI approaches",
      "Local post-hoc explanations",
      "Individual conditional explanation",
      "Counterfactual explanations",
      "LIME & Anchors",
      "Shapley additive explanations & SHAP",
    ],
  },
  {
    text: "Explainability II",
    link: "https://www.borealisai.com/research-blogs/explainability-ii-global-explanations-proxy-models-and-interpretable-models/",
    details: [
      "Global feature importance",
      "Partial dependence & ICE plots",
      "Accumulated local effects",
      "Aggregate SHAP values",
      "Prototypes & criticisms",
      "Surrogate / proxy models",
      "Inherently interpretable models",
    ],
  },
  {
    text: "Differential privacy I",
    link: "https://www.borealisai.com/en/blog/tutorial-12-differential-privacy-i-introduction/",
    details: [
      "Early approaches to privacy",
      "Fundamental law of information recovery",
      "Differential privacy",
      "Properties of differential privacy",
      "The Laplace mechanism",
      "Examples",
      "Other mechanisms and definitions",
    ],
  },
  {
    text: "Differential privacy II",
    link: "https://www.borealisai.com/en/blog/tutorial-13-differential-privacy-ii-machine-learning-and-data-generation/",
    details: [
      "Differential privacy and machine learning",
      "DPSGD",
      "PATE",
      "Differentially private data generation",
      "DPGAN",
      "PateGAN",
    ],
  },
];
|
||||||
|
|
||||||
|
export default function MoreSection() {
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<MoreContainer lightBg={true} id="More">
|
||||||
|
<MoreWrapper>
|
||||||
|
<MoreRow imgStart={false}>
|
||||||
|
<Column1>
|
||||||
|
<TextWrapper>
|
||||||
|
<TopLine>More</TopLine>
|
||||||
|
<Heading lightText={false}>Further reading</Heading>
|
||||||
|
<Subtitle darkText={true}>
|
||||||
|
Other articles, blogs, and books that I have written. Most in a
|
||||||
|
similar style and using the same notation as Understanding Deep
|
||||||
|
Learning.
|
||||||
|
</Subtitle>
|
||||||
|
</TextWrapper>
|
||||||
|
</Column1>
|
||||||
|
<Column2>
|
||||||
|
<ImgWrap>
|
||||||
|
<Img src={img} alt="More" />
|
||||||
|
</ImgWrap>
|
||||||
|
</Column2>
|
||||||
|
</MoreRow>
|
||||||
|
<MoreRow2>
|
||||||
|
<Column1>
|
||||||
|
<TopLine>Book</TopLine>
|
||||||
|
<MoreOuterList>
|
||||||
|
{book.map((item, index) => (
|
||||||
|
<li key={index}>
|
||||||
|
<MoreLink href={item.link} target="_blank" rel="noreferrer">
|
||||||
|
{item.text}
|
||||||
|
</MoreLink>
|
||||||
|
<MoreInnerP>
|
||||||
|
<MoreInnerList>
|
||||||
|
{item.details.map((detail, index) => (
|
||||||
|
<li key={index}>{detail}</li>
|
||||||
|
))}
|
||||||
|
</MoreInnerList>
|
||||||
|
</MoreInnerP>
|
||||||
|
</li>
|
||||||
|
))}
|
||||||
|
</MoreOuterList>
|
||||||
|
|
||||||
|
<TopLine>Transformers & LLMs</TopLine>
|
||||||
|
<MoreOuterList>
|
||||||
|
{transformersAndLLMs.map((item, index) => (
|
||||||
|
<li key={index}>
|
||||||
|
<MoreLink href={item.link} target="_blank" rel="noreferrer">
|
||||||
|
{item.text}
|
||||||
|
</MoreLink>
|
||||||
|
<MoreInnerP>
|
||||||
|
<MoreInnerList>
|
||||||
|
{item.details.map((detail, index) => (
|
||||||
|
<li key={index}>{detail}</li>
|
||||||
|
))}
|
||||||
|
</MoreInnerList>
|
||||||
|
</MoreInnerP>
|
||||||
|
</li>
|
||||||
|
))}
|
||||||
|
</MoreOuterList>
|
||||||
|
|
||||||
|
<TopLine>Math for machine learning</TopLine>
|
||||||
|
<MoreOuterList>
|
||||||
|
{mathForMachineLearning.map((item, index) => (
|
||||||
|
<li key={index}>
|
||||||
|
<MoreLink href={item.link} target="_blank" rel="noreferrer">
|
||||||
|
{item.text}
|
||||||
|
</MoreLink>
|
||||||
|
<MoreInnerP>
|
||||||
|
<MoreInnerList>
|
||||||
|
{item.details.map((detail, index) => (
|
||||||
|
<li key={index}>{detail}</li>
|
||||||
|
))}
|
||||||
|
</MoreInnerList>
|
||||||
|
</MoreInnerP>
|
||||||
|
</li>
|
||||||
|
))}
|
||||||
|
</MoreOuterList>
|
||||||
|
|
||||||
|
<TopLine>Optimization</TopLine>
|
||||||
|
<MoreOuterList>
|
||||||
|
{optimization.map((item, index) => (
|
||||||
|
<li key={index}>
|
||||||
|
<MoreLink href={item.link} target="_blank" rel="noreferrer">
|
||||||
|
{item.text}
|
||||||
|
</MoreLink>
|
||||||
|
<MoreInnerP>
|
||||||
|
<MoreInnerList>
|
||||||
|
{item.details.map((detail, index) => (
|
||||||
|
<li key={index}>{detail}</li>
|
||||||
|
))}
|
||||||
|
</MoreInnerList>
|
||||||
|
</MoreInnerP>
|
||||||
|
</li>
|
||||||
|
))}
|
||||||
|
</MoreOuterList>
|
||||||
|
|
||||||
|
<TopLine>Temporal models</TopLine>
|
||||||
|
<MoreOuterList>
|
||||||
|
{temporalModels.map((item, index) => (
|
||||||
|
<li key={index}>
|
||||||
|
<MoreLink href={item.link} target="_blank" rel="noreferrer">
|
||||||
|
{item.text}
|
||||||
|
</MoreLink>
|
||||||
|
<MoreInnerP>
|
||||||
|
<MoreInnerList>
|
||||||
|
{item.details.map((detail, index) => (
|
||||||
|
<li key={index}>{detail}</li>
|
||||||
|
))}
|
||||||
|
</MoreInnerList>
|
||||||
|
</MoreInnerP>
|
||||||
|
</li>
|
||||||
|
))}
|
||||||
|
</MoreOuterList>
|
||||||
|
|
||||||
|
<TopLine>Computer vision</TopLine>
|
||||||
|
<MoreOuterList>
|
||||||
|
{computerVision.map((item, index) => (
|
||||||
|
<li key={index}>
|
||||||
|
<MoreLink href={item.link} target="_blank" rel="noreferrer">
|
||||||
|
{item.text}
|
||||||
|
</MoreLink>
|
||||||
|
<MoreInnerP>
|
||||||
|
<MoreInnerList>
|
||||||
|
{item.details.map((detail, index) => (
|
||||||
|
<li key={index}>{detail}</li>
|
||||||
|
))}
|
||||||
|
</MoreInnerList>
|
||||||
|
</MoreInnerP>
|
||||||
|
</li>
|
||||||
|
))}
|
||||||
|
</MoreOuterList>
|
||||||
|
|
||||||
|
<TopLine>Reinforcement learning</TopLine>
|
||||||
|
<MoreOuterList>
|
||||||
|
{reinforcementLearning.map((item, index) => (
|
||||||
|
<li key={index}>
|
||||||
|
<MoreLink href={item.link} target="_blank" rel="noreferrer">
|
||||||
|
{item.text}
|
||||||
|
</MoreLink>
|
||||||
|
<MoreInnerP>
|
||||||
|
<MoreInnerList>
|
||||||
|
{item.details.map((detail, index) => (
|
||||||
|
<li key={index}>{detail}</li>
|
||||||
|
))}
|
||||||
|
</MoreInnerList>
|
||||||
|
</MoreInnerP>
|
||||||
|
</li>
|
||||||
|
))}
|
||||||
|
</MoreOuterList>
|
||||||
|
</Column1>
|
||||||
|
|
||||||
|
<Column2>
|
||||||
|
<TopLine>AI Theory</TopLine>
|
||||||
|
<MoreOuterList>
|
||||||
|
{aiTheory.map((item, index) => (
|
||||||
|
<li key={index}>
|
||||||
|
<MoreLink href={item.link} target="_blank" rel="noreferrer">
|
||||||
|
{item.text}
|
||||||
|
</MoreLink>
|
||||||
|
<MoreInnerP>
|
||||||
|
<MoreInnerList>
|
||||||
|
{item.details.map((detail, index) => (
|
||||||
|
<li key={index}>{detail}</li>
|
||||||
|
))}
|
||||||
|
</MoreInnerList>
|
||||||
|
</MoreInnerP>
|
||||||
|
</li>
|
||||||
|
))}
|
||||||
|
</MoreOuterList>
|
||||||
|
|
||||||
|
<TopLine>Unsupervised learning</TopLine>
|
||||||
|
<MoreOuterList>
|
||||||
|
{unsupervisedLearning.map((item, index) => (
|
||||||
|
<li key={index}>
|
||||||
|
<MoreLink href={item.link} target="_blank" rel="noreferrer">
|
||||||
|
{item.text}
|
||||||
|
</MoreLink>
|
||||||
|
<MoreInnerP>
|
||||||
|
<MoreInnerList>
|
||||||
|
{item.details.map((detail, index) => (
|
||||||
|
<li key={index}>{detail}</li>
|
||||||
|
))}
|
||||||
|
</MoreInnerList>
|
||||||
|
</MoreInnerP>
|
||||||
|
</li>
|
||||||
|
))}
|
||||||
|
</MoreOuterList>
|
||||||
|
|
||||||
|
<TopLine>Graphical Models</TopLine>
|
||||||
|
<MoreOuterList>
|
||||||
|
{graphicalModels.map((item, index) => (
|
||||||
|
<li key={index}>
|
||||||
|
<MoreLink href={item.link} target="_blank" rel="noreferrer">
|
||||||
|
{item.text}
|
||||||
|
</MoreLink>
|
||||||
|
<MoreInnerP>
|
||||||
|
<MoreInnerList>
|
||||||
|
{item.details.map((detail, index) => (
|
||||||
|
<li key={index}>{detail}</li>
|
||||||
|
))}
|
||||||
|
</MoreInnerList>
|
||||||
|
</MoreInnerP>
|
||||||
|
</li>
|
||||||
|
))}
|
||||||
|
</MoreOuterList>
|
||||||
|
|
||||||
|
<TopLine>Machine learning</TopLine>
|
||||||
|
<MoreOuterList>
|
||||||
|
{machineLearning.map((item, index) => (
|
||||||
|
<li key={index}>
|
||||||
|
<MoreLink href={item.link} target="_blank" rel="noreferrer">
|
||||||
|
{item.text}
|
||||||
|
</MoreLink>
|
||||||
|
<MoreInnerP>
|
||||||
|
<MoreInnerList>
|
||||||
|
{item.details.map((detail, index) => (
|
||||||
|
<li key={index}>{detail}</li>
|
||||||
|
))}
|
||||||
|
</MoreInnerList>
|
||||||
|
</MoreInnerP>
|
||||||
|
</li>
|
||||||
|
))}
|
||||||
|
</MoreOuterList>
|
||||||
|
|
||||||
|
<TopLine>Natural language processing</TopLine>
|
||||||
|
<MoreOuterList>
|
||||||
|
{nlp.map((item, index) => (
|
||||||
|
<li key={index}>
|
||||||
|
<MoreLink href={item.link} target="_blank" rel="noreferrer">
|
||||||
|
{item.text}
|
||||||
|
</MoreLink>
|
||||||
|
<MoreInnerP>
|
||||||
|
<MoreInnerList>
|
||||||
|
{item.details.map((detail, index) => (
|
||||||
|
<li key={index}>{detail}</li>
|
||||||
|
))}
|
||||||
|
</MoreInnerList>
|
||||||
|
</MoreInnerP>
|
||||||
|
</li>
|
||||||
|
))}
|
||||||
|
</MoreOuterList>
|
||||||
|
|
||||||
|
<TopLine>Responsible AI</TopLine>
|
||||||
|
<MoreOuterList>
|
||||||
|
{responsibleAI.map((item, index) => (
|
||||||
|
<li key={index}>
|
||||||
|
<MoreLink href={item.link} target="_blank" rel="noreferrer">
|
||||||
|
{item.text}
|
||||||
|
</MoreLink>
|
||||||
|
<MoreInnerP>
|
||||||
|
<MoreInnerList>
|
||||||
|
{item.details.map((detail, index) => (
|
||||||
|
<li key={index}>{detail}</li>
|
||||||
|
))}
|
||||||
|
</MoreInnerList>
|
||||||
|
</MoreInnerP>
|
||||||
|
</li>
|
||||||
|
))}
|
||||||
|
</MoreOuterList>
|
||||||
|
</Column2>
|
||||||
|
</MoreRow2>
|
||||||
|
</MoreWrapper>
|
||||||
|
</MoreContainer>
|
||||||
|
</>
|
||||||
|
);
|
||||||
|
}
|
||||||
119
src/components/Navbar/NavbarElements.jsx
Executable file
119
src/components/Navbar/NavbarElements.jsx
Executable file
@@ -0,0 +1,119 @@
|
|||||||
|
import { Link as LinkR } from "react-router-dom";
|
||||||
|
import { Link as LinkS } from "react-scroll";
|
||||||
|
import styled from "styled-components";
|
||||||
|
|
||||||
|
// Outer <nav> bar; transparent at the top of the page and solid black once
// scrolled (driven by the `scrollNav` prop supplied by the Navbar component).
export const Nav = styled.nav`
  background: ${({ scrollNav }) => (scrollNav ? "#000" : "transparent")};
  height: 100px;
  margin-top: -100px;
  display: flex;
  justify-content: center;
  align-items: center;
  font-size: 1rem;
  position: sticky;
  top: 0;
  z-index: 10;

  @media screen and (max-width: 960px) {
    transition: 0.8s all ease;
  }
`;

// Inner flex wrapper constraining the bar's content width and spacing the
// logo and menu apart.
export const NavbarContainer = styled.div`
  display: flex;
  justify-content: space-between;
  height: 100px;
  z-index: 1;
  width: 100%;
  padding: 0 24px;
  max-width: 1100px;
`;

// Site title; a react-router link (LinkR) back to the home route.
export const NavLogo = styled(LinkR)`
  color: #fff;
  justify-self: flex-start;
  cursor: pointer;
  font-size: 1.5rem;
  display: flex;
  align-items: center;
  margin-left: 24px;
  font-weight: bold;
  text-decoration: none;

  @media screen and (max-width: 768px) {
    font-size: 1rem;
  }
`;

// Hamburger icon; hidden on desktop, shown top-right on narrow screens.
export const MobileIcon = styled.div`
  display: none;

  @media screen and (max-width: 768px) {
    display: block;
    position: absolute;
    top: 0;
    right: 0;
    transform: translate(-100%, 60%);
    font-size: 1.8rem;
    cursor: pointer;
  }
`;

// Horizontal list of section links; hidden on narrow screens.
export const NavMenu = styled.ul`
  display: flex;
  align-items: center;
  list-style: none;
  text-align: center;
  margin-right: -22px;

  @media screen and (max-width: 768px) {
    display: none;
  }
`;

// A single menu entry.
export const NavItem = styled.li`
  height: 80px;
`;

// Container for call-to-action buttons; hidden on narrow screens.
export const NavBtn = styled.nav`
  display: flex;
  align-items: center;

  @media screen and (max-width: 768px) {
    display: none;
  }
`;

// Smooth-scroll link (react-scroll LinkS); underlined via `.active` when its
// target section is in the viewport.
export const NavLinks = styled(LinkS)`
  color: #fff;
  display: flex;
  align-items: center;
  text-decoration: none;
  padding: 0 1rem;
  height: 100%;
  cursor: pointer;

  &.active {
    border-bottom: 3px solid #57c6d1;
  }
`;

// Pill-shaped call-to-action button (react-router link) with inverted colors
// on hover.
export const NavBtnLink = styled(LinkR)`
  border-radius: 50px;
  background: #01bf71;
  white-space: nowrap;
  padding: 10px 22px;
  color: #010606;
  font-size: 16px;
  outline: none;
  border: none;
  cursor: pointer;
  transition: all 0.2s ease-in-out;
  text-decoration: none;

  &:hover {
    transition: all 0.2s ease-in-out;
    background: #fff;
    color: #010606;
  }
`;
|
||||||
104
src/components/Navbar/index.jsx
Executable file
104
src/components/Navbar/index.jsx
Executable file
@@ -0,0 +1,104 @@
|
|||||||
|
import {
|
||||||
|
MobileIcon,
|
||||||
|
Nav,
|
||||||
|
NavbarContainer,
|
||||||
|
NavItem,
|
||||||
|
NavLinks,
|
||||||
|
NavLogo,
|
||||||
|
NavMenu,
|
||||||
|
} from "@/components/Navbar/NavbarElements";
|
||||||
|
import { useEffect, useState } from "react";
|
||||||
|
import { FaBars } from "react-icons/fa";
|
||||||
|
import { IconContext } from "react-icons/lib";
|
||||||
|
import { animateScroll as scroll } from "react-scroll";
|
||||||
|
|
||||||
|
export default function Navbar({ toggle }) {
|
||||||
|
const [scrollNav, setScrollNav] = useState(false);
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
const changeNav = () => {
|
||||||
|
setScrollNav(window.scrollY >= 80);
|
||||||
|
};
|
||||||
|
|
||||||
|
window.addEventListener("scroll", changeNav);
|
||||||
|
|
||||||
|
return () => {
|
||||||
|
window.removeEventListener("scroll", changeNav);
|
||||||
|
};
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
const scrollToHome = () => {
|
||||||
|
scroll.scrollToTop();
|
||||||
|
};
|
||||||
|
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<IconContext.Provider value={{ color: "#fff" }}>
|
||||||
|
<Nav scrollNav={scrollNav}>
|
||||||
|
<NavbarContainer>
|
||||||
|
<NavLogo to="/udlbook/" onClick={scrollToHome}>
|
||||||
|
<h1> Understanding Deep Learning </h1>
|
||||||
|
</NavLogo>
|
||||||
|
<MobileIcon onClick={toggle}>
|
||||||
|
<FaBars />
|
||||||
|
</MobileIcon>
|
||||||
|
<NavMenu>
|
||||||
|
<NavItem>
|
||||||
|
<NavLinks
|
||||||
|
to="Notebooks"
|
||||||
|
smooth={true}
|
||||||
|
duration={500}
|
||||||
|
spy={true}
|
||||||
|
exact="true"
|
||||||
|
offset={-80}
|
||||||
|
activeClass="active"
|
||||||
|
>
|
||||||
|
Notebooks
|
||||||
|
</NavLinks>
|
||||||
|
</NavItem>
|
||||||
|
<NavItem>
|
||||||
|
<NavLinks
|
||||||
|
to="Instructors"
|
||||||
|
smooth={true}
|
||||||
|
duration={500}
|
||||||
|
spy={true}
|
||||||
|
exact="true"
|
||||||
|
offset={-80}
|
||||||
|
activeClass="active"
|
||||||
|
>
|
||||||
|
Instructors
|
||||||
|
</NavLinks>
|
||||||
|
</NavItem>
|
||||||
|
<NavItem>
|
||||||
|
<NavLinks
|
||||||
|
to="Media"
|
||||||
|
smooth={true}
|
||||||
|
duration={500}
|
||||||
|
spy={true}
|
||||||
|
exact="true"
|
||||||
|
offset={-80}
|
||||||
|
activeClass="active"
|
||||||
|
>
|
||||||
|
Media
|
||||||
|
</NavLinks>
|
||||||
|
</NavItem>
|
||||||
|
<NavItem>
|
||||||
|
<NavLinks
|
||||||
|
to="More"
|
||||||
|
smooth={true}
|
||||||
|
duration={500}
|
||||||
|
spy={true}
|
||||||
|
exact="true"
|
||||||
|
offset={-80}
|
||||||
|
activeClass="active"
|
||||||
|
>
|
||||||
|
More
|
||||||
|
</NavLinks>
|
||||||
|
</NavItem>
|
||||||
|
</NavMenu>
|
||||||
|
</NavbarContainer>
|
||||||
|
</Nav>
|
||||||
|
</IconContext.Provider>
|
||||||
|
</>
|
||||||
|
);
|
||||||
|
}
|
||||||
147
src/components/Notebooks/NotebookElements.jsx
Normal file
147
src/components/Notebooks/NotebookElements.jsx
Normal file
@@ -0,0 +1,147 @@
|
|||||||
|
import styled from "styled-components";
|
||||||
|
|
||||||
|
export const NotebookContainer = styled.div`
|
||||||
|
color: #fff;
|
||||||
|
/* background: #f9f9f9; */
|
||||||
|
background: ${({ lightBg }) => (lightBg ? "#f9f9f9" : "#010606")};
|
||||||
|
|
||||||
|
@media screen and (max-width: 768px) {
|
||||||
|
padding: 100px 0;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const NotebookWrapper = styled.div`
|
||||||
|
display: grid;
|
||||||
|
z-index: 1;
|
||||||
|
/* height: 1250px; */
|
||||||
|
width: 100%;
|
||||||
|
max-width: 1100px;
|
||||||
|
margin-right: auto;
|
||||||
|
margin-left: auto;
|
||||||
|
padding: 0 24px;
|
||||||
|
justify-content: center;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const NotebookRow = styled.div`
|
||||||
|
display: grid;
|
||||||
|
grid-auto-columns: minmax(auto, 1fr);
|
||||||
|
align-items: center;
|
||||||
|
grid-template-areas: ${({ imgStart }) => (imgStart ? `'col2 col1'` : `'col1 col2'`)};
|
||||||
|
|
||||||
|
@media screen and (max-width: 768px) {
|
||||||
|
grid-template-areas: ${({ imgStart }) =>
|
||||||
|
imgStart ? `'col1' 'col2'` : `'col1 col1' 'col2 col2'`};
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const Column1 = styled.p`
|
||||||
|
margin-bottom: 15px;
|
||||||
|
padding: 0 15px;
|
||||||
|
grid-area: col1;
|
||||||
|
|
||||||
|
@media screen and (max-width: 1050px) {
|
||||||
|
font-size: 12px;
|
||||||
|
}
|
||||||
|
|
||||||
|
@media screen and (max-width: 768px) {
|
||||||
|
font-size: 10px;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const Column2 = styled.p`
|
||||||
|
margin-bottom: 15px;
|
||||||
|
padding: 0 15px;
|
||||||
|
grid-area: col2;
|
||||||
|
|
||||||
|
@media screen and (max-width: 1050px) {
|
||||||
|
font-size: 12px;
|
||||||
|
}
|
||||||
|
|
||||||
|
@media screen and (max-width: 768px) {
|
||||||
|
font-size: 10px;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const TextWrapper = styled.div`
|
||||||
|
max-width: 540px;
|
||||||
|
padding-top: 0;
|
||||||
|
padding-bottom: 0;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const TopLine = styled.p`
|
||||||
|
color: #57c6d1;
|
||||||
|
font-size: 16px;
|
||||||
|
line-height: 16px;
|
||||||
|
font-weight: 700;
|
||||||
|
letter-spacing: 1.4px;
|
||||||
|
text-transform: uppercase;
|
||||||
|
margin-bottom: 16px;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const Heading = styled.h1`
|
||||||
|
margin-bottom: 24px;
|
||||||
|
font-size: 48px;
|
||||||
|
line-height: 1.1;
|
||||||
|
font-weight: 600;
|
||||||
|
color: ${({ lightText }) => (lightText ? "#f7f8fa" : "#010606")};
|
||||||
|
|
||||||
|
@media screen and (max-width: 480px) {
|
||||||
|
font-size: 32px;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const Subtitle = styled.p`
|
||||||
|
max-width: 440px;
|
||||||
|
margin-bottom: 35px;
|
||||||
|
font-size: 18px;
|
||||||
|
line-height: 24px;
|
||||||
|
color: ${({ darkText }) => (darkText ? "#010606" : "#fff")};
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const BtnWrap = styled.div`
|
||||||
|
display: flex;
|
||||||
|
justify-content: flex-start;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const ImgWrap = styled.div`
|
||||||
|
max-width: 555px;
|
||||||
|
height: 100%;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const Img = styled.img`
|
||||||
|
width: 100%;
|
||||||
|
margin-top: 0;
|
||||||
|
margin-right: 0;
|
||||||
|
margin-left: 10px;
|
||||||
|
padding-right: 0;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const NBLink = styled.a`
|
||||||
|
text-decoration: none;
|
||||||
|
color: #57c6d1;
|
||||||
|
font-weight: 300;
|
||||||
|
margin: 0 2px;
|
||||||
|
position: relative;
|
||||||
|
|
||||||
|
&:before {
|
||||||
|
position: absolute;
|
||||||
|
margin: 0 auto;
|
||||||
|
top: 100%;
|
||||||
|
left: 0;
|
||||||
|
width: 100%;
|
||||||
|
height: 2px;
|
||||||
|
background-color: #57c6d1;
|
||||||
|
content: "";
|
||||||
|
opacity: 0.3;
|
||||||
|
-webkit-transform: scaleX(1);
|
||||||
|
transition-property:
|
||||||
|
opacity,
|
||||||
|
-webkit-transform;
|
||||||
|
transition-duration: 0.3s;
|
||||||
|
}
|
||||||
|
|
||||||
|
&:hover:before {
|
||||||
|
opacity: 1;
|
||||||
|
-webkit-transform: scaleX(1.05);
|
||||||
|
}
|
||||||
|
`;
|
||||||
344
src/components/Notebooks/index.jsx
Normal file
344
src/components/Notebooks/index.jsx
Normal file
@@ -0,0 +1,344 @@
|
|||||||
|
import {
|
||||||
|
Column1,
|
||||||
|
Column2,
|
||||||
|
Heading,
|
||||||
|
Img,
|
||||||
|
ImgWrap,
|
||||||
|
NBLink,
|
||||||
|
NotebookContainer,
|
||||||
|
NotebookRow,
|
||||||
|
NotebookWrapper,
|
||||||
|
Subtitle,
|
||||||
|
TextWrapper,
|
||||||
|
TopLine,
|
||||||
|
} from "@/components/Notebooks/NotebookElements";
|
||||||
|
import img from "@/images/coding.svg";
|
||||||
|
|
||||||
|
const notebooks = [
|
||||||
|
{
|
||||||
|
text: "Notebook 1.1 - Background mathematics",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap01/1_1_BackgroundMathematics.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 2.1 - Supervised learning",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap02/2_1_Supervised_Learning.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 3.1 - Shallow networks I",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap03/3_1_Shallow_Networks_I.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 3.2 - Shallow networks II",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap03/3_2_Shallow_Networks_II.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 3.3 - Shallow network regions",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap03/3_3_Shallow_Network_Regions.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 3.4 - Activation functions",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap03/3_4_Activation_Functions.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 4.1 - Composing networks",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap04/4_1_Composing_Networks.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 4.2 - Clipping functions",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap04/4_2_Clipping_functions.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 4.3 - Deep networks",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap04/4_3_Deep_Networks.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 5.1 - Least squares loss",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap05/5_1_Least_Squares_Loss.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 5.2 - Binary cross-entropy loss",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap05/5_2_Binary_Cross_Entropy_Loss.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 5.3 - Multiclass cross-entropy loss",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap05/5_3_Multiclass_Cross_entropy_Loss.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 6.1 - Line search",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap06/6_1_Line_Search.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 6.2 - Gradient descent",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap06/6_2_Gradient_Descent.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 6.3 - Stochastic gradient descent",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap06/6_3_Stochastic_Gradient_Descent.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 6.4 - Momentum",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap06/6_4_Momentum.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 6.5 - Adam",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap06/6_5_Adam.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 7.1 - Backpropagation in toy model",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap07/7_1_Backpropagation_in_Toy_Model.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 7.2 - Backpropagation",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap07/7_2_Backpropagation.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 7.3 - Initialization",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap07/7_3_Initialization.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 8.1 - MNIST-1D performance",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap08/8_1_MNIST_1D_Performance.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 8.2 - Bias-variance trade-off",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap08/8_2_Bias_Variance_Trade_Off.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 8.3 - Double descent",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap08/8_3_Double_Descent.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 8.4 - High-dimensional spaces",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap08/8_4_High_Dimensional_Spaces.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 9.1 - L2 regularization",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap09/9_1_L2_Regularization.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 9.2 - Implicit regularization",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap09/9_2_Implicit_Regularization.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 9.3 - Ensembling",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap09/9_3_Ensembling.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 9.4 - Bayesian approach",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap09/9_4_Bayesian_Approach.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 9.5 - Augmentation",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap09/9_5_Augmentation.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 10.1 - 1D convolution",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap10/10_1_1D_Convolution.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 10.2 - Convolution for MNIST-1D",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap10/10_2_Convolution_for_MNIST_1D.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 10.3 - 2D convolution",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap10/10_3_2D_Convolution.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 10.4 - Downsampling & upsampling",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap10/10_4_Downsampling_and_Upsampling.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 10.5 - Convolution for MNIST",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap10/10_5_Convolution_For_MNIST.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 11.1 - Shattered gradients",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap11/11_1_Shattered_Gradients.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 11.2 - Residual networks",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap11/11_2_Residual_Networks.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 11.3 - Batch normalization",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap11/11_3_Batch_Normalization.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 12.1 - Self-attention",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap12/12_1_Self_Attention.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 12.2 - Multi-head self-attention",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap12/12_2_Multihead_Self_Attention.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 12.3 - Tokenization",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap12/12_3_Tokenization.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 12.4 - Decoding strategies",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap12/12_4_Decoding_Strategies.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 13.1 - Encoding graphs",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap13/13_1_Graph_Representation.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 13.2 - Graph classification",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap13/13_2_Graph_Classification.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 13.3 - Neighborhood sampling",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap13/13_3_Neighborhood_Sampling.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 13.4 - Graph attention",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap13/13_4_Graph_Attention_Networks.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 15.1 - GAN toy example",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap15/15_1_GAN_Toy_Example.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 15.2 - Wasserstein distance",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap15/15_2_Wasserstein_Distance.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 16.1 - 1D normalizing flows",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap16/16_1_1D_Normalizing_Flows.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 16.2 - Autoregressive flows",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap16/16_2_Autoregressive_Flows.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 16.3 - Contraction mappings",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap16/16_3_Contraction_Mappings.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 17.1 - Latent variable models",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap17/17_1_Latent_Variable_Models.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 17.2 - Reparameterization trick",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap17/17_2_Reparameterization_Trick.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 17.3 - Importance sampling",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap17/17_3_Importance_Sampling.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 18.1 - Diffusion encoder",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap18/18_1_Diffusion_Encoder.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 18.2 - 1D diffusion model",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap18/18_2_1D_Diffusion_Model.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 18.3 - Reparameterized model",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap18/18_3_Reparameterized_Model.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 18.4 - Families of diffusion models",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap18/18_4_Families_of_Diffusion_Models.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 19.1 - Markov decision processes",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap19/19_1_Markov_Decision_Processes.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 19.2 - Dynamic programming",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap19/19_2_Dynamic_Programming.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 19.3 - Monte-Carlo methods",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap19/19_3_Monte_Carlo_Methods.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 19.4 - Temporal difference methods",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap19/19_4_Temporal_Difference_Methods.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 19.5 - Control variates",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap19/19_5_Control_Variates.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 20.1 - Random data",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap20/20_1_Random_Data.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 20.2 - Full-batch gradient descent",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap20/20_2_Full_Batch_Gradient_Descent.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 20.3 - Lottery tickets",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap20/20_3_Lottery_Tickets.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 20.4 - Adversarial attacks",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap20/20_4_Adversarial_Attacks.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 21.1 - Bias mitigation",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap21/21_1_Bias_Mitigation.ipynb",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: "Notebook 21.2 - Explainability",
|
||||||
|
link: "https://github.com/udlbook/udlbook/blob/main/Notebooks/Chap21/21_2_Explainability.ipynb",
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
export default function NotebookSection() {
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<NotebookContainer lightBg={false} id="Notebooks">
|
||||||
|
<NotebookWrapper>
|
||||||
|
<NotebookRow imgStart={true}>
|
||||||
|
<Column1>
|
||||||
|
<TextWrapper>
|
||||||
|
<TopLine>Coding exercises</TopLine>
|
||||||
|
<Heading lightText={true}>
|
||||||
|
Python notebooks covering the whole text
|
||||||
|
</Heading>
|
||||||
|
<Subtitle darkText={false}>
|
||||||
|
Sixty eight python notebook exercises with missing code to fill
|
||||||
|
in based on the text
|
||||||
|
</Subtitle>
|
||||||
|
</TextWrapper>
|
||||||
|
</Column1>
|
||||||
|
<Column2>
|
||||||
|
<ImgWrap>
|
||||||
|
<Img src={img} alt="Coding" />
|
||||||
|
</ImgWrap>
|
||||||
|
</Column2>
|
||||||
|
</NotebookRow>
|
||||||
|
<NotebookRow>
|
||||||
|
<Column1>
|
||||||
|
<ul>
|
||||||
|
{/* render first half of notebooks*/}
|
||||||
|
{notebooks.slice(0, notebooks.length / 2).map((notebook, index) => (
|
||||||
|
<li key={index}>
|
||||||
|
{notebook.text}:{" "}
|
||||||
|
<NBLink href={notebook.link}>ipynb/colab</NBLink>
|
||||||
|
</li>
|
||||||
|
))}
|
||||||
|
</ul>
|
||||||
|
</Column1>
|
||||||
|
<Column2>
|
||||||
|
<ul>
|
||||||
|
{/* render second half of notebooks*/}
|
||||||
|
{notebooks.slice(notebooks.length / 2).map((notebook, index) => (
|
||||||
|
<li key={index}>
|
||||||
|
{notebook.text}:{" "}
|
||||||
|
<NBLink href={notebook.link}>ipynb/colab</NBLink>
|
||||||
|
</li>
|
||||||
|
))}
|
||||||
|
</ul>
|
||||||
|
</Column2>
|
||||||
|
</NotebookRow>
|
||||||
|
</NotebookWrapper>
|
||||||
|
</NotebookContainer>
|
||||||
|
</>
|
||||||
|
);
|
||||||
|
}
|
||||||
96
src/components/Sidebar/SidebarElements.jsx
Executable file
96
src/components/Sidebar/SidebarElements.jsx
Executable file
@@ -0,0 +1,96 @@
|
|||||||
|
import { FaTimes } from "react-icons/fa";
|
||||||
|
import { Link as LinkR } from "react-router-dom";
|
||||||
|
import { Link as LinkS } from "react-scroll";
|
||||||
|
import styled from "styled-components";
|
||||||
|
|
||||||
|
export const SidebarContainer = styled.aside`
|
||||||
|
position: fixed;
|
||||||
|
z-index: 999;
|
||||||
|
width: 100%;
|
||||||
|
height: 100%;
|
||||||
|
background: #0d0d0d;
|
||||||
|
display: grid;
|
||||||
|
align-items: center;
|
||||||
|
top: 0;
|
||||||
|
left: 0;
|
||||||
|
transition: 0.3s ease-in-out;
|
||||||
|
opacity: ${({ isOpen }) => (isOpen ? "100%" : "0")};
|
||||||
|
top: ${({ isOpen }) => (isOpen ? "0" : "-100%")};
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const CloseIcon = styled(FaTimes)`
|
||||||
|
color: #fff;
|
||||||
|
|
||||||
|
&:hover {
|
||||||
|
color: #01bf71;
|
||||||
|
transition: 0.2s ease-in-out;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const Icon = styled.div`
|
||||||
|
position: absolute;
|
||||||
|
top: 1.2rem;
|
||||||
|
right: 1.5rem;
|
||||||
|
background: transparent;
|
||||||
|
font-size: 2rem;
|
||||||
|
cursor: pointer;
|
||||||
|
outline: none;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const SidebarWrapper = styled.div`
|
||||||
|
color: #ffffff;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const SidebarMenu = styled.ul`
|
||||||
|
display: grid;
|
||||||
|
grid-template-columns: 1fr;
|
||||||
|
grid-template-rows: repeat(6, 80px);
|
||||||
|
text-align: center;
|
||||||
|
|
||||||
|
@media screen and (max-width: 480px) {
|
||||||
|
grid-template-rows: repeat(6, 60px);
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const SidebarLink = styled(LinkS)`
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
justify-content: center;
|
||||||
|
font-size: 1.5rem;
|
||||||
|
text-decoration: none;
|
||||||
|
list-style: none;
|
||||||
|
transition: 0.2s ease-in-out;
|
||||||
|
text-decoration: none;
|
||||||
|
color: #fff;
|
||||||
|
cursor: pointer;
|
||||||
|
|
||||||
|
&:hover {
|
||||||
|
color: #01bf71;
|
||||||
|
transition: 0.2s ease-in-out;
|
||||||
|
}
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const SideBtnWrap = styled.div`
|
||||||
|
display: flex;
|
||||||
|
justify-content: center;
|
||||||
|
`;
|
||||||
|
|
||||||
|
export const SidebarRoute = styled(LinkR)`
|
||||||
|
border-radius: 50px;
|
||||||
|
background: #01bf71;
|
||||||
|
white-space: nowrap;
|
||||||
|
padding: 16px 46px;
|
||||||
|
color: #010606;
|
||||||
|
font-size: 16px;
|
||||||
|
outline: none;
|
||||||
|
border: none;
|
||||||
|
cursor: pointer;
|
||||||
|
transition: all 0.2s ease-in-out;
|
||||||
|
text-decoration: none;
|
||||||
|
|
||||||
|
&:hover {
|
||||||
|
transition: all 0.2s ease-in-out;
|
||||||
|
background: #fff;
|
||||||
|
color: #010606;
|
||||||
|
}
|
||||||
|
`;
|
||||||
36
src/components/Sidebar/index.jsx
Executable file
36
src/components/Sidebar/index.jsx
Executable file
@@ -0,0 +1,36 @@
|
|||||||
|
import {
|
||||||
|
CloseIcon,
|
||||||
|
Icon,
|
||||||
|
SidebarContainer,
|
||||||
|
SidebarLink,
|
||||||
|
SidebarMenu,
|
||||||
|
SidebarWrapper,
|
||||||
|
} from "@/components/Sidebar/SidebarElements";
|
||||||
|
|
||||||
|
export default function Sidebar({ isOpen, toggle }) {
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<SidebarContainer isOpen={isOpen} onClick={toggle}>
|
||||||
|
<Icon onClick={toggle}>
|
||||||
|
<CloseIcon />
|
||||||
|
</Icon>
|
||||||
|
<SidebarWrapper>
|
||||||
|
<SidebarMenu>
|
||||||
|
<SidebarLink to="Notebooks" onClick={toggle}>
|
||||||
|
Notebooks
|
||||||
|
</SidebarLink>
|
||||||
|
<SidebarLink to="Instructors" onClick={toggle}>
|
||||||
|
Instructors
|
||||||
|
</SidebarLink>
|
||||||
|
<SidebarLink to="Media" onClick={toggle}>
|
||||||
|
Media
|
||||||
|
</SidebarLink>
|
||||||
|
<SidebarLink to="More" onClick={toggle}>
|
||||||
|
More
|
||||||
|
</SidebarLink>
|
||||||
|
</SidebarMenu>
|
||||||
|
</SidebarWrapper>
|
||||||
|
</SidebarContainer>
|
||||||
|
</>
|
||||||
|
);
|
||||||
|
}
|
||||||
BIN
src/images/book_cover.jpg
Normal file
BIN
src/images/book_cover.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 282 KiB |
1495
src/images/coding.svg
Normal file
1495
src/images/coding.svg
Normal file
File diff suppressed because it is too large
Load Diff
|
After Width: | Height: | Size: 96 KiB |
1908
src/images/instructor.svg
Normal file
1908
src/images/instructor.svg
Normal file
File diff suppressed because it is too large
Load Diff
|
After Width: | Height: | Size: 234 KiB |
2101
src/images/media.svg
Normal file
2101
src/images/media.svg
Normal file
File diff suppressed because it is too large
Load Diff
|
After Width: | Height: | Size: 138 KiB |
2921
src/images/more.svg
Normal file
2921
src/images/more.svg
Normal file
File diff suppressed because it is too large
Load Diff
|
After Width: | Height: | Size: 266 KiB |
10
src/index.jsx
Executable file
10
src/index.jsx
Executable file
@@ -0,0 +1,10 @@
|
|||||||
|
import App from "@/App";
|
||||||
|
import "@/styles/globals.css";
|
||||||
|
import React from "react";
|
||||||
|
import ReactDOM from "react-dom/client";
|
||||||
|
|
||||||
|
ReactDOM.createRoot(document.getElementById("root")).render(
|
||||||
|
<React.StrictMode>
|
||||||
|
<App />
|
||||||
|
</React.StrictMode>,
|
||||||
|
);
|
||||||
30
src/pages/index.jsx
Executable file
30
src/pages/index.jsx
Executable file
@@ -0,0 +1,30 @@
|
|||||||
|
import Footer from "@/components/Footer";
|
||||||
|
import HeroSection from "@/components/HeroSection";
|
||||||
|
import InstructorsSection from "@/components/Instructors";
|
||||||
|
import MediaSection from "@/components/Media";
|
||||||
|
import MoreSection from "@/components/More";
|
||||||
|
import Navbar from "@/components/Navbar";
|
||||||
|
import NotebookSection from "@/components/Notebooks";
|
||||||
|
import Sidebar from "@/components/Sidebar";
|
||||||
|
import { useState } from "react";
|
||||||
|
|
||||||
|
export default function Index() {
|
||||||
|
const [isOpen, setIsOpen] = useState(false);
|
||||||
|
|
||||||
|
const toggle = () => {
|
||||||
|
setIsOpen((p) => !p);
|
||||||
|
};
|
||||||
|
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<Sidebar isOpen={isOpen} toggle={toggle} />
|
||||||
|
<Navbar toggle={toggle} />
|
||||||
|
<HeroSection />
|
||||||
|
<NotebookSection />
|
||||||
|
<InstructorsSection />
|
||||||
|
<MediaSection />
|
||||||
|
<MoreSection />
|
||||||
|
<Footer />
|
||||||
|
</>
|
||||||
|
);
|
||||||
|
}
|
||||||
6
src/styles/globals.css
Executable file
6
src/styles/globals.css
Executable file
@@ -0,0 +1,6 @@
|
|||||||
|
* {
|
||||||
|
box-sizing: border-box;
|
||||||
|
margin: 0;
|
||||||
|
padding: 0;
|
||||||
|
font-family: "Encode Sans Expanded", sans-serif;
|
||||||
|
}
|
||||||
23
style.css
23
style.css
@@ -1,23 +0,0 @@
|
|||||||
body {
|
|
||||||
font-size: 17px;
|
|
||||||
margin: 2% 10%;
|
|
||||||
}
|
|
||||||
|
|
||||||
#head {
|
|
||||||
display: flex;
|
|
||||||
flex-direction: row;
|
|
||||||
flex-wrap: wrap-reverse;
|
|
||||||
justify-content: space-between;
|
|
||||||
width: 100%;
|
|
||||||
}
|
|
||||||
|
|
||||||
#cover {
|
|
||||||
justify-content: center;
|
|
||||||
display: flex;
|
|
||||||
width: 30%;
|
|
||||||
}
|
|
||||||
|
|
||||||
#cover img {
|
|
||||||
width: 100%;
|
|
||||||
height: min-content;
|
|
||||||
}
|
|
||||||
20
vite.config.js
Normal file
20
vite.config.js
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
import react from "@vitejs/plugin-react-swc";
|
||||||
|
import path from "node:path";
|
||||||
|
import { defineConfig } from "vite";
|
||||||
|
|
||||||
|
// https://vitejs.dev/config/
|
||||||
|
export default defineConfig({
|
||||||
|
plugins: [react()],
|
||||||
|
resolve: {
|
||||||
|
alias: {
|
||||||
|
"@": path.resolve(__dirname, "./src"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
server: {
|
||||||
|
port: 3000,
|
||||||
|
},
|
||||||
|
preview: {
|
||||||
|
port: 3000,
|
||||||
|
},
|
||||||
|
base: "/udlbook",
|
||||||
|
});
|
||||||
Reference in New Issue
Block a user