diff --git a/Notebooks/Chap02/2_1_Supervised_Learning.ipynb b/Notebooks/Chap02/2_1_Supervised_Learning.ipynb
index 43918e7..83b34f4 100644
--- a/Notebooks/Chap02/2_1_Supervised_Learning.ipynb
+++ b/Notebooks/Chap02/2_1_Supervised_Learning.ipynb
@@ -31,7 +31,7 @@
"source": [
"# Notebook 2.1 Supervised Learning\n",
"\n",
- "The purpose of this notebook is to explore the linear regression model dicussed in Chapter 2 of the book.\n",
+ "The purpose of this notebook is to explore the linear regression model discussed in Chapter 2 of the book.\n",
"\n",
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and write code to complete the functions. There are also questions interspersed in the text.\n",
"\n",
diff --git a/Notebooks/Chap05/5_1_Least_Squares_Loss.ipynb b/Notebooks/Chap05/5_1_Least_Squares_Loss.ipynb
index 1e05a1a..d4281ee 100644
--- a/Notebooks/Chap05/5_1_Least_Squares_Loss.ipynb
+++ b/Notebooks/Chap05/5_1_Least_Squares_Loss.ipynb
@@ -139,7 +139,7 @@
"source": [
"# Univariate regression\n",
"\n",
- "We'll investigate a simple univarite regression situation with a single input $x$ and a single output $y$ as pictured in figures 5.4 and 5.5b."
+ "We'll investigate a simple univariate regression situation with a single input $x$ and a single output $y$ as pictured in figures 5.4 and 5.5b."
],
"metadata": {
"id": "PsgLZwsPxauP"
diff --git a/Notebooks/Chap06/6_2_Gradient_Descent.ipynb b/Notebooks/Chap06/6_2_Gradient_Descent.ipynb
index 276d8a3..4bdd365 100644
--- a/Notebooks/Chap06/6_2_Gradient_Descent.ipynb
+++ b/Notebooks/Chap06/6_2_Gradient_Descent.ipynb
@@ -31,7 +31,7 @@
"source": [
"# **Notebook 6.2 Gradient descent**\n",
"\n",
- "This notebook recreates the gradient descent algorithm as shon in figure 6.1.\n",
+ "This notebook recreates the gradient descent algorithm as shown in figure 6.1.\n",
"\n",
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
"\n",
diff --git a/Notebooks/Chap06/6_3_Stochastic_Gradient_Descent.ipynb b/Notebooks/Chap06/6_3_Stochastic_Gradient_Descent.ipynb
index 1ad3a89..282039e 100644
--- a/Notebooks/Chap06/6_3_Stochastic_Gradient_Descent.ipynb
+++ b/Notebooks/Chap06/6_3_Stochastic_Gradient_Descent.ipynb
@@ -123,7 +123,7 @@
{
"cell_type": "code",
"source": [
- "# Initialize the parmaeters and draw the model\n",
+ "# Initialize the parameters and draw the model\n",
"phi = np.zeros((2,1))\n",
"phi[0] = -5 # Horizontal offset\n",
"phi[1] = 25 # Frequency\n",
diff --git a/Notebooks/Chap06/6_4_Momentum.ipynb b/Notebooks/Chap06/6_4_Momentum.ipynb
index 87cce41..40f5a65 100644
--- a/Notebooks/Chap06/6_4_Momentum.ipynb
+++ b/Notebooks/Chap06/6_4_Momentum.ipynb
@@ -123,7 +123,7 @@
{
"cell_type": "code",
"source": [
- "# Initialize the parmaeters and draw the model\n",
+ "# Initialize the parameters and draw the model\n",
"phi = np.zeros((2,1))\n",
"phi[0] = -5 # Horizontal offset\n",
"phi[1] = 25 # Frequency\n",
diff --git a/Notebooks/Chap06/6_5_Adam.ipynb b/Notebooks/Chap06/6_5_Adam.ipynb
index 31d4762..e9ef3c7 100644
--- a/Notebooks/Chap06/6_5_Adam.ipynb
+++ b/Notebooks/Chap06/6_5_Adam.ipynb
@@ -248,7 +248,7 @@
" # Replace this line:\n",
" v = v\n",
"\n",
- " # TODO -- Modify the statistics according to euation 6.16\n",
+ " # TODO -- Modify the statistics according to equation 6.16\n",
" # You will need the function np.power\n",
" # Replace these lines\n",
" m_tilde = m\n",
diff --git a/Notebooks/Chap07/7_2_Backpropagation.ipynb b/Notebooks/Chap07/7_2_Backpropagation.ipynb
index e79a340..c52cf46 100644
--- a/Notebooks/Chap07/7_2_Backpropagation.ipynb
+++ b/Notebooks/Chap07/7_2_Backpropagation.ipynb
@@ -143,7 +143,7 @@
" # Run through the layers, calculating all_f[0...K-1] and all_h[1...K]\n",
" for layer in range(K):\n",
" # Update preactivations and activations at this layer according to eqn 7.16\n",
- " # Remmember to use np.matmul for matrrix multiplications\n",
+ " # Remember to use np.matmul for matrix multiplications\n",
" # TODO -- Replace the lines below\n",
" all_f[layer] = all_h[layer]\n",
" all_h[layer+1] = all_f[layer]\n",
@@ -299,7 +299,7 @@
"# Let's test if we have the derivatives right using finite differences\n",
"delta_fd = 0.000001\n",
"\n",
- "# Test the dervatives of the bias vectors\n",
+ "# Test the derivatives of the bias vectors\n",
"for layer in range(K):\n",
" dl_dbias = np.zeros_like(all_dl_dbiases[layer])\n",
" # For every element in the bias\n",
diff --git a/Notebooks/Chap08/8_2_Bias_Variance_Trade_Off.ipynb b/Notebooks/Chap08/8_2_Bias_Variance_Trade_Off.ipynb
index 201a4d6..ad2177c 100644
--- a/Notebooks/Chap08/8_2_Bias_Variance_Trade_Off.ipynb
+++ b/Notebooks/Chap08/8_2_Bias_Variance_Trade_Off.ipynb
@@ -77,7 +77,7 @@
" for i in range(n_data):\n",
" x[i] = np.random.uniform(i/n_data, (i+1)/n_data, 1)\n",
"\n",
- " # y value from running through functoin and adding noise\n",
+ " # y value from running through function and adding noise\n",
" y = np.ones(n_data)\n",
" for i in range(n_data):\n",
" y[i] = true_function(x[i])\n",
@@ -229,7 +229,7 @@
" y_model_all = np.zeros((n_datasets, x_model.shape[0]))\n",
"\n",
" for c_dataset in range(n_datasets):\n",
- " # TODO -- Generate n_data x,y, pairs with standard divation sigma_func\n",
+ " # TODO -- Generate n_data x,y, pairs with standard deviation sigma_func\n",
" # Replace this line\n",
" x_data,y_data = np.zeros([1,n_data]),np.zeros([1,n_data])\n",
"\n",
@@ -316,7 +316,7 @@
"\n",
" # Compute variance -- average of the model variance (average squared deviation of fitted models around mean fitted model)\n",
" variance[c_hidden] = 0\n",
- " # Compute bias (average squared deviaton of mean fitted model around true function)\n",
+ " # Compute bias (average squared deviation of mean fitted model around true function)\n",
" bias[c_hidden] = 0\n",
"\n",
"# Plot the results\n",
diff --git a/Notebooks/Chap09/9_1_L2_Regularization.ipynb b/Notebooks/Chap09/9_1_L2_Regularization.ipynb
index bda5af3..4830640 100644
--- a/Notebooks/Chap09/9_1_L2_Regularization.ipynb
+++ b/Notebooks/Chap09/9_1_L2_Regularization.ipynb
@@ -120,7 +120,7 @@
{
"cell_type": "code",
"source": [
- "# Initialize the parmaeters and draw the model\n",
+ "# Initialize the parameters and draw the model\n",
"phi = np.zeros((2,1))\n",
"phi[0] = -5 # Horizontal offset\n",
"phi[1] = 25 # Frequency\n",
diff --git a/Notebooks/Chap09/9_4_Bayesian_Approach.ipynb b/Notebooks/Chap09/9_4_Bayesian_Approach.ipynb
index 0bd420b..c0a4117 100644
--- a/Notebooks/Chap09/9_4_Bayesian_Approach.ipynb
+++ b/Notebooks/Chap09/9_4_Bayesian_Approach.ipynb
@@ -80,7 +80,7 @@
" for i in range(n_data):\n",
" x[i] = np.random.uniform(i/n_data, (i+1)/n_data, 1)\n",
"\n",
- " # y value from running through functoin and adding noise\n",
+ " # y value from running through function and adding noise\n",
" y = np.ones(n_data)\n",
" for i in range(n_data):\n",
" y[i] = true_function(x[i])\n",
@@ -137,7 +137,7 @@
"n_data = 15\n",
"x_data,y_data = generate_data(n_data, sigma_func)\n",
"\n",
- "# Plot the functinon, data and uncertainty\n",
+ "# Plot the function, data and uncertainty\n",
"plot_function(x_func, y_func, x_data, y_data, sigma_func=sigma_func)"
],
"metadata": {
@@ -357,7 +357,7 @@
"\n",
"To compute this, we reformulated the integrand using the relations from appendices\n",
"C.3.3 and C.3.4 as the product of a normal distribution in $\\boldsymbol\\phi$ and a constant with respect\n",
- "to $\\boldsymbol\\phi$. The integral of the normal distribution must be one, and so the finnal result is just the constant. This constant is itself a normal distribution in $y^*$.
\n",
+ "to $\\boldsymbol\\phi$. The integral of the normal distribution must be one, and so the final result is just the constant. This constant is itself a normal distribution in $y^*$.
\n",
"\n",
"If you feel so inclined you can work through the math of this yourself."
],
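Note: the key step in the corrected passage, written out. Once the integrand factors into a term constant in $\boldsymbol\phi$ times a normal distribution in $\boldsymbol\phi$, the integral collapses (a restatement of the reasoning above, not the book's full derivation):

```latex
\int c(y^*)\,\mathcal{N}(\boldsymbol\phi;\,\boldsymbol\mu,\boldsymbol\Sigma)\,d\boldsymbol\phi
  = c(y^*)\underbrace{\int \mathcal{N}(\boldsymbol\phi;\,\boldsymbol\mu,\boldsymbol\Sigma)\,d\boldsymbol\phi}_{=\,1}
  = c(y^*)
```

and the remaining constant $c(y^*)$ is itself a normal distribution in $y^*$.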
diff --git a/Notebooks/Chap12/12_1_Self_Attention.ipynb b/Notebooks/Chap12/12_1_Self_Attention.ipynb
index d1e9fe5..3a26f94 100644
--- a/Notebooks/Chap12/12_1_Self_Attention.ipynb
+++ b/Notebooks/Chap12/12_1_Self_Attention.ipynb
@@ -153,7 +153,7 @@
{
"cell_type": "markdown",
"source": [
- "We'll need a softmax function (equation 12.5) -- here, it will take a list of arbirtrary numbers and return a list where the elements are non-negative and sum to one\n"
+ "We'll need a softmax function (equation 12.5) -- here, it will take a list of arbitrary numbers and return a list where the elements are non-negative and sum to one\n"
],
"metadata": {
"id": "Se7DK6PGPSUk"
@@ -364,7 +364,7 @@
{
"cell_type": "markdown",
"source": [
- "TODO -- Investigate whether the self-attention mechanism is covariant with respect to permulation.\n",
+ "TODO -- Investigate whether the self-attention mechanism is covariant with respect to permutation.\n",
"If it is, when we permute the columns of the input matrix $\\mathbf{X}$, the columns of the output matrix $\\mathbf{X}'$ will also be permuted.\n"
],
"metadata": {
diff --git a/Notebooks/Chap12/12_2_Multihead_Self_Attention.ipynb b/Notebooks/Chap12/12_2_Multihead_Self_Attention.ipynb
index fc5a6a7..7e9b0b3 100644
--- a/Notebooks/Chap12/12_2_Multihead_Self_Attention.ipynb
+++ b/Notebooks/Chap12/12_2_Multihead_Self_Attention.ipynb
@@ -31,7 +31,7 @@
"source": [
"# **Notebook 12.1: Multhead Self-Attention**\n",
"\n",
- "This notebook builds a multihead self-attentionm mechanism as in figure 12.6\n",
+ "This notebook builds a multihead self-attention mechanism as in figure 12.6\n",
"\n",
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
"\n",
diff --git a/Notebooks/Chap15/15_1_GAN_Toy_Example.ipynb b/Notebooks/Chap15/15_1_GAN_Toy_Example.ipynb
index 440036a..4f18380 100644
--- a/Notebooks/Chap15/15_1_GAN_Toy_Example.ipynb
+++ b/Notebooks/Chap15/15_1_GAN_Toy_Example.ipynb
@@ -31,7 +31,7 @@
"source": [
"# **Notebook 15.1: GAN Toy example**\n",
"\n",
- "This notebook investigates the GAN toy example as illustred in figure 15.1 in the book.\n",
+ "This notebook investigates the GAN toy example as illustrated in figure 15.1 in the book.\n",
"\n",
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
"\n",
@@ -101,7 +101,7 @@
{
"cell_type": "markdown",
"source": [
- "Now, we define our disriminator. This is a simple logistic regression model (1D linear model passed through sigmoid) that returns the probability that the data is real"
+ "Now, we define our discriminator. This is a simple logistic regression model (1D linear model passed through sigmoid) that returns the probability that the data is real"
],
"metadata": {
"id": "Xrzd8aehYAYR"
@@ -387,7 +387,7 @@
"print(\"Final parameters (phi0,phi1)\", phi0, phi1)\n",
"for c_gan_iter in range(5):\n",
"\n",
- " # Run generator to product syntehsized data\n",
+ " # Run generator to product synthesized data\n",
" x_syn = generator(z, theta)\n",
" draw_data_model(x_real, x_syn, phi0, phi1)\n",
"\n",
diff --git a/Notebooks/Chap15/15_2_Wasserstein_Distance.ipynb b/Notebooks/Chap15/15_2_Wasserstein_Distance.ipynb
index 9314dba..10037b0 100644
--- a/Notebooks/Chap15/15_2_Wasserstein_Distance.ipynb
+++ b/Notebooks/Chap15/15_2_Wasserstein_Distance.ipynb
@@ -29,9 +29,9 @@
{
"cell_type": "markdown",
"source": [
- "# **Notebook 15.2: Wassserstein Distance**\n",
+ "# **Notebook 15.2: Wasserstein Distance**\n",
"\n",
- "This notebook investigates the GAN toy example as illustred in figure 15.1 in the book.\n",
+ "This notebook investigates the GAN toy example as illustrated in figure 15.1 in the book.\n",
"\n",
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
"\n",
diff --git a/Notebooks/Chap16/16_2_Autoregressive_Flows.ipynb b/Notebooks/Chap16/16_2_Autoregressive_Flows.ipynb
index 7d5839d..3f3b521 100644
--- a/Notebooks/Chap16/16_2_Autoregressive_Flows.ipynb
+++ b/Notebooks/Chap16/16_2_Autoregressive_Flows.ipynb
@@ -65,7 +65,7 @@
{
"cell_type": "code",
"source": [
- "# First let's make the 1D piecewise linear mapping as illustated in figure 16.5\n",
+ "# First let's make the 1D piecewise linear mapping as illustrated in figure 16.5\n",
"def g(h, phi):\n",
" # TODO -- write this function (equation 16.12)\n",
" # Note: If you have the first printing of the book, there is a mistake in equation 16.12\n",
@@ -156,7 +156,7 @@
{
"cell_type": "markdown",
"source": [
- "Now let's define an autogressive flow. Let's switch to looking at figure 16.7.# We'll assume that our piecewise function will use five parameters phi1,phi2,phi3,phi4,phi5"
+ "Now let's define an autoregressive flow. Let's switch to looking at figure 16.7.# We'll assume that our piecewise function will use five parameters phi1,phi2,phi3,phi4,phi5"
],
"metadata": {
"id": "t8XPxipfd7hz"
@@ -175,7 +175,7 @@
" x = x/ np.sum(x) ;\n",
" return x\n",
"\n",
- "# Return value of phi that doesn't depend on any of the iputs\n",
+ "# Return value of phi that doesn't depend on any of the inputs\n",
"def get_phi():\n",
" return np.array([0.2, 0.1, 0.4, 0.05, 0.25])\n",
"\n",
diff --git a/Notebooks/Chap17/17_2_Reparameterization_Trick.ipynb b/Notebooks/Chap17/17_2_Reparameterization_Trick.ipynb
index 21b48c6..88075f0 100644
--- a/Notebooks/Chap17/17_2_Reparameterization_Trick.ipynb
+++ b/Notebooks/Chap17/17_2_Reparameterization_Trick.ipynb
@@ -83,7 +83,7 @@
{
"cell_type": "code",
"source": [
- "# Let's approximate this expecctation for a particular value of phi\n",
+ "# Let's approximate this expectation for a particular value of phi\n",
"def compute_expectation(phi, n_samples):\n",
" # TODO complete this function\n",
" # 1. Compute the mean of the normal distribution, mu\n",
diff --git a/Notebooks/Chap18/18_1_Diffusion_Encoder.ipynb b/Notebooks/Chap18/18_1_Diffusion_Encoder.ipynb
index 7c6542a..56e0628 100644
--- a/Notebooks/Chap18/18_1_Diffusion_Encoder.ipynb
+++ b/Notebooks/Chap18/18_1_Diffusion_Encoder.ipynb
@@ -233,7 +233,7 @@
{
"cell_type": "markdown",
"source": [
- "Notice that the samples have a tendencey to move toward the center. Now let's look at the histogram of the samples at each stage"
+ "Notice that the samples have a tendency to move toward the center. Now let's look at the histogram of the samples at each stage"
],
"metadata": {
"id": "SGTYGGevAktz"
@@ -418,7 +418,7 @@
" # 1. For each x (value in x_plot_vals):\n",
" # 2. Compute the mean and variance of the diffusion kernel at time t\n",
" # 3. Compute pdf of this Gaussian at every x_plot_val\n",
- " # 4. Weight Gaussian by probability at position x and by 0.01 to componensate for bin size\n",
+ " # 4. Weight Gaussian by probability at position x and by 0.01 to compensate for bin size\n",
" # 5. Accumulate weighted Gaussian in marginal at time t.\n",
" # 6. Multiply result by 0.01 to compensate for bin size\n",
" # Replace this line:\n",
diff --git a/Notebooks/Chap18/18_2_1D_Diffusion_Model.ipynb b/Notebooks/Chap18/18_2_1D_Diffusion_Model.ipynb
index eb7f70a..a706a21 100644
--- a/Notebooks/Chap18/18_2_1D_Diffusion_Model.ipynb
+++ b/Notebooks/Chap18/18_2_1D_Diffusion_Model.ipynb
@@ -172,7 +172,7 @@
" # Find diffusion kernel for every x_train and draw samples\n",
" dk_mean, dk_std = diffusion_kernel(x_train, t, beta)\n",
" z_t = np.random.normal(size=x_train.shape) * dk_std + dk_mean\n",
- " # Find conditional diffusion distribution for each x_train, z pair and draw samlpes\n",
+ " # Find conditional diffusion distribution for each x_train, z pair and draw samples\n",
" cd_mean, cd_std = conditional_diffusion_distribution(x_train,z_t,t,beta)\n",
" if t == 1:\n",
" z_tminus1 = x_train\n",
diff --git a/Notebooks/Chap19/19_4_Temporal_Difference_Methods.ipynb b/Notebooks/Chap19/19_4_Temporal_Difference_Methods.ipynb
index f325c14..7135aec 100644
--- a/Notebooks/Chap19/19_4_Temporal_Difference_Methods.ipynb
+++ b/Notebooks/Chap19/19_4_Temporal_Difference_Methods.ipynb
@@ -31,7 +31,7 @@
"source": [
"# **Notebook 19.4: Temporal difference methods**\n",
"\n",
- "This notebook investigates temporal differnece methods for tabular reinforcement learning as described in section 19.3.3 of the book\n",
+ "This notebook investigates temporal difference methods for tabular reinforcement learning as described in section 19.3.3 of the book\n",
"\n",
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n",
"\n",
diff --git a/Notebooks/Chap19/19_5_Control_Variates.ipynb b/Notebooks/Chap19/19_5_Control_Variates.ipynb
index 95d10a4..5af4f99 100644
--- a/Notebooks/Chap19/19_5_Control_Variates.ipynb
+++ b/Notebooks/Chap19/19_5_Control_Variates.ipynb
@@ -57,7 +57,7 @@
{
"cell_type": "markdown",
"source": [
- "Genearate from our two variables, $a$ and $b$. We are interested in estimating the mean of $a$, but we can use $b$$ to improve our estimates if it is correlated"
+ "Generate from our two variables, $a$ and $b$. We are interested in estimating the mean of $a$, but we can use $b$$ to improve our estimates if it is correlated"
],
"metadata": {
"id": "uwmhcAZBzTRO"