Compare commits

..

31 Commits

Author SHA1 Message Date
udlbook
ea1a7aeba0 Add files via upload 2023-12-24 13:38:05 -05:00
udlbook
d6d0afdfd3 Created using Colaboratory 2023-12-24 12:34:48 -05:00
udlbook
56b4edb871 Created using Colaboratory 2023-12-24 12:01:00 -05:00
udlbook
86387e2901 Created using Colaboratory 2023-12-24 11:53:10 -05:00
udlbook
b9ec83d5f5 Created using Colaboratory 2023-12-24 11:49:04 -05:00
udlbook
0748270a1c Created using Colaboratory 2023-12-24 11:42:23 -05:00
udlbook
51c582f693 Update 6_2_Gradient_Descent.ipynb 2023-12-24 11:36:07 -05:00
udlbook
d7ca8b80d4 Update 3_1_Shallow_Networks_I.ipynb 2023-12-24 11:16:55 -05:00
udlbook
7549cbaa59 Update 5_1_Least_Squares_Loss.ipynb 2023-12-24 10:18:32 -05:00
udlbook
3cdb675cef Created using Colaboratory 2023-12-24 10:16:00 -05:00
udlbook
48d9a4e108 Created using Colaboratory 2023-12-24 10:13:36 -05:00
udlbook
73c58fa0fa Created using Colaboratory 2023-12-24 10:12:00 -05:00
udlbook
dbde6d3d89 Created using Colaboratory 2023-12-24 10:03:28 -05:00
udlbook
182293e8d6 Update 6_3_Stochastic_Gradient_Descent.ipynb 2023-12-24 09:19:51 -05:00
udlbook
d7468ee1c1 Add files via upload 2023-12-23 11:16:28 -05:00
udlbook
5abe61e767 Add files via upload 2023-12-23 11:05:04 -05:00
udlbook
476335cc6f Update 13_2_Graph_Classification.ipynb 2023-12-20 15:31:06 -05:00
udlbook
337d6cd544 Update 13_3_Neighborhood_Sampling.ipynb 2023-12-20 15:30:40 -05:00
udlbook
76498f8ef9 Created using Colaboratory 2023-12-20 15:29:10 -05:00
udlbook
89744c0f0f Update 10_1_1D_Convolution.ipynb 2023-12-20 15:27:59 -05:00
udlbook
ec40d10ecd Update 21_1_Bias_Mitigation.ipynb 2023-12-20 15:26:48 -05:00
udlbook
a71283e322 Update 18_1_Diffusion_Encoder.ipynb 2023-12-20 15:24:15 -05:00
udlbook
d9e7306ef4 Update 19_1_Markov_Decision_Processes.ipynb 2023-12-20 15:23:18 -05:00
udlbook
d5304c8034 Created using Colaboratory 2023-12-20 15:22:17 -05:00
udlbook
985c08950e Update 2_1_Supervised_Learning.ipynb 2023-12-20 15:19:07 -05:00
udlbook
97d738d408 Update 13_2_Graph_Classification.ipynb 2023-12-20 14:43:21 -05:00
udlbook
7e264f5310 Created using Colaboratory 2023-12-20 14:38:30 -05:00
udlbook
3b266ba33b Created using Colaboratory 2023-12-20 14:17:47 -05:00
udlbook
f75b1cb983 Created using Colaboratory 2023-12-20 14:15:24 -05:00
udlbook
7afe033e50 Update 4_3_Deep_Networks.ipynb 2023-12-18 16:55:51 -05:00
udlbook
c68045feb1 Update index.html 2023-12-17 17:58:48 -05:00
26 changed files with 171 additions and 417 deletions

View File

@@ -213,7 +213,7 @@
"\n",
"# Make a 2D array for the losses\n",
"all_losses = np.zeros_like(phi1_mesh)\n",
"# Run throught each 2D combination of phi0, phi1 and compute loss\n",
"# Run through each 2D combination of phi0, phi1 and compute loss\n",
"for indices,temp in np.ndenumerate(phi1_mesh):\n",
" all_losses[indices] = compute_loss(x,y, phi0_mesh[indices], phi1_mesh[indices])\n"
],
@@ -250,4 +250,4 @@
"outputs": []
}
]
}
}

View File

@@ -347,7 +347,7 @@
"\n",
"# Compute the least squares loss and print it out\n",
"loss = least_squares_loss(y_train,y_predict)\n",
"print(\"Loss = %3.3f\"%(loss))\n",
"print(\"Your Loss = %3.3f, True value = 9.385\"%(loss))\n",
"\n",
"# TODO. Manipulate the parameters (by hand!) to make the function\n",
"# fit the data better and try to reduce the loss to as small a number\n",
@@ -362,4 +362,4 @@
"outputs": []
}
]
}
}

View File

@@ -101,7 +101,6 @@
"cell_type": "code",
"source": [
"# # Plot the shallow neural network. We'll assume input in is range [-1,1] and output [-1,1]\n",
"# If the plot_all flag is set to true, then we'll plot all the intermediate stages as in Figure 3.3\n",
"def plot_neural(x, y):\n",
" fig, ax = plt.subplots()\n",
" ax.plot(x.T,y.T)\n",
@@ -319,4 +318,4 @@
"outputs": []
}
]
}
}

View File

@@ -4,7 +4,6 @@
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyOJeBMhN9fXO8UepZ4+Pbg6",
"include_colab_link": true
},
"kernelspec": {
@@ -433,12 +432,25 @@
"cell_type": "code",
"source": [
"# Now let's plot the likelihood, negative log likelihood, and least squares as a function the value of the offset beta1\n",
"fig, ax = plt.subplots(1,3)\n",
"fig.set_size_inches(10.5, 3.5)\n",
"fig.tight_layout(pad=3.0)\n",
"ax[0].plot(beta_1_vals, likelihoods); ax[0].set_xlabel('beta_1[0]$'); ax[0].set_ylabel('likelihood')\n",
"ax[1].plot(beta_1_vals, nlls); ax[1].set_xlabel('beta_1[0]'); ax[1].set_ylabel('negative log likelihood')\n",
"ax[2].plot(beta_1_vals, sum_squares); ax[2].set_xlabel('beta_1[0]'); ax[2].set_ylabel('sum of squares')\n",
"fig, ax = plt.subplots(1,2)\n",
"fig.set_size_inches(10.5, 5.5)\n",
"fig.tight_layout(pad=10.0)\n",
"likelihood_color = 'tab:red'\n",
"nll_color = 'tab:blue'\n",
"\n",
"ax[0].set_xlabel('beta_1[0]')\n",
"ax[0].set_ylabel('likelihood', color = likelihood_color)\n",
"ax[0].plot(beta_1_vals, likelihoods, color = likelihood_color)\n",
"ax[0].tick_params(axis='y', labelcolor=likelihood_color)\n",
"\n",
"ax00 = ax[0].twinx()\n",
"ax00.plot(beta_1_vals, nlls, color = nll_color)\n",
"ax00.set_ylabel('negative log likelihood', color = nll_color)\n",
"ax00.tick_params(axis='y', labelcolor = nll_color)\n",
"\n",
"plt.axvline(x = beta_1_vals[np.argmax(likelihoods)], linestyle='dotted')\n",
"\n",
"ax[1].plot(beta_1_vals, sum_squares); ax[1].set_xlabel('beta_1[0]'); ax[1].set_ylabel('sum of squares')\n",
"plt.show()"
],
"metadata": {
@@ -519,12 +531,26 @@
"cell_type": "code",
"source": [
"# Now let's plot the likelihood, negative log likelihood, and least squares as a function the value of the standard divation sigma\n",
"fig, ax = plt.subplots(1,3)\n",
"fig.set_size_inches(10.5, 3.5)\n",
"fig.tight_layout(pad=3.0)\n",
"ax[0].plot(sigma_vals, likelihoods); ax[0].set_xlabel('$\\sigma$'); ax[0].set_ylabel('likelihood')\n",
"ax[1].plot(sigma_vals, nlls); ax[1].set_xlabel('$\\sigma$'); ax[1].set_ylabel('negative log likelihood')\n",
"ax[2].plot(sigma_vals, sum_squares); ax[2].set_xlabel('$\\sigma$'); ax[2].set_ylabel('sum of squares')\n",
"fig, ax = plt.subplots(1,2)\n",
"fig.set_size_inches(10.5, 5.5)\n",
"fig.tight_layout(pad=10.0)\n",
"likelihood_color = 'tab:red'\n",
"nll_color = 'tab:blue'\n",
"\n",
"\n",
"ax[0].set_xlabel('sigma')\n",
"ax[0].set_ylabel('likelihood', color = likelihood_color)\n",
"ax[0].plot(sigma_vals, likelihoods, color = likelihood_color)\n",
"ax[0].tick_params(axis='y', labelcolor=likelihood_color)\n",
"\n",
"ax00 = ax[0].twinx()\n",
"ax00.plot(sigma_vals, nlls, color = nll_color)\n",
"ax00.set_ylabel('negative log likelihood', color = nll_color)\n",
"ax00.tick_params(axis='y', labelcolor = nll_color)\n",
"\n",
"plt.axvline(x = sigma_vals[np.argmax(likelihoods)], linestyle='dotted')\n",
"\n",
"ax[1].plot(sigma_vals, sum_squares); ax[1].set_xlabel('sigma'); ax[1].set_ylabel('sum of squares')\n",
"plt.show()"
],
"metadata": {

View File

@@ -4,7 +4,7 @@
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyOlPP7m+YTLyMPaN0WxRdrb",
"authorship_tag": "ABX9TyOSb+W2AOFVQm8FZcHAb2Jq",
"include_colab_link": true
},
"kernelspec": {
@@ -378,12 +378,25 @@
{
"cell_type": "code",
"source": [
"# Now let's plot the likelihood, and negative log likelihoods as a function the value of the offset beta1\n",
"fig, ax = plt.subplots(1,2)\n",
"fig.set_size_inches(10.5, 3.5)\n",
"fig.tight_layout(pad=3.0)\n",
"ax[0].plot(beta_1_vals, likelihoods); ax[0].set_xlabel('beta_1[0]'); ax[0].set_ylabel('likelihood')\n",
"ax[1].plot(beta_1_vals, nlls); ax[1].set_xlabel('beta_1[0]'); ax[1].set_ylabel('negative log likelihood')\n",
"# Now let's plot the likelihood, negative log likelihood, and least squares as a function the value of the offset beta1\n",
"fig, ax = plt.subplots()\n",
"fig.tight_layout(pad=5.0)\n",
"likelihood_color = 'tab:red'\n",
"nll_color = 'tab:blue'\n",
"\n",
"\n",
"ax.set_xlabel('beta_1[0]')\n",
"ax.set_ylabel('likelihood', color = likelihood_color)\n",
"ax.plot(beta_1_vals, likelihoods, color = likelihood_color)\n",
"ax.tick_params(axis='y', labelcolor=likelihood_color)\n",
"\n",
"ax1 = ax.twinx()\n",
"ax1.plot(beta_1_vals, nlls, color = nll_color)\n",
"ax1.set_ylabel('negative log likelihood', color = nll_color)\n",
"ax1.tick_params(axis='y', labelcolor = nll_color)\n",
"\n",
"plt.axvline(x = beta_1_vals[np.argmax(likelihoods)], linestyle='dotted')\n",
"\n",
"plt.show()"
],
"metadata": {

View File

@@ -4,7 +4,7 @@
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyPNAZtbS+8jYc+tZqhDHNev",
"authorship_tag": "ABX9TyOPv/l+ToaApJV7Nz+8AtpV",
"include_colab_link": true
},
"kernelspec": {
@@ -401,12 +401,25 @@
{
"cell_type": "code",
"source": [
"# Now let's plot the likelihood, negative log likelihood as a function the value of the offset beta1\n",
"fig, ax = plt.subplots(1,2)\n",
"fig.set_size_inches(10.5, 3.5)\n",
"fig.tight_layout(pad=3.0)\n",
"ax[0].plot(beta_1_vals, likelihoods); ax[0].set_xlabel('beta_1[0,0]'); ax[0].set_ylabel('likelihood')\n",
"ax[1].plot(beta_1_vals, nlls); ax[1].set_xlabel('beta_1[0,0]'); ax[1].set_ylabel('negative log likelihood')\n",
"# Now let's plot the likelihood, negative log likelihood, and least squares as a function the value of the offset beta1\n",
"fig, ax = plt.subplots()\n",
"fig.tight_layout(pad=5.0)\n",
"likelihood_color = 'tab:red'\n",
"nll_color = 'tab:blue'\n",
"\n",
"\n",
"ax.set_xlabel('beta_1[0, 0]')\n",
"ax.set_ylabel('likelihood', color = likelihood_color)\n",
"ax.plot(beta_1_vals, likelihoods, color = likelihood_color)\n",
"ax.tick_params(axis='y', labelcolor=likelihood_color)\n",
"\n",
"ax1 = ax.twinx()\n",
"ax1.plot(beta_1_vals, nlls, color = nll_color)\n",
"ax1.set_ylabel('negative log likelihood', color = nll_color)\n",
"ax1.tick_params(axis='y', labelcolor = nll_color)\n",
"\n",
"plt.axvline(x = beta_1_vals[np.argmax(likelihoods)], linestyle='dotted')\n",
"\n",
"plt.show()"
],
"metadata": {

View File

@@ -4,7 +4,6 @@
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyN2N4cCnlIobOZXEjcwAvZ5",
"include_colab_link": true
},
"kernelspec": {
@@ -301,7 +300,7 @@
{
"cell_type": "markdown",
"source": [
"Now we are ready to perform gradient descent. We'll need to use our line search routine from noteboo 6.1, which I've reproduced here plus the helper function loss_function_1D that converts from a 2D problem to a 1D problem"
"Now we are ready to perform gradient descent. We'll need to use our line search routine from notebook 6.1, which I've reproduced here plus the helper function loss_function_1D that maps the search along the negative gradient direction in 2D space to a 1D problem (distance along this direction)"
],
"metadata": {
"id": "5EIjMM9Fw2eT"
@@ -310,9 +309,9 @@
{
"cell_type": "code",
"source": [
"def loss_function_1D(dist_prop, data, model, phi_start, gradient):\n",
"def loss_function_1D(dist_prop, data, model, phi_start, search_direction):\n",
" # Return the loss after moving this far\n",
" return compute_loss(data[0,:], data[1,:], model, phi_start+ gradient * dist_prop)\n",
" return compute_loss(data[0,:], data[1,:], model, phi_start+ search_direction * dist_prop)\n",
"\n",
"def line_search(data, model, phi, gradient, thresh=.00001, max_dist = 0.1, max_iter = 15, verbose=False):\n",
" # Initialize four points along the range we are going to search\n",
@@ -418,4 +417,4 @@
"outputs": []
}
]
}
}

View File

@@ -518,7 +518,7 @@
" # at each step\n",
" # You can use the function np.random.permutation to generate a random permutation of the n_data = data.shape[1] indices\n",
" # and then just choose the first n=batch_size of these indices. Then compute the gradient update\n",
" # from just the data with these indices. More properly, you should sample with replacement, but this will do for now.\n",
" # from just the data with these indices. More properly, you should sample without replacement, but this will do for now.\n",
"\n",
"\n",
" return phi"
@@ -583,4 +583,4 @@
"outputs": []
}
]
}
}

View File

@@ -4,7 +4,6 @@
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyMLS4qeqBTVHGdg9Sds9jND",
"include_colab_link": true
},
"kernelspec": {
@@ -377,6 +376,15 @@
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"Note that for this case, Nesterov momentum does not improve the result."
],
"metadata": {
"id": "F-As4hS8s2nm"
}
}
]
}

View File

@@ -4,7 +4,6 @@
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyPz1B8kFc21JvGTDwqniloA",
"include_colab_link": true
},
"kernelspec": {
@@ -185,10 +184,8 @@
" if A[i,j] < 0:\n",
" A[i,j] = 0;\n",
"\n",
" ATA = np.matmul(np.transpose(A), A)\n",
" ATAInv = np.linalg.inv(ATA)\n",
" ATAInvAT = np.matmul(ATAInv, np.transpose(A))\n",
" beta_omega = np.matmul(ATAInvAT,y)\n",
" beta_omega = np.linalg.lstsq(A, y, rcond=None)[0]\n",
"\n",
" beta = beta_omega[0]\n",
" omega = beta_omega[1:]\n",
"\n",

File diff suppressed because one or more lines are too long

View File

@@ -4,7 +4,7 @@
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyNuR7X+PMWRddy+WQr4gr5f",
"authorship_tag": "ABX9TyOAC7YLEqN5qZhJXqRj+aHB",
"include_colab_link": true
},
"kernelspec": {
@@ -184,7 +184,9 @@
" A = np.ones((n_data, n_hidden+1))\n",
" for i in range(n_data):\n",
" for j in range(1,n_hidden+1):\n",
" # Compute preactivation\n",
" A[i,j] = x[i]-(j-1)/n_hidden\n",
" # Apply the ReLU function\n",
" if A[i,j] < 0:\n",
" A[i,j] = 0;\n",
"\n",

View File

@@ -4,7 +4,7 @@
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyM3wq9CHLjekkIXIgXRxueE",
"authorship_tag": "ABX9TyM38ZVBK4/xaHk5Ys5lF6dN",
"include_colab_link": true
},
"kernelspec": {
@@ -208,14 +208,14 @@
{
"cell_type": "code",
"source": [
"def augment(data_in):\n",
"def augment(input_vector):\n",
" # Create output vector\n",
" data_out = np.zeros_like(data_in)\n",
" data_out = np.zeros_like(input_vector)\n",
"\n",
" # TODO: Shift the input data by a random offset\n",
" # (rotating, so points that would go off the end, are added back to the beginning)\n",
" # Replace this line:\n",
" data_out = np.zeros_like(data_in) ;\n",
" data_out = np.zeros_like(input_vector) ;\n",
"\n",
" # TODO: # Randomly scale the data by a factor drawn from a uniform distribution over [0.8,1.2]\n",
" # Replace this line:\n",

View File

@@ -341,7 +341,7 @@
"# Compute matrix in figure 10.4 d\n",
"def get_conv_mat_3_1_1_zp(n_out, omega):\n",
" omega_mat = np.zeros((n_out,n_out))\n",
" # TODO Fill in this matix\n",
" # TODO Fill in this matrix\n",
" # Replace this line:\n",
" omega_mat = omega_mat\n",
"\n",
@@ -384,4 +384,4 @@
}
}
]
}
}

View File

@@ -4,7 +4,7 @@
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyObut1y9atNUuowPT6dMY+I",
"authorship_tag": "ABX9TyMXS3SPB4cS/4qxix0lH/Hq",
"include_colab_link": true
},
"kernelspec": {
@@ -144,10 +144,10 @@
" def count_params(self):\n",
" return sum([p.view(-1).shape[0] for p in self.parameters()])\n",
"\n",
"# # TODO -- Add residual connections to this model\n",
"# # The order of operations should similar to figure 11.5b\n",
"# # linear1 first, ReLU+linear2 in first residual block, ReLU+linear3 in second residual block), linear4 at end\n",
"# # Replace this function\n",
"# TODO -- Add residual connections to this model\n",
"# The order of operations within each block should be similar to figure 11.5b\n",
"# i.e., linear1 first, ReLU+linear2 in first residual block, ReLU+linear3 in second residual block), linear4 at end\n",
"# Replace this function\n",
" def forward(self, x):\n",
" h1 = self.linear1(x).relu()\n",
" h2 = self.linear2(h1).relu()\n",

View File

@@ -57,7 +57,7 @@
{
"cell_type": "markdown",
"source": [
"Let's build a model that maps a chemical structure to a binary decision. This model might be used to predict whether a chemical is liquid at room temparature or not. We'll start by drawing the chemical structure."
"Let's build a model that maps a chemical structure to a binary decision. This model might be used to predict whether a chemical is liquid at room temperature or not. We'll start by drawing the chemical structure."
],
"metadata": {
"id": "UNleESc7k5uB"
@@ -191,7 +191,7 @@
"source": [
"# Let's test this network\n",
"f = graph_neural_network(A,X, Omega0, beta0, Omega1, beta1, Omega2, beta2, omega3, beta3)\n",
"print(\"Your value is %3f: \"%(f[0,0]), \"True value of f: 0.498010\")"
"print(\"Your value is %3f: \"%(f[0,0]), \"True value of f: 0.310843\")"
],
"metadata": {
"id": "X7gYgOu6uIAt"
@@ -221,7 +221,7 @@
"X_permuted = np.copy(X)\n",
"\n",
"f = graph_neural_network(A_permuted,X_permuted, Omega0, beta0, Omega1, beta1, Omega2, beta2, omega3, beta3)\n",
"print(\"Your value is %3f: \"%(f[0,0]), \"True value of f: 0.498010\")"
"print(\"Your value is %3f: \"%(f[0,0]), \"True value of f: 0.310843\")"
],
"metadata": {
"id": "F0zc3U_UuR5K"
@@ -241,4 +241,4 @@
}
}
]
}
}

View File

@@ -268,7 +268,7 @@
"source": [
"# TODO Find the nodes in hidden layer 1 that connect to the nodes in hidden layer 2\n",
"# using the adjacency matrix. Then sample n_sample of these nodes randomly without\n",
"# replacement. Make sure not to sample nodes that were already included in hidden layer 2 our the ouput layer.\n",
"# replacement. Make sure not to sample nodes that were already included in hidden layer 2 or the output layer.\n",
"# The nodes at hidden layer 1 are the union of these nodes and the nodes in hidden layer 2\n",
"\n",
"# Replace this line:\n",
@@ -311,4 +311,4 @@
}
}
]
}
}

View File

@@ -4,7 +4,7 @@
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyMBYNsjj1iTgHUYhAXqUYJd",
"authorship_tag": "ABX9TyOSEQVqxE5KrXmsZVh9M3gq",
"include_colab_link": true
},
"kernelspec": {
@@ -253,7 +253,7 @@
"pr_x1_x2_given_z_val = get_likelihood(x1_mesh,x2_mesh, z_val)\n",
"\n",
"# Plot the result\n",
"plot_heatmap(x1_mesh, x2_mesh, pr_x1_x2_given_z_val, title=\"Conditional distribution $Pr(x1,x2|z)$\")\n",
"plot_heatmap(x1_mesh, x2_mesh, pr_x1_x2_given_z_val, title=\"Conditional distribution $Pr(x_1,x_2|z)$\")\n",
"\n",
"# TODO -- Experiment with different values of z and make sure that you understand the what is happening."
],
@@ -292,7 +292,7 @@
"\n",
"\n",
"# Plot the result\n",
"plot_heatmap(x1_mesh, x2_mesh, pr_x1_x2, title=\"Data density $Pr(x1,x2)$\")\n"
"plot_heatmap(x1_mesh, x2_mesh, pr_x1_x2, title=\"Data density $Pr(x_1,x_2)$\")\n"
],
"metadata": {
"id": "H0Ijce9VzeCO"
@@ -341,7 +341,7 @@
"source": [
"x1_samples, x2_samples = draw_samples(500)\n",
"# Plot the result\n",
"plot_heatmap(x1_mesh, x2_mesh, pr_x1_x2, x1_samples, x2_samples, title=\"Data density $Pr(x1,x2)$\")\n"
"plot_heatmap(x1_mesh, x2_mesh, pr_x1_x2, x1_samples, x2_samples, title=\"Data density $Pr(x_1,x_2)$\")\n"
],
"metadata": {
"id": "XRmWv99B-BWO"

View File

@@ -4,7 +4,7 @@
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyMvae+1cigwg2Htl6vt1Who",
"authorship_tag": "ABX9TyNecz9/CDOggPSmy1LjT/Dv",
"include_colab_link": true
},
"kernelspec": {
@@ -217,7 +217,7 @@
" \\mbox{f}[y]= 20.446\\exp\\left[-(y-3)^4\\right],\n",
" \\end{equation}\n",
"\n",
"which decreases rapidly as we move away from the position $y=4$."
"which decreases rapidly as we move away from the position $y=3$."
],
"metadata": {
"id": "6hxsl3Pxo1TT"

View File

@@ -403,7 +403,7 @@
" marginal_at_time_t = np.zeros_like(pr_x_true);\n",
"\n",
"\n",
" # TODO Write ths function\n",
" # TODO Write this function\n",
" # 1. For each x (value in x_plot_vals):\n",
" # 2. Compute the mean and variance of the diffusion kernel at time t\n",
" # 3. Compute pdf of this Gaussian at every x_plot_val\n",

View File

@@ -598,7 +598,7 @@
"source": [
"def markov_decision_process_step_deterministic(state, transition_probabilities_given_action, reward_structure, policy):\n",
" # TODO -- complete this function.\n",
" # For each state, theres is a corresponding action.\n",
" # For each state, there is a corresponding action.\n",
" # Draw the next state based on the current state and that action\n",
" # and calculate the reward\n",
" # Replace this line:\n",
@@ -683,7 +683,7 @@
"source": [
"def markov_decision_process_step_stochastic(state, transition_probabilities_given_action, reward_structure, stochastic_policy):\n",
" # TODO -- complete this function.\n",
" # For each state, theres is a corresponding distribution over actions\n",
" # For each state, there is a corresponding distribution over actions\n",
" # Draw a sample from that distribution to get the action\n",
" # Draw the next state based on the current state and that action\n",
" # and calculate the reward\n",
@@ -733,4 +733,4 @@
"outputs": []
}
]
}
}

File diff suppressed because one or more lines are too long

View File

@@ -71,7 +71,7 @@
"cell_type": "code",
"source": [
"# Class that can describe interesting curve shapes based on the input parameters\n",
"# Details dont' matter\n",
"# Details don't matter\n",
"class FreqCurve:\n",
" def __init__(self, weight, mean1, mean2, sigma1, sigma2, prop):\n",
" self.mean1 = mean1\n",
@@ -438,4 +438,4 @@
}
}
]
}
}

Binary file not shown.

Binary file not shown.

View File

@@ -15,8 +15,8 @@
<ul>
<li>
<p style="font-size: larger; margin-bottom: 0">Download draft PDF Chapters 1-21 <a
href="https://github.com/udlbook/udlbook/releases/download/v1.16/UnderstandingDeepLearning_24_11_23_C.pdf">here</a>
</p>2023-11-24. CC-BY-NC-ND license<br>
href="https://github.com/udlbook/udlbook/releases/download/v1.17/UnderstandingDeepLearning_17_12_23_C.pdf">here</a>
</p>2023-12-17. CC-BY-NC-ND license<br>
<img src="https://img.shields.io/github/downloads/udlbook/udlbook/total" alt="download stats shield">
</li>
<li> Order your copy from <a href="https://mitpress.mit.edu/9780262048644/understanding-deep-learning/">here </a></li>