Merge pull request #273 from fredhsu/patch-1

Update 7_2_Backpropagation.ipynb to fix equation references
udlbook committed 2025-03-04 14:00:59 -05:00 (committed by GitHub)


@@ -248,7 +248,7 @@
"\n",
" # Now work backwards through the network\n",
" for layer in range(K,-1,-1):\n",
" # TODO Calculate the derivatives of the loss with respect to the biases at layer from all_dl_df[layer]. (eq 7.21)\n",
" # TODO Calculate the derivatives of the loss with respect to the biases at layer from all_dl_df[layer]. (eq 7.22)\n",
" # NOTE! To take a copy of matrix X, use Z=np.array(X)\n",
" # REPLACE THIS LINE\n",
" all_dl_dbiases[layer] = np.zeros_like(all_biases[layer])\n",
@@ -258,13 +258,13 @@
" # REPLACE THIS LINE\n",
" all_dl_dweights[layer] = np.zeros_like(all_weights[layer])\n",
"\n",
" # TODO: calculate the derivatives of the loss with respect to the activations from weight and derivatives of next preactivations (second part of last line of eq 7.24)\n",
" # TODO: calculate the derivatives of the loss with respect to the activations from weight and derivatives of next preactivations (second part of last line of eq 7.25)\n",
" # REPLACE THIS LINE\n",
" all_dl_dh[layer] = np.zeros_like(all_h[layer])\n",
"\n",
"\n",
" if layer > 0:\n",
" # TODO Calculate the derivatives of the loss with respect to the pre-activation f (use derivative of ReLu function, first part of last line of eq. 7.24)\n",
" # TODO Calculate the derivatives of the loss with respect to the pre-activation f (use derivative of ReLu function, first part of last line of eq. 7.25)\n",
" # REPLACE THIS LINE\n",
" all_dl_df[layer-1] = np.zeros_like(all_f[layer-1])\n",
"\n",