diff --git a/Notebooks/Chap07/7_2_Backpropagation.ipynb b/Notebooks/Chap07/7_2_Backpropagation.ipynb
index 40975f9..272e269 100644
--- a/Notebooks/Chap07/7_2_Backpropagation.ipynb
+++ b/Notebooks/Chap07/7_2_Backpropagation.ipynb
@@ -248,7 +248,7 @@
         "\n",
         "  # Now work backwards through the network\n",
         "  for layer in range(K,-1,-1):\n",
-        "    # TODO Calculate the derivatives of the loss with respect to the biases at layer from all_dl_df[layer]. (eq 7.21)\n",
+        "    # TODO Calculate the derivatives of the loss with respect to the biases at layer from all_dl_df[layer]. (eq 7.22)\n",
         "    # NOTE! To take a copy of matrix X, use Z=np.array(X)\n",
         "    # REPLACE THIS LINE\n",
         "    all_dl_dbiases[layer] = np.zeros_like(all_biases[layer])\n",
@@ -258,13 +258,13 @@
         "    # REPLACE THIS LINE\n",
         "    all_dl_dweights[layer] = np.zeros_like(all_weights[layer])\n",
         "\n",
-        "    # TODO: calculate the derivatives of the loss with respect to the activations from weight and derivatives of next preactivations (second part of last line of eq 7.24)\n",
+        "    # TODO: calculate the derivatives of the loss with respect to the activations from weight and derivatives of next preactivations (second part of last line of eq 7.25)\n",
         "    # REPLACE THIS LINE\n",
         "    all_dl_dh[layer] = np.zeros_like(all_h[layer])\n",
         "\n",
         "\n",
         "    if layer > 0:\n",
-        "      # TODO Calculate the derivatives of the loss with respect to the pre-activation f (use derivative of ReLu function, first part of last line of eq. 7.24)\n",
+        "      # TODO Calculate the derivatives of the loss with respect to the pre-activation f (use derivative of ReLu function, first part of last line of eq. 7.25)\n",
         "      # REPLACE THIS LINE\n",
         "      all_dl_df[layer-1] = np.zeros_like(all_f[layer-1])\n",
         "\n",
@@ -352,4 +352,4 @@
       "outputs": []
     }
   ]
-}
\ No newline at end of file
+}
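
For context on what the renumbered TODOs ask for: below is a minimal sketch of the backward pass they describe, assuming the book's notation (f_k = beta_k + Omega_k h_k with h_k = ReLU(f_{k-1}), and a least-squares loss for the initial derivative). The names (`all_weights`, `all_dl_df`, `backward_pass`, etc.) mirror the notebook's scaffold, but the completed lines here are an illustrative reconstruction, not necessarily the notebook's reference solution.

```python
import numpy as np

def backward_pass(all_weights, all_biases, all_f, all_h, y, K):
    # Containers for the derivatives of the loss with respect to each quantity
    all_dl_dweights = [None] * (K + 1)
    all_dl_dbiases = [None] * (K + 1)
    all_dl_df = [None] * (K + 1)
    all_dl_dh = [None] * (K + 1)

    # Assumed least-squares loss l = (f_K - y)^2, so dl/df_K = 2(f_K - y)
    all_dl_df[K] = 2 * (all_f[K] - y)

    # Now work backwards through the network
    for layer in range(K, -1, -1):
        # eq 7.22: dl/dbeta equals dl/df (copy so later updates don't alias)
        all_dl_dbiases[layer] = np.array(all_dl_df[layer])
        # eq 7.23: dl/dOmega = (dl/df) h^T
        all_dl_dweights[layer] = np.matmul(all_dl_df[layer], all_h[layer].T)
        # second part of last line of eq 7.25: dl/dh = Omega^T (dl/df)
        all_dl_dh[layer] = np.matmul(all_weights[layer].T, all_dl_df[layer])
        if layer > 0:
            # first part of last line of eq 7.25: the ReLU derivative is the
            # indicator I[f > 0], applied elementwise
            all_dl_df[layer - 1] = (all_f[layer - 1] > 0) * all_dl_dh[layer]

    return all_dl_dweights, all_dl_dbiases
```

The bias step copies with `np.array` (per the notebook's NOTE) so that the in-place ReLU masking of `all_dl_df` on the next iteration cannot corrupt the stored bias gradients.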