diff --git a/Notebooks/Chap07/7_2_Backpropagation.ipynb b/Notebooks/Chap07/7_2_Backpropagation.ipynb index ffc0b80..3e0c65b 100644 --- a/Notebooks/Chap07/7_2_Backpropagation.ipynb +++ b/Notebooks/Chap07/7_2_Backpropagation.ipynb @@ -4,7 +4,7 @@ "metadata": { "colab": { "provenance": [], - "authorship_tag": "ABX9TyPOadzPTZy+kvsBZs5D7n5M", + "authorship_tag": "ABX9TyOlKB4TrCJnt91TnHOrfRSJ", "include_colab_link": true }, "kernelspec": { @@ -259,13 +259,13 @@ " # REPLACE THIS LINE\n", " all_dl_dweights[layer] = np.zeros_like(all_weights[layer])\n", "\n", - " # TODO: calculate the derivatives of the loss with respect to th eactivations from weight and derivatives of next preactivations (eq 7.20)\n", + " # TODO: calculate the derivatives of the loss with respect to the activations from weight and derivatives of next preactivations (second part of last line of eq. 7.24)\n", " # REPLACE THIS LINE\n", " all_dl_dh[layer] = np.zeros_like(all_h[layer])\n", "\n", "\n", " if layer > 0:\n", - " # TODO Calculate the derivatives of the loss with respect to the pre-activation f with respect to activation h (deriv of ReLu function)\n", + " # TODO Calculate the derivatives of the loss with respect to the pre-activation f (use deriv of ReLU function, first part of last line of eq. 7.24)\n", " # REPLACE THIS LINE\n", " all_dl_df[layer-1] = np.zeros_like(all_f[layer-1])\n",