diff --git a/Notebooks/Chap07/7_3_Initialization.ipynb b/Notebooks/Chap07/7_3_Initialization.ipynb index c4ae98b..d861b2f 100644 --- a/Notebooks/Chap07/7_3_Initialization.ipynb +++ b/Notebooks/Chap07/7_3_Initialization.ipynb @@ -4,7 +4,7 @@ "metadata": { "colab": { "provenance": [], - "authorship_tag": "ABX9TyNHLXFpiSnUzAbzhtOk+bxu", + "authorship_tag": "ABX9TyOaATWBrwVMylV1akcKtHjt", "include_colab_link": true }, "kernelspec": { @@ -117,7 +117,7 @@ "def compute_network_output(net_input, all_weights, all_biases):\n", "\n", " # Retrieve number of layers\n", - " K = len(all_weights) -1\n", + " K = len(all_weights)-1\n", "\n", " # We'll store the pre-activations at each layer in a list \"all_f\"\n", " # and the activations in a second list[all_h].\n", @@ -177,7 +177,7 @@ "data_in = np.random.normal(size=(1,n_data))\n", "net_output, all_f, all_h = compute_network_output(data_in, all_weights, all_biases)\n", "\n", - "for layer in range(K):\n", + "for layer in range(1,K+1):\n", " print(\"Layer %d, std of hidden units = %3.3f\"%(layer, np.std(all_h[layer])))" ], "metadata": { @@ -249,6 +249,9 @@ "\n", "# Main backward pass routine\n", "def backward_pass(all_weights, all_biases, all_f, all_h, y):\n", + " # Retrieve number of layers\n", + " K = len(all_weights)-1\n", + "\n", " # We'll store the derivatives dl_dweights and dl_dbiases in lists as well\n", " all_dl_dweights = [None] * (K+1)\n", " all_dl_dbiases = [None] * (K+1)\n",