Merge pull request #164 from yrahal/main

Fix minor typos in Chap07 notebooks
This commit is contained in:
udlbook
2024-03-25 16:43:55 -04:00
committed by GitHub
3 changed files with 15 additions and 15 deletions

View File

@@ -120,7 +120,7 @@
" K = len(all_weights)-1\n",
"\n",
" # We'll store the pre-activations at each layer in a list \"all_f\"\n",
" # and the activations in a second list[all_h].\n",
" # and the activations in a second list \"all_h\".\n",
" all_f = [None] * (K+1)\n",
" all_h = [None] * (K+1)\n",
"\n",
@@ -151,7 +151,7 @@
{
"cell_type": "markdown",
"source": [
"Now let's investigate how this the size of the outputs vary as we change the initialization variance:\n"
"Now let's investigate how the size of the outputs varies as we change the initialization variance:\n"
],
"metadata": {
"id": "bIUrcXnOqChl"
@@ -164,7 +164,7 @@
"K = 5\n",
"# Number of neurons per layer\n",
"D = 8\n",
" # Input layer\n",
"# Input layer\n",
"D_i = 1\n",
"# Output layer\n",
"D_o = 1\n",
@@ -196,7 +196,7 @@
"# Change this to 50 layers with 80 hidden units per layer\n",
"\n",
"# TO DO\n",
"# Now experiment with sigma_sq_omega to try to stop the variance of the forward computation explode"
"# Now experiment with sigma_sq_omega to try to stop the variance of the forward computation from exploding"
],
"metadata": {
"id": "VL_SO4tar3DC"
@@ -300,7 +300,7 @@
"K = 5\n",
"# Number of neurons per layer\n",
"D = 8\n",
" # Input layer\n",
"# Input layer\n",
"D_i = 1\n",
"# Output layer\n",
"D_o = 1\n",