Fix minor typos in chap 8 notebooks

This commit is contained in:
Youcef Rahal
2024-04-05 08:42:10 -04:00
parent 4652f90f09
commit a5413d6a15
4 changed files with 11 additions and 11 deletions

View File

@@ -83,6 +83,8 @@
{
"cell_type": "code",
"source": [
"!mkdir ./sample_data\n",
"\n",
"args = mnist1d.data.get_dataset_args()\n",
"data = mnist1d.data.get_dataset(args, path='./sample_data/mnist1d_data.pkl', download=False, regenerate=False)\n",
"\n",
@@ -136,7 +138,6 @@
"optimizer = torch.optim.SGD(model.parameters(), lr = 0.05, momentum=0.9)\n",
"# object that decreases learning rate by half every 10 epochs\n",
"scheduler = StepLR(optimizer, step_size=10, gamma=0.5)\n",
"# create 100 dummy data points and store in data loader class\n",
"x_train = torch.tensor(data['x'].astype('float32'))\n",
"y_train = torch.tensor(data['y'].transpose().astype('long'))\n",
"x_test= torch.tensor(data['x_test'].astype('float32'))\n",
@@ -235,4 +236,4 @@
}
}
]
}
}

View File

@@ -92,7 +92,7 @@
{
"cell_type": "code",
"source": [
"# Draw the fitted function, together win uncertainty used to generate points\n",
"# Draw the fitted function, together with uncertainty used to generate points\n",
"def plot_function(x_func, y_func, x_data=None,y_data=None, x_model = None, y_model =None, sigma_func = None, sigma_model=None):\n",
"\n",
" fig,ax = plt.subplots()\n",
@@ -203,7 +203,7 @@
"# Closed form solution\n",
"beta, omega = fit_model_closed_form(x_data,y_data,n_hidden=3)\n",
"\n",
"# Get prediction for model across graph grange\n",
"# Get prediction for model across graph range\n",
"x_model = np.linspace(0,1,100);\n",
"y_model = network(x_model, beta, omega)\n",
"\n",
@@ -302,7 +302,7 @@
"sigma_func = 0.3\n",
"n_hidden = 5\n",
"\n",
"# Set random seed so that get same result every time\n",
"# Set random seed so that we get the same result every time\n",
"np.random.seed(1)\n",
"\n",
"for c_hidden in range(len(hidden_variables)):\n",
@@ -344,4 +344,4 @@
"outputs": []
}
]
}
}

View File

@@ -124,7 +124,7 @@
" D_k = n_hidden # Hidden dimensions\n",
" D_o = 10 # Output dimensions\n",
"\n",
" # Define a model with two hidden layers of size 100\n",
" # Define a model with two hidden layers\n",
" # And ReLU activations between them\n",
" model = nn.Sequential(\n",
" nn.Linear(D_i, D_k),\n",
@@ -157,7 +157,6 @@
" optimizer = torch.optim.SGD(model.parameters(), lr = 0.01, momentum=0.9)\n",
"\n",
"\n",
" # create 100 dummy data points and store in data loader class\n",
" x_train = torch.tensor(data['x'].astype('float32'))\n",
" y_train = torch.tensor(data['y'].transpose().astype('long'))\n",
" x_test= torch.tensor(data['x_test'].astype('float32'))\n",
@@ -267,4 +266,4 @@
"outputs": []
}
]
}
}

View File

@@ -224,7 +224,7 @@
{
"cell_type": "markdown",
"source": [
"You should see see that by the time we get to 300 dimensions most of the volume is in the outer 1 percent. <br><br>\n",
"You should see that by the time we get to 300 dimensions most of the volume is in the outer 1 percent. <br><br>\n",
"\n",
"The conclusion of all of this is that in high dimensions you should be sceptical of your intuitions about how things work. I have tried to visualize many things in one or two dimensions in the book, but you should also be sceptical about these visualizations!"
],
@@ -233,4 +233,4 @@
}
}
]
}
}