From a5413d6a15c1448a5fbde18b1973703a019dab40 Mon Sep 17 00:00:00 2001 From: Youcef Rahal Date: Fri, 5 Apr 2024 08:42:10 -0400 Subject: [PATCH] Fix minor typos in chap 8 notebooks --- Notebooks/Chap08/8_1_MNIST_1D_Performance.ipynb | 5 +++-- Notebooks/Chap08/8_2_Bias_Variance_Trade_Off.ipynb | 8 ++++---- Notebooks/Chap08/8_3_Double_Descent.ipynb | 5 ++--- Notebooks/Chap08/8_4_High_Dimensional_Spaces.ipynb | 4 ++-- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Notebooks/Chap08/8_1_MNIST_1D_Performance.ipynb b/Notebooks/Chap08/8_1_MNIST_1D_Performance.ipynb index 0f8c1b3..162b6ee 100644 --- a/Notebooks/Chap08/8_1_MNIST_1D_Performance.ipynb +++ b/Notebooks/Chap08/8_1_MNIST_1D_Performance.ipynb @@ -83,6 +83,8 @@ { "cell_type": "code", "source": [ + "!mkdir ./sample_data\n", + "\n", "args = mnist1d.data.get_dataset_args()\n", "data = mnist1d.data.get_dataset(args, path='./sample_data/mnist1d_data.pkl', download=False, regenerate=False)\n", "\n", @@ -136,7 +138,6 @@ "optimizer = torch.optim.SGD(model.parameters(), lr = 0.05, momentum=0.9)\n", "# object that decreases learning rate by half every 10 epochs\n", "scheduler = StepLR(optimizer, step_size=10, gamma=0.5)\n", - "# create 100 dummy data points and store in data loader class\n", "x_train = torch.tensor(data['x'].astype('float32'))\n", "y_train = torch.tensor(data['y'].transpose().astype('long'))\n", "x_test= torch.tensor(data['x_test'].astype('float32'))\n", @@ -235,4 +236,4 @@ } } ] -} \ No newline at end of file +} diff --git a/Notebooks/Chap08/8_2_Bias_Variance_Trade_Off.ipynb b/Notebooks/Chap08/8_2_Bias_Variance_Trade_Off.ipynb index 269f56c..d75c770 100644 --- a/Notebooks/Chap08/8_2_Bias_Variance_Trade_Off.ipynb +++ b/Notebooks/Chap08/8_2_Bias_Variance_Trade_Off.ipynb @@ -92,7 +92,7 @@ { "cell_type": "code", "source": [ - "# Draw the fitted function, together win uncertainty used to generate points\n", + "# Draw the fitted function, together with uncertainty used to generate points\n", "def 
plot_function(x_func, y_func, x_data=None,y_data=None, x_model = None, y_model =None, sigma_func = None, sigma_model=None):\n", "\n", " fig,ax = plt.subplots()\n", @@ -203,7 +203,7 @@ "# Closed form solution\n", "beta, omega = fit_model_closed_form(x_data,y_data,n_hidden=3)\n", "\n", - "# Get prediction for model across graph grange\n", + "# Get prediction for model across graph range\n", "x_model = np.linspace(0,1,100);\n", "y_model = network(x_model, beta, omega)\n", "\n", @@ -302,7 +302,7 @@ "sigma_func = 0.3\n", "n_hidden = 5\n", "\n", - "# Set random seed so that get same result every time\n", + "# Set random seed so that we get the same result every time\n", "np.random.seed(1)\n", "\n", "for c_hidden in range(len(hidden_variables)):\n", @@ -344,4 +344,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/Notebooks/Chap08/8_3_Double_Descent.ipynb b/Notebooks/Chap08/8_3_Double_Descent.ipynb index ee745d6..cb380cc 100644 --- a/Notebooks/Chap08/8_3_Double_Descent.ipynb +++ b/Notebooks/Chap08/8_3_Double_Descent.ipynb @@ -124,7 +124,7 @@ " D_k = n_hidden # Hidden dimensions\n", " D_o = 10 # Output dimensions\n", "\n", - " # Define a model with two hidden layers of size 100\n", + " # Define a model with two hidden layers\n", " # And ReLU activations between them\n", " model = nn.Sequential(\n", " nn.Linear(D_i, D_k),\n", @@ -157,7 +157,6 @@ " optimizer = torch.optim.SGD(model.parameters(), lr = 0.01, momentum=0.9)\n", "\n", "\n", - " # create 100 dummy data points and store in data loader class\n", " x_train = torch.tensor(data['x'].astype('float32'))\n", " y_train = torch.tensor(data['y'].transpose().astype('long'))\n", " x_test= torch.tensor(data['x_test'].astype('float32'))\n", @@ -267,4 +266,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/Notebooks/Chap08/8_4_High_Dimensional_Spaces.ipynb b/Notebooks/Chap08/8_4_High_Dimensional_Spaces.ipynb index e162f17..80b27f0 100644 --- 
a/Notebooks/Chap08/8_4_High_Dimensional_Spaces.ipynb +++ b/Notebooks/Chap08/8_4_High_Dimensional_Spaces.ipynb @@ -224,7 +224,7 @@ { "cell_type": "markdown", "source": [ - "You should see see that by the time we get to 300 dimensions most of the volume is in the outer 1 percent.

\n", + "You should see that by the time we get to 300 dimensions most of the volume is in the outer 1 percent.

\n", "\n", "The conclusion of all of this is that in high dimensions you should be sceptical of your intuitions about how things work. I have tried to visualize many things in one or two dimensions in the book, but you should also be sceptical about these visualizations!" ], @@ -233,4 +233,4 @@ } } ] -} \ No newline at end of file +}