From d9e7306ef4632046ee4aa70a42d62cd9260064a2 Mon Sep 17 00:00:00 2001 From: udlbook <110402648+udlbook@users.noreply.github.com> Date: Wed, 20 Dec 2023 15:23:18 -0500 Subject: [PATCH] Update 19_1_Markov_Decision_Processes.ipynb --- Notebooks/Chap19/19_1_Markov_Decision_Processes.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Notebooks/Chap19/19_1_Markov_Decision_Processes.ipynb b/Notebooks/Chap19/19_1_Markov_Decision_Processes.ipynb index c5cd6d6..ddac7cc 100644 --- a/Notebooks/Chap19/19_1_Markov_Decision_Processes.ipynb +++ b/Notebooks/Chap19/19_1_Markov_Decision_Processes.ipynb @@ -598,7 +598,7 @@ "source": [ "def markov_decision_process_step_deterministic(state, transition_probabilities_given_action, reward_structure, policy):\n", " # TODO -- complete this function.\n", - " # For each state, theres is a corresponding action.\n", + " # For each state, there's a corresponding action.\n", " # Draw the next state based on the current state and that action\n", " # and calculate the reward\n", " # Replace this line:\n", @@ -683,7 +683,7 @@ "source": [ "def markov_decision_process_step_stochastic(state, transition_probabilities_given_action, reward_structure, stochastic_policy):\n", " # TODO -- complete this function.\n", - " # For each state, theres is a corresponding distribution over actions\n", + " # For each state, there's a corresponding distribution over actions\n", " # Draw a sample from that distribution to get the action\n", " # Draw the next state based on the current state and that action\n", " # and calculate the reward\n", " # Replace this line:\n", @@ -733,4 +733,4 @@ "outputs": [] } ] -} \ No newline at end of file +}