Update 19_1_Markov_Decision_Processes.ipynb

This commit is contained in:
udlbook
2023-12-20 15:23:18 -05:00
committed by GitHub
parent d5304c8034
commit d9e7306ef4

View File

@@ -598,7 +598,7 @@
"source": [ "source": [
"def markov_decision_process_step_deterministic(state, transition_probabilities_given_action, reward_structure, policy):\n", "def markov_decision_process_step_deterministic(state, transition_probabilities_given_action, reward_structure, policy):\n",
" # TODO -- complete this function.\n", " # TODO -- complete this function.\n",
"    # For each state, theres is a corresponding action.\n", "    # For each state, there is a corresponding action.\n",
" # Draw the next state based on the current state and that action\n", " # Draw the next state based on the current state and that action\n",
" # and calculate the reward\n", " # and calculate the reward\n",
" # Replace this line:\n", " # Replace this line:\n",
@@ -683,7 +683,7 @@
"source": [ "source": [
"def markov_decision_process_step_stochastic(state, transition_probabilities_given_action, reward_structure, stochastic_policy):\n", "def markov_decision_process_step_stochastic(state, transition_probabilities_given_action, reward_structure, stochastic_policy):\n",
" # TODO -- complete this function.\n", " # TODO -- complete this function.\n",
"    # For each state, theres is a corresponding distribution over actions\n", "    # For each state, there is a corresponding distribution over actions\n",
" # Draw a sample from that distribution to get the action\n", " # Draw a sample from that distribution to get the action\n",
" # Draw the next state based on the current state and that action\n", " # Draw the next state based on the current state and that action\n",
" # and calculate the reward\n", " # and calculate the reward\n",
@@ -733,4 +733,4 @@
"outputs": [] "outputs": []
} }
] ]
} }